├── .gitignore
├── pic
│   ├── 1.png
│   ├── 2.png
│   ├── lstm.png
│   ├── res.png
│   ├── conv2d1.png
│   ├── cnnfinal.png
│   └── stacking.png
├── Preliminary
│   ├── PKL
│   │   ├── test_gpu.py
│   │   ├── run.sh
│   │   ├── 0721_conv2_2_net_oof_comm_nn0.84665
│   │   │   └── conv2d-avepooling_fc2-add_feature-Copy1.py
│   │   ├── 0725_conv2_2_net_weight_comm_0.85568
│   │   │   └── conv2d-avepooling_fc2-add_feature_template-weight.py
│   │   ├── 0728_08648_online792
│   │   │   └── conv2d-avepooling_fc2-add_feature_template-multiloss-Copy1.py
│   │   ├── 0730_generator_one_fifth_orig_mixup_087099
│   │   │   └── conv2d-avepooling_fc2-add_feature_template-multiloss-removedecay-generator-Copy1.py
│   │   ├── 0730_generator_one_fourth_orig_mixup_087765
│   │   │   └── conv2d-avepooling_fc2-add_feature_template-multiloss-removedecay-generator-Copy2.py
│   │   ├── 0729_generator_one_third_orig_mixup_086223
│   │   │   └── conv2d-avepooling_fc2-add_feature_template-multiloss-removedecay-generator.py
│   │   ├── 0729_generator_one_sixth_orig_mixup_086686
│   │   │   └── conv2d-avepooling_fc2-add_feature_template-multiloss-generator-Copy1.py
│   │   ├── multi_lstm
│   │   │   └── mutil_loss_Lstm.py
│   │   └── spetron_cnn
│   │       └── spetron_cnn.py
│   ├── Dockerfile
│   ├── README.md
│   ├── ensemble_1_to_allin088681.py
│   └── ensemble_2_to_0806allin088716.py
├── Final
│   ├── README.md
│   └── code
│       └── cos_dense_attention.py
└── README.md
/.gitignore:
--------------------------------------------------------------------------------
1 | *.csv
2 | *.pkl
3 | *.zip
4 |
--------------------------------------------------------------------------------
/pic/1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miziha-zp/xw2020-top1/HEAD/pic/1.png
--------------------------------------------------------------------------------
/pic/2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miziha-zp/xw2020-top1/HEAD/pic/2.png
--------------------------------------------------------------------------------
/pic/lstm.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miziha-zp/xw2020-top1/HEAD/pic/lstm.png
--------------------------------------------------------------------------------
/pic/res.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miziha-zp/xw2020-top1/HEAD/pic/res.png
--------------------------------------------------------------------------------
/pic/conv2d1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miziha-zp/xw2020-top1/HEAD/pic/conv2d1.png
--------------------------------------------------------------------------------
/pic/cnnfinal.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miziha-zp/xw2020-top1/HEAD/pic/cnnfinal.png
--------------------------------------------------------------------------------
/pic/stacking.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/miziha-zp/xw2020-top1/HEAD/pic/stacking.png
--------------------------------------------------------------------------------
/Preliminary/PKL/test_gpu.py:
--------------------------------------------------------------------------------
1 |
2 | from tensorflow.python.client import device_lib
3 | print('#'*30)
4 | print('#'*30)
5 | print(device_lib.list_local_devices())
6 | print('#'*30)
7 | print('#'*30)
--------------------------------------------------------------------------------
/Final/README.md:
--------------------------------------------------------------------------------
 1 | # Notes
 2 | 1. The on-site environment provided TensorFlow 2.0, whose Attention layer was problematic, so [dense_attention.py](https://github.com/tensorflow/tensorflow/blob/498e815097e74aff7fefdbbae69ba9daf6e9c023/tensorflow/python/keras/layers/dense_attention.py#L191) was copied from a newer TensorFlow release and imported manually (a usage sketch is given at the end of this README)
 3 | 
 4 | # File structure
 5 | ```
 6 | |-- 
 7 | |-- code # code used in the final round
 8 |         |-- CNNtower.ipynb # two-tower CNN
 9 |         |-- cos_dense_attention.py # see note 1
10 |         |-- lgb_final.ipynb # LightGBM model
11 |         |-- LSTMnet.ipynb # LSTM
12 |         |-- ensembl8086.ipynb # LightGBM used for ensembling
13 |         |-- data # per the organizers' requirements, data files cannot be provided
14 | ```
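
A minimal usage sketch of note 1, under our assumptions (the class name `Attention`, its `use_scale` argument, and the input shapes are taken from the linked TensorFlow source and used here only for illustration; this is not the competition notebook code):

```python
import tensorflow as tf
from tensorflow.keras import layers
from cos_dense_attention import Attention  # local copy of the newer dense_attention.py, see note 1

q = layers.Input(shape=(60, 14))   # query sequence (shape assumed for illustration)
v = layers.Input(shape=(60, 14))   # value sequence
att = Attention(use_scale=True)([q, v])   # same call signature as tf.keras.layers.Attention
model = tf.keras.Model([q, v], layers.GlobalAveragePooling1D()(att))
```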
--------------------------------------------------------------------------------
/Preliminary/Dockerfile:
--------------------------------------------------------------------------------
 1 | # Specify the base image: the TensorFlow 2.1.0 GPU image (Python 3)
2 |
3 |
4 | FROM registry.hub.docker.com/tensorflow/tensorflow:2.1.0-gpu-py3
5 |
 6 | # Install the required Python packages; a domestic mirror is faster inside China, here the Tsinghua mirror (you can pin versions, e.g. tensorflow==1.0)
7 |
8 | RUN pip install numpy pandas scikit-learn lightgbm seaborn tqdm scipy -i https://pypi.tuna.tsinghua.edu.cn/simple
9 |
10 | COPY ./data/ xx/data
11 | COPY ./ensemble_1_to_allin088681.py xx/
12 | COPY ./ensemble_2_to_0806allin088716.py xx/
13 | COPY ./PKL/ xx/PKL
14 | COPY ./sub/ xx/sub
15 | # If you have more directories, copy each of them into the container as well
16 | # Using the /* pattern may cause errors in Docker
17 | # Set the working directory to xx
18 |
19 | WORKDIR xx
20 |
21 | # The line below is for testing
22 | #CMD echo start | tee start_ && pip list && nvidia-smi && python PKL/test_gpu.py && bash PKL/run.sh && echo done |tee done_ && sleep 2h
23 |
24 |
25 | # Generating the required final files takes roughly 4 days (RTX 2080 GPU, 9 five-fold models in total)
26 | CMD echo start | tee start_ && pip list && nvidia-smi && python PKL/test_gpu.py && bash PKL/run.sh && python ensemble_2_to_0806allin088716.py && python ensemble_1_to_allin088681.py && echo done |tee done_ && sleep 2h
27 |
28 |
--------------------------------------------------------------------------------
/Preliminary/PKL/run.sh:
--------------------------------------------------------------------------------
1 | cd PKL/0721_conv2_2_net_oof_comm_nn0.84665
2 | python conv2d-avepooling_fc2-add_feature-Copy1.py
3 | cd ..
4 | cd 0725_conv2_2_net_weight_comm_0.85568
5 | python conv2d-avepooling_fc2-add_feature_template-weight.py
6 | cd ..
7 | cd 0728_08648_online792
8 | python conv2d-avepooling_fc2-add_feature_template-multiloss-Copy1.py
9 | cd ..
10 | cd 0729_generator_one_sixth_orig_mixup_086686
11 | python conv2d-avepooling_fc2-add_feature_template-multiloss-generator-Copy1.py
12 | cd ..
13 | cd 0729_generator_one_third_orig_mixup_086223
14 | python conv2d-avepooling_fc2-add_feature_template-multiloss-removedecay-generator.py
15 | cd ..
16 | cd 0730_generator_one_fifth_orig_mixup_087099
17 | python conv2d-avepooling_fc2-add_feature_template-multiloss-removedecay-generator-Copy1.py
18 | cd ..
19 | cd 0730_generator_one_fourth_orig_mixup_087765
20 | python conv2d-avepooling_fc2-add_feature_template-multiloss-removedecay-generator-Copy2.py
21 | cd ..
22 | cd multi_lstm
23 | python mutil_loss_Lstm.py
24 | cd ..
25 | cd spetron_cnn
26 | python spetron_cnn.py
--------------------------------------------------------------------------------
/Preliminary/README.md:
--------------------------------------------------------------------------------
 1 | # Team name
 2 | MTM
 3 | 
 4 | # Solution overview
 5 | 
 6 | The NN part consists of 9 models; depending on their parameters/architecture, a single model takes 2h-10h to train (one RTX 2080). Since we do not know the organizers' machine setup, the 9 models are run serially here, and the generated probability files are stacked into the final submission file. Reproduction is expected to take about 4 days.
 7 | 
 8 | # Training
 9 | 1. CNN2D-based deep learning models, trained with 5-fold cross-validation on a single 2080 Ti GPU
10 | 2. Model ensembling based on LightGBM
11 | 
12 | # How to run
13 | 1. Running the Docker image starts the training + ensembling pipeline directly; running the code with nvidia-docker is recommended (**tested**). Due to hardware limits, we only verified a serial single-GPU run on one RTX 2080.
14 | The commands are:
15 | ```bash
16 | docker build -t team_mtm_docker ./
17 | docker run --gpus '"device=0"' team_mtm_docker # start GPU training
18 | ```
19 |
20 | # Directory structure
21 | ```
22 | |--
23 | |-- Dockerfile # Docker file; runs the two .py scripts in this directory and produces two submission files
24 | |-- data # data
25 | |-- sensor_test.csv
26 | |-- sensor_train.csv
27 | |-- 提交结果示例.csv
28 |     |-- PKL # corresponding files (training code, models, and probabilities)
29 |     |-- sub # output prediction files (note: there are two)
30 |     |-- ensemble_1_to_allin088681.py # corresponding submission: allin0.88681.csv, 0.799
31 |     |-- ensemble_2_to_0806allin088716.py # corresponding submission: 0806allin0.88716.csv, 0.7979365079365079
32 |     |-- lgb_online073.ipynb # LightGBM implementation with rolling features, ~0.73 online in the preliminary round; not used in the preliminary ensemble, for reference only
33 | ```
34 | Notes:
35 | 1. Since we could not tell which of the last two submissions corresponds to the highest leaderboard score, both are reproduced.
36 | 2. **Because LightGBM trains with multiple processes, results are not guaranteed to be exactly identical on machines with different configurations.**
37 |
38 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # 🥟 交子杯 2020 - AI Track - TOP 1
2 |
3 | ## summary
4 |
5 | After two on-and-off months the competition is over: from 27th on the preliminary A leaderboard to 4th on the B leaderboard, and then to first place on both the A and B leaderboards of the final. There was the disappointment of a slipping rank that nearly cost us a spot in the on-site final, and the 12 hours at the on-site final spent waiting to be overtaken; our mood swung up and down much like this year's stock market.
6 |
7 | Now to the main topic: MTM's work in this competition. The solution comes from two young MTM members, biubiubiu and Fizzer. Preliminary-round participants and readers who did not take part are encouraged to read the whole write-up; finalists may prefer the Ensembling section and the LightGBM subsection under Models to quickly review the keys to our win.
8 |
9 | We warmly invite you to follow our WeChat account [MeteoAI](https://mp.weixin.qq.com/s/ySLh3BSTEUSuHu0CpevXbQ), which focuses on time-series modeling and AI for meteorology; we plan to publish our baselines, EDA, and solution tips from future competitions there.
10 | If you have questions, feel free to open an issue, or come discuss on [Zhihu](https://zhuanlan.zhihu.com/p/201389840); my Zhihu is mainly used for post-competition sharing and also hosts code for tricks commonly seen in competitions.
11 |
12 |
13 | ## Task
14 | 
15 | The task is to use smartphone accelerometer data (with gravity: acc_xg, acc_yg, acc_zg; without gravity: acc_x, acc_y, acc_z) to predict the user's behavior (scrolling Douyin, playing games, ...) in different scenarios (standing, walking, sitting/lying), with the goal of intelligently detecting financial fraud on mobile phones. See the [official website](https://www.kesci.com/home/competition/5ece30cc73a1b3002c9f1bf5/content) for details.
16 | 
17 | ## Data
18 | 
19 | The preliminary round provided roughly 7,500 training samples and 7,500 test samples for a 19-class task; the final round had roughly twice as much data and was a 20-class task, with classes added and removed relative to the preliminary round.
20 | 
21 | **Note**: In the preliminary data the train and test distributions differ substantially (this can be seen with some feature-selection methods; we suspect the split was made by the device id used for collection, or by volunteer id), while the number of sampling points per fragment is fairly concentrated, around 60. In the final data the train/test gap is much less pronounced (presumably a random split by sample id, and we further suspect the final A/B leaderboards were also split randomly by sample id), but the number of sampling points varies widely: median 22, maximum 60+, minimum only 2. In addition, the final data has higher numeric precision than the preliminary data.
22 | 
23 | The changes in **the number of sampling points and the train/test distribution** were the key to cracking the final.
24 |
25 |
26 | Distribution of the number of sampling points in the final data
27 | 
28 | 
29 | 
30 | The train/test distribution gap in the final data is very small
31 |
32 |
33 | ## Competition format
34 | 
35 | Preliminary round: June 17 - August 7, with online A and B leaderboards. The top 30 on the B leaderboard submitted Docker images for reproduction, and the top 10 advanced to the on-site final, which consisted of a coding contest and an on-site defense.
36 | 
37 | The coding contest used the same rules as the preliminary round: 25 hours and 50 submissions in total, with many contestants working around the clock. Several teams staged comebacks in the final, some even jumping straight to the top, so it suits contestants who perform steadily under pressure. The final provided **one V100 (16 GB)**; no extra compute was allowed, and training or pre-training on preliminary-round data was forbidden. This put a premium on the robustness and light weight of the preliminary-round models, as well as on the contestants' ability to perform on the spot.
38 | 
39 | ## Approach
40 | 
41 | In our view the difficult parts of this task were: 1. **modeling the time series while still guaranteeing feature interaction**; 2. **effectively handling the differences in the number of sampling points** (final round).
42 | 
43 | - For 1, we tried models that automate feature interaction and temporal modeling (CNN2d, CNN1d, LSTM), and also tried doing part of the feature interaction and temporal-pattern extraction during feature engineering, letting the model (LightGBM) focus on learning feature combinations.
44 | - For 2, we designed expert models with different strengths to model long and short sequences respectively, then combined them with stacking and used guided expert selection.
45 |
46 | ## Models
47 |
48 | ### Conv2d
49 |
50 | 
51 |
52 | The preliminary-round model is based on the baseline open-sourced by OTTO, with the following adjustments:
53 | 
54 | 1. Add the following supplementary feature sequences (which can be seen as a form of pre-computed feature interaction):
55 |
56 | ```python
57 | def add_features(df):
58 |     # adapted from a CSDN article; the formula is slightly off, see the section "Things worth exploring / not yet tried"
59 | print(df.columns)
60 | df['acc'] = (df.acc_x ** 2 + df.acc_y ** 2 + df.acc_z ** 2) ** .5
61 | df['accg'] = (df.acc_xg ** 2 + df.acc_yg ** 2 + df.acc_zg ** 2) ** .5
62 | df['thetax']=np.arctan(df.acc_xg/
63 | np.sqrt(df.acc_yg*df.acc_yg+df.acc_zg*df.acc_zg))*180/np.pi
64 | df['thetay']=np.arctan(df.acc_yg/
65 | np.sqrt(df.acc_xg*df.acc_xg+df.acc_zg*df.acc_zg))*180/np.pi
66 | df['thetaz']=np.arctan(df.acc_zg/
67 | np.sqrt(df.acc_yg*df.acc_yg+df.acc_xg*df.acc_xg))*180/np.pi
68 |
69 | df['xy'] = (df['acc_x'] ** 2 + df['acc_y'] ** 2) ** 0.5
70 | df['xy_g'] = (df['acc_xg'] ** 2 + df['acc_yg'] ** 2) ** 0.5
71 |
72 | df['g'] = ((df["acc_x"] - df["acc_xg"]) ** 2 +
73 | (df["acc_y"] - df["acc_yg"]) ** 2 + (df["acc_z"] - df["acc_zg"]) ** 2) ** 0.5
74 |
75 | print(df.columns)
76 | return df
77 | train=add_features(train)
78 | test=add_features(test)
79 |
80 | ```
81 |
82 | 2. In the final, short sequences were repeated and concatenated until they reached 60 sampling points; other options (e.g. zero-padding, resampling) were not tried (see the padding sketch after this list).
83 | 3. Use Conv2d-BN-ReLU instead of Conv2d-ReLU-BN, and add Dropout layers.
84 | 4. Split the input features into groups, each passed through a convolutional module of identical structure but with unshared parameters. This lets the kernels focus on the latent patterns inside each feature group, and often achieves better results with fewer kernels.
85 | 5. Add two auxiliary cross-entropy losses that classify the scenario and the action respectively, for three losses in total, and set per-label weights within each loss.
86 | 6. Use MixUp data augmentation, restricted to samples with the same label to avoid potential problems: visualization showed that for actions such as handing over the phone, perhaps only half of the sampling points carry real signal, so mixing across the whole dataset could create many wrong labels (see the mixing sketch after this list).
87 | 7. During the final, to further strengthen point 5, the architecture was modified to use two independent convolutional modules (same structure, unshared parameters) to model the scenario and the action separately; this lifted our best single model from 77 to 78.
88 | 
89 | 8. An interesting trick in the preliminary round: standardize the raw features, apply an FFT to move to the frequency domain, and train the model above. This scored around 76 online (preliminary round), and ensembling it with the time-domain model gained about 5 thousandths; the trick did not work in the final (the sequences are too short for the FFT to help).
90 | 9. Both rounds used the Adam optimizer (tensorflow.keras.optimizers.Adam with default parameters); the preliminary round used 5-fold cross-validation, and the final used 3-fold cross-validation due to time constraints.
91 |
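A minimal sketch of the repeat-and-truncate padding in point 2 (our illustration; `pad_by_repetition` is a hypothetical helper, not a function from the competition code):

```python
import numpy as np

def pad_by_repetition(fragment, target_len=60):
    """Tile a (T, F) fragment along the time axis until it reaches target_len, then truncate."""
    reps = int(np.ceil(target_len / len(fragment)))
    return np.tile(fragment, (reps, 1))[:target_len]

short = np.random.randn(22, 14)          # e.g. a fragment with 22 sampling points and 14 features
print(pad_by_repetition(short).shape)    # (60, 14)
```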
92 |
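And a sketch of the same-label mixing in point 6 (illustrative only; as noted later in "Things worth exploring", the augmentation actually used was CutMix-style rather than plain MixUp, and the function below is our assumption, not the competition code):

```python
import numpy as np

def same_label_mixup(X, y_onehot, labels, alpha=0.2, seed=0):
    """Mix each sample only with a random partner that shares its label."""
    rng = np.random.default_rng(seed)
    lam = rng.beta(alpha, alpha, size=len(X)).reshape(-1, 1, 1, 1)   # broadcast over (T, F, C)
    partner = np.array([rng.choice(np.where(labels == l)[0]) for l in labels])
    X_mix = lam * X + (1 - lam) * X[partner]
    return X_mix, y_onehot   # partners share the label, so the one-hot targets are unchanged
```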
93 |
94 | ### LSTM
95 |
96 | 
97 | The LSTM model uses the same grouped-feature-extraction idea as Conv2d. Adding LayerNormalization and Dropout inside the LSTM blocks clearly improved the online score (preliminary round), while a self-attention layer brought little gain. The data also needs to be standardized beforehand (i.e., column-wise standardization of the raw DataFrame), otherwise the model struggles to converge to a good solution. The final round reused the preliminary-round model, scoring 77 online.
98 |
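A minimal sketch of the grouped-LSTM idea (our reconstruction: the group boundaries, layer sizes, and dropout rates are assumptions, not the exact competition architecture):

```python
import tensorflow as tf
from tensorflow.keras import layers, Model

def grouped_lstm(seq_len=60, n_features=14, groups=((0, 8), (8, 14)), n_classes=19):
    inp = layers.Input(shape=(seq_len, n_features))
    branches = []
    for start, end in groups:                       # each feature group gets its own branch
        x = layers.Lambda(lambda t, s=start, e=end: t[:, :, s:e])(inp)
        x = layers.LSTM(128, return_sequences=True)(x)
        x = layers.LayerNormalization()(x)          # LayerNorm + Dropout inside the LSTM block
        x = layers.Dropout(0.2)(x)
        x = layers.LSTM(64)(x)
        branches.append(x)
    x = layers.Concatenate()(branches)
    out = layers.Dense(n_classes, activation='softmax')(x)
    return Model(inp, out)
```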
99 | ### LightGBM
100 |
101 | LightGBM, the evergreen gradient-boosting workhorse, models the time series through statistical features with strong interpretability and excellent generalization. With GPU resources limited in the final, making full use of the CPU was a very good lever.
102 | In the preliminary round LightGBM's performance was unremarkable (around 73), but it shone on the final data (768). Possible reasons:
103 | 
104 | 1. The final data is twice the size of the preliminary data; more data helped LightGBM generalize better.
105 | 2. Extracting statistical features from short sequences usually loses less information than doing so from long sequences, and the final sequences are on the short side, hence the better performance than in the preliminary round.
106 | 3. Compared with NNs, tree models are not necessarily worse at modeling short sequences.
107 |
108 |
109 | Our tuning process for this model over the course of the **final** was:
110 | 
111 | 1. Run the baseline open-sourced in the preliminary round (statistics 'min', 'max', 'mean', 'median', 'std', 'skew' over the raw features as the representation of each sequence), 72 online.
112 | 2. Add a group of temporal features and build the same statistics as in 1 on top of them, 73 online.
113 | 3. Add a brute-force, all-round set of statistical features (listed below; a pandas sketch follows this list), 75 online.
114 |
115 | > ['min', 'max', 'mean','sum','median',"skew",'kurt', 'mad','std','var','q10', 'q20', 'q25', 'q30', 'q40', 'q60', 'q70', 'q75', 'q80', 'q90','entropy_values',"range_value","root_mean_square"]
116 |
117 | 4. Build a group of interaction features from the raw features plus physics and everyday knowledge (the same features as in Conv2d), 768 online.
118 |
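A rough pandas sketch of the statistics listed in point 3 (our reconstruction, not the competition notebook; `fragment_stats` and the helper names are hypothetical, and the entropy feature is omitted for brevity):

```python
import numpy as np
import pandas as pd

def q(p):
    f = lambda s: s.quantile(p)
    f.__name__ = f'q{int(p * 100)}'      # give each quantile aggregator a unique name
    return f

def kurt(s): return s.kurt()
def mad(s): return (s - s.mean()).abs().mean()
def range_value(s): return s.max() - s.min()
def root_mean_square(s): return np.sqrt((s ** 2).mean())

def fragment_stats(df, feature_cols):
    aggs = ['min', 'max', 'mean', 'sum', 'median', 'skew', 'std', 'var',
            kurt, mad, range_value, root_mean_square] + \
           [q(p) for p in (0.10, 0.20, 0.25, 0.30, 0.40, 0.60, 0.70, 0.75, 0.80, 0.90)]
    stats = df.groupby('fragment_id')[feature_cols].agg(aggs)
    stats.columns = ['_'.join(col) for col in stats.columns]   # flatten (feature, stat) names
    return stats
```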
119 | Thanks to the model diversity between trees and NNs, ensembling this tree model raised our score by more than 2 percentage points; see the next subsection for details.
120 |
121 | ### Ensembling
122 | 
123 | After the competition we learned that many teams used blending (weighted averaging) and some teams used stacking.
124 | 
125 | It is easy to see that for samples with short sequences, LightGBM trained on statistical features works very well and generalizes strongly, whereas samples with long sequences rely on higher-capacity NN models (CNN, LSTM, etc.) to fully extract temporal features while still allowing rich feature interaction. Designing different expert models is therefore a good way to handle the uneven sequence lengths. Note that we did not split the data by sequence length and train separate models on the splits; instead, each model was trained on the full data (1. tree models and deep models can each handle the task on their own; 2. more data effectively improves generalization), and the ensemble learner performs the expert selection.
126 | Our ensembling scheme is as follows:
127 | 
128 |
129 | 
130 |
131 | We had three models: a CNN2d (78 online; all scores in this subsection are final-round A-leaderboard scores), an LSTM (77 online), and a LightGBM (768 online) — none of them an especially high single-model score. Concatenating the three models' probabilities (in stacking fashion) with the sample-number feature and using LightGBM as the second-level learner reached 813 (the B-leaderboard score announced on site; the corresponding A-leaderboard score was 808), nearly three percentage points above our best single model (a stacking sketch appears at the end of this subsection). Many teams with better single models did not gain nearly as much from ensembling; I think the reasons can be discussed from the following angles:
132 |
133 | 1. Blending is inherently weaker than stacking in expressive power.
134 | 2. Guided expert selection (I seem to recall similar tricks in some earlier top solutions — pointers welcome, as I could not find them again): when stacking, besides the first-level models' probabilities, also give the second-level learner some raw features to reference, so that it can pick the better prediction distribution in each situation. For example, suppose one model is good at predicting women's preferences and another at men's. If the second-level model only sees the first-level probabilities, it can only learn combinations and interactions of the two probability vectors; but if gender is added as a feature when training the second level, the model can learn to choose a model based on gender (it could learn this pattern even without the feature, but not nearly as well).
135 | 
136 | > Another example: MTM wants to solve a problem, so it brings in a mathematician, a historian, an experimental physicist, and an economist; you have a question, and you know which field it belongs to.
137 | > Blending amounts to assigning each scholar a weight according to preference (one weight for all samples, not case by case) and letting them vote, which cannot exploit the prior knowledge that each expert has their own specialty.
138 | > On the other hand, knowing which field the question belongs to is crucial: it directly determines which scholar MTM should listen to.
139 |
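A minimal sketch of the stacking step (a reconstruction under our assumptions: the array names are hypothetical, the LightGBM hyperparameters are placeholders, and the early-stopping callback assumes LightGBM >= 3.3; this is not the exact competition script):

```python
import numpy as np
import lightgbm as lgb
from sklearn.model_selection import StratifiedKFold

def stack_with_lgb(oof_probs, test_probs, sample_num, test_sample_num, y, n_splits=5):
    """oof_probs / test_probs: lists of (n_samples, n_classes) arrays from CNN2d, LSTM, LGB."""
    X = np.hstack(oof_probs + [sample_num.reshape(-1, 1)])           # first-level probabilities + raw feature
    X_test = np.hstack(test_probs + [test_sample_num.reshape(-1, 1)])
    proba = np.zeros((len(X_test), len(np.unique(y))))
    for tr, va in StratifiedKFold(n_splits, shuffle=True, random_state=42).split(X, y):
        clf = lgb.LGBMClassifier(n_estimators=2000, learning_rate=0.05)
        clf.fit(X[tr], y[tr], eval_set=[(X[va], y[va])],
                callbacks=[lgb.early_stopping(100, verbose=False)])
        proba += clf.predict_proba(X_test) / n_splits
    return proba
```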
140 | ## Closing thoughts
141 | 
142 | A win that may look lucky in fact took a great deal of thought. Many thanks to my teammates for their effort, and to MTM-zhangqibot for providing machines during the preliminary round. Having a group of like-minded partners on the road to becoming MTM is truly a lucky thing. During the competition we were fortunate to meet many strong competitors and to see ideas even deeper than their models, which was a great pleasure. We hope to battle head-to-head on the leaderboard again.
143 |
144 |
145 | ## Things worth exploring / not yet tried
146 | 
147 | 1. Should the data be standardized? In our CNN implementation, standardization greatly improved the offline score but did not yield a better online score. Question: how should data with real physical meaning be preprocessed? (Many teams did use standardization, so it may simply come down to how each team optimized.)
148 | 2. Data augmentation: scipy.signal.resample works well in practice; we tried linear interpolation but it did not help. That said, visualization shows that data passed through scipy.signal.resample still differs somewhat from the original.
149 | 3. Data augmentation: window-warping augmentation intuitively suits this kind of time series ([reference](https://stats.stackexchange.com/questions/320952/data-augmentation-strategies-for-time-series-forecasting)); also, the raw data is quite jittery, so the scale of noise added directly to it is hard to pick (too small has no effect, too large buries the original signal).
150 | 4. **A correction to our presentation: the data augmentation we used was CutMix, not MixUp.**
151 | 5. In the preliminary round someone suggested a coordinate transform (phone frame -> world frame). After consulting the literature we found that accelerometer data alone cannot transform the phone frame into the world frame (magnetometer data is required), though pitch and roll can be recovered ([reference](https://medium.com/analytics-vidhya/exploring-data-acquisition-and-trajectory-tracking-with-android-devices-and-python-9fdef38f25ee)).
152 | 6. Reportedly, cross-validation with more folds (say, 20) can raise the score.
153 | 7. With more features and tuned training parameters alone, LightGBM reached ~0.72 in the preliminary round; with rolling features it reached 0.73+ there. Presumably rolling would score even higher in the final, but take care not to leak labels when implementing it (a rolling sketch follows this list).
154 | 8. Some useful features from LightGBM can also be fed into the NN, e.g. the "xy", "xy_g", and "g" features we found.
155 | 9. The idea for thetax, thetay, thetaz comes from [this article](https://blog.csdn.net/zhaoyuaiweide/article/details/70756387?utm_medium=distribute.pc_relevant.none-task-blog-BlogCommendFromMachineLearnPai2-4.nonecase&depth_1-utm_source=distribute.pc_relevant.none-task-blog-BlogCommendFromMachineLearnPai2-4.nonecase). Thanks to @拾柒 for pointing out that the formula we used during the competition is flawed (the linear acceleration should be removed when computing the phone's tilt); it should be corrected to:
156 | ```python
157 | df['g_x']=df['acc_xg']-df['acc_x']
158 | df['g_y']=df['acc_yg']-df['acc_y']
159 | df['g_z']=df['acc_zg']-df['acc_z']
160 |
161 | df['thetax']=np.arctan(df.g_x/
162 | np.sqrt(df.g_y*df.g_y+df.g_z*df.g_z))*180/np.pi
163 | df['thetay']=np.arctan(df.g_y/
164 | np.sqrt(df.g_x*df.g_x+df.g_z*df.g_z))*180/np.pi
165 | df['thetaz']=np.arctan(df.g_z/
166 | np.sqrt(df.g_x*df.g_x+df.g_y*df.g_y))*180/np.pi
167 |
168 | ```
169 |
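A hypothetical sketch of the rolling idea in point 7 (our illustration, not the lgb_online073 notebook; `add_rolling_features`, the column choice, and the window size are assumptions). Rolling statistics are computed within each fragment only, which also keeps information from leaking across fragments:

```python
import pandas as pd

def add_rolling_features(df, cols=('acc_x', 'acc_y', 'acc_z'), window=5):
    g = df.sort_values(['fragment_id', 'time_point']).groupby('fragment_id')
    for c in cols:
        df[f'{c}_roll_mean'] = g[c].transform(lambda s: s.rolling(window, min_periods=1).mean())
        df[f'{c}_roll_std'] = g[c].transform(lambda s: s.rolling(window, min_periods=1).std())
    return df
```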
170 | ## Looking forward
171 | 
172 | 1. We look forward to everyone's excellent solutions, e.g. 阿sir's word2vec approach, 买米当卡's 803 single-model solution (preliminary round), OTTO's trick of adding a Dropout layer after the input layer, and many more.
173 |
174 | ## Repository layout
175 | 
176 | |  Directory  |        Contents        |
177 | | :---------: | :--------------------: |
178 | | Preliminary | Preliminary-round code |
179 | | Final       | Final-round code       |
180 | 
181 | - Per the organizers' requirements, the raw data cannot be provided.
182 |
183 |
184 | ## References
185 | 
186 | 1. [OTTO's open-source baseline](https://mp.weixin.qq.com/s/r7Ai8FVSPRB71PVghYk75A)
187 | 2. [LGB 695 open-source kernel](https://www.kesci.com/home/project/5eeaef66e5f796002c2be1c8)
188 |
189 |
190 |
191 |
192 |
193 |
--------------------------------------------------------------------------------
/Preliminary/PKL/0721_conv2_2_net_oof_comm_nn0.84665/conv2d-avepooling_fc2-add_feature-Copy1.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding: utf-8
3 |
4 | # In[2]:
5 |
6 |
7 | import numpy as np
8 | import pandas as pd
 9 | # pick the better-performing models
10 | # import seaborn as sns
11 |
12 | # import matplotlib.pyplot as plt
13 | from tqdm import tqdm
14 | from scipy.signal import resample
15 | from tensorflow.keras import layers
16 | from tensorflow.keras.layers import *
17 | from tensorflow.keras.models import Model
18 | from tensorflow.keras.optimizers import Adam
19 | from tensorflow.keras.utils import to_categorical
20 | from sklearn.model_selection import StratifiedKFold
21 | from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
22 | import os
23 | from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score
24 |
25 | from sklearn.preprocessing import StandardScaler,MinMaxScaler
26 |
27 | os.environ["CUDA_VISIBLE_DEVICES"] = "0"
28 |
29 | def acc_combo(y, y_pred):
30 |     # mapping between numeric IDs and behavior codes
31 | mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
32 | 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
33 | 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
34 | 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
35 | 16: 'C_2', 17: 'C_5', 18: 'C_6'}
36 |     # convert behavior IDs to codes
37 |     code_y, code_y_pred = mapping[y], mapping[y_pred]
38 |     if code_y == code_y_pred: # identical codes score 1.0
39 |         return 1.0
40 |     elif code_y.split("_")[0] == code_y_pred.split("_")[0]: # only the letter part matches, score 1.0/7
41 |         return 1.0/7
42 |     elif code_y.split("_")[1] == code_y_pred.split("_")[1]: # only the digit part matches, score 1.0/3
43 | return 1.0/3
44 | else:
45 | return 0.0
46 |
47 |
48 | sample_num = 60
49 |
50 |
51 | # In[3]:
52 |
53 |
54 | root_path = '../../data/'
55 | train = pd.read_csv(root_path+'sensor_train.csv')
56 | test = pd.read_csv(root_path+'sensor_test.csv')
57 | sub = pd.read_csv(root_path+'提交结果示例.csv')
58 | y = train.groupby('fragment_id')['behavior_id'].min()
59 |
60 |
61 | # In[4]:
62 |
63 |
64 | def add_features(df):
65 | print(df.columns)
66 | df['acc'] = (df.acc_x ** 2 + df.acc_y ** 2 + df.acc_z ** 2) ** .5
67 | df['accg'] = (df.acc_xg ** 2 + df.acc_yg ** 2 + df.acc_zg ** 2) ** .5
68 | df['thetax']=np.arctan(df.acc_xg/
69 | np.sqrt(df.acc_yg*df.acc_yg+df.acc_zg*df.acc_zg))*180/np.pi
70 | df['thetay']=np.arctan(df.acc_yg/
71 | np.sqrt(df.acc_xg*df.acc_xg+df.acc_zg*df.acc_zg))*180/np.pi
72 | df['thetaz']=np.arctan(df.acc_zg/
73 | np.sqrt(df.acc_yg*df.acc_yg+df.acc_xg*df.acc_xg))*180/np.pi
74 |
75 | df['xy'] = (df['acc_x'] ** 2 + df['acc_y'] ** 2) ** 0.5
76 | df['xy_g'] = (df['acc_xg'] ** 2 + df['acc_yg'] ** 2) ** 0.5
77 |
78 | df['g'] = ((df["acc_x"] - df["acc_xg"]) ** 2 +
79 | (df["acc_y"] - df["acc_yg"]) ** 2 + (df["acc_z"] - df["acc_zg"]) ** 2) ** 0.5
80 |
81 | print(df.columns)
82 | return df
83 |
84 |
85 | # In[5]:
86 |
87 |
88 | train=add_features(train)
89 | test=add_features(test)
90 |
91 |
92 | # In[6]:
93 |
94 |
95 | group1 = [x for x in train.columns if x not in ['fragment_id', 'time_point','behavior_id']]
96 | group1
97 |
98 |
99 | # In[7]:
100 |
101 |
102 | FEATURE_NUM=14
103 |
104 |
105 | # In[8]:
106 |
107 |
108 |
109 | x = np.zeros((7292, sample_num, FEATURE_NUM, 1))
110 | t = np.zeros((7500, sample_num, FEATURE_NUM, 1))
111 |
112 |
113 | # In[9]:
114 |
115 |
116 |
117 | train = train[['fragment_id', 'time_point', 'behavior_id']+group1]
118 | test = test[['fragment_id', 'time_point']+group1]
119 | print(train.columns)
120 |
121 | for i in tqdm(range(7292)):
122 | tmp = train[train.fragment_id == i][:sample_num]
123 | x[i,:,:,0] = resample(tmp.drop(['fragment_id', 'time_point', 'behavior_id'],
124 | axis=1)[group1], sample_num, np.array(tmp.time_point))[0].reshape(sample_num,FEATURE_NUM)
125 | for i in tqdm(range(7500)):
126 | tmp = test[test.fragment_id == i][:sample_num]
127 | t[i,:,:,0] = resample(tmp.drop(['fragment_id', 'time_point'],
128 | axis=1)[group1], sample_num, np.array(tmp.time_point))[0].reshape(sample_num,FEATURE_NUM)
129 |
130 |
131 |
132 |
133 |
134 | # In[10]:
135 |
136 |
137 | def ConvBNRelu(X,filters,kernal_size=(3,3)):
138 | X = Conv2D(filters=filters,
139 | kernel_size=kernal_size,
140 | # activation='relu',
141 | use_bias=False,
142 | padding='same')(X)
143 | X = BatchNormalization()(X)
144 | X = Activation('relu')(X)
145 | return X
146 |
147 |
148 | def ConvRelu(X,filters,kernal_size=(3,3)):
149 | X = Conv2D(filters=filters,
150 | kernel_size=kernal_size,
151 | activation='relu',
152 | use_bias=False,
153 | padding='same')(X)
154 | return X
155 |
156 |
157 | def squeeze_excitation_layer(x, out_dim,ratio=8):
158 | '''
159 | SE module performs inter-channel weighting.
160 | '''
161 | squeeze = GlobalAveragePooling2D()(x)
162 |
163 | excitation = Dense(units=out_dim // ratio)(squeeze)
164 | excitation = Activation('relu')(excitation)
165 | excitation = Dense(units=out_dim)(excitation)
166 | excitation = Activation('sigmoid')(excitation)
167 | excitation = Reshape((1,1,out_dim))(excitation)
168 | scale = multiply([x,excitation])
169 | return scale
170 |
171 | # def SE_Residual(X):
172 | # A =
173 | # X = squeeze_excitation_layer(X,128)
174 | # X = Add()([X,A])
175 |
176 |
177 | def lenet5(input):
178 | A = ConvBNRelu(input,64,kernal_size=(3,3))
179 | # B = ConvBNRelu(input,16,kernal_size=(5,1))
180 | # C = ConvBNRelu(input,16,kernal_size=(7,1))
181 | # ABC = layers.Concatenate()([A,B,C])
182 | X = ConvBNRelu(A,128)
183 | # X = squeeze_excitation_layer(X,128)
184 | X = Dropout(0.2)(X)
185 |
186 | X = AveragePooling2D()(X)
187 |
188 | X = ConvBNRelu(X,256)
189 | X = Dropout(0.3)(X)
190 | # X = squeeze_excitation_layer(X,256)
191 | X = ConvBNRelu(X,512)
192 | X = Dropout(0.5)(X)
193 | # X = squeeze_excitation_layer(X,512)
194 | # X = GlobalMaxPooling2D()(X)
195 | X = GlobalAveragePooling2D()(X)
196 |
197 | # X = BatchNormalization()(X)
198 | return X
199 | import tensorflow as tf
200 | def Net(sample_num):
201 | input1 = Input(shape=(sample_num, FEATURE_NUM, 1))
202 | part = tf.split(input1,axis=2, num_or_size_splits = [6, 2, 6])
203 | # res = tf.split(c, axis = 3, num_or_size_splits = [2, 2, 4])
204 |
205 |
206 | X1 = Concatenate(axis=-2)([part[0],part[1]])
207 | X1 = lenet5(X1)
208 | X1 = BatchNormalization()(X1)
209 | X1 = Dense(128, activation='relu')(X1)
210 | X1 = BatchNormalization()(X1)
211 | X1 = Dropout(0.2)(X1)
212 |
213 | X2 = Concatenate(axis=-2)([part[0],part[2]])
214 | X2 = lenet5(X2)
215 | X2 = BatchNormalization()(X2)
216 | # X = Dense(512, activation='relu')(X)
217 | # X = BatchNormalization()(X)
218 | X2 = Dense(128, activation='relu')(X2)
219 | X2 = BatchNormalization()(X2)
220 | X2 = Dropout(0.2)(X2)
221 |
222 | X = Concatenate(axis=-1)([X1,X2])
223 |
224 | # X = Dense(19)(X)
225 |
226 | O = Dense(19, activation='softmax')(X)
227 |
228 |
229 | return Model([input1], O)
230 |
231 | model = Net(60)
232 | model.summary()
233 |
234 |
235 | # In[11]:
236 |
237 |
238 | # [:,:,:,[1]]
239 | train = x
240 | test = t
241 |
242 |
243 | def label_smooth(y,rate=0.05):
244 | y = y * (1 - rate)
245 | y = y + rate / 19
246 | return y
247 |
248 | fold_num=5
249 | kfold = StratifiedKFold(fold_num,random_state=42,shuffle=True)
250 | proba_t = np.zeros((7500, 19))
251 | proba_oof = np.zeros((7292,19))
252 |
253 | oof_score = []
254 | oof_comm = []
255 | history = []
256 | for fold, (xx, yy) in enumerate(kfold.split(train, y)):
257 | y_ = to_categorical(y, num_classes=19)
258 | y_ = label_smooth(y_)
259 | model = Net(60)
260 | model.summary()
261 | model.compile(loss='categorical_crossentropy',
262 | optimizer=Adam(),
263 | metrics=["acc"])#'',localscore
264 | plateau = ReduceLROnPlateau(monitor="acc",
265 | verbose=1,
266 | mode='max',
267 | factor=0.5,
268 | patience=18)
269 | early_stopping = EarlyStopping(monitor="val_acc",
270 | verbose=1,
271 | mode='max',
272 | patience=60)
273 |
274 | checkpoint = ModelCheckpoint(f'Conv2dfold{fold}.h5',
275 | monitor="val_acc",
276 | verbose=0,
277 | mode='max',
278 | save_best_only=True)
279 |
280 | train_res = model.fit(train[xx], y_[xx],
281 | epochs=1000, ##############################################33
282 | batch_size=32,
283 | verbose=1,
284 | shuffle=True,
285 | validation_data=(train[yy], y_[yy],),
286 | callbacks=[plateau, early_stopping, checkpoint])
287 | history.append(train_res)
288 |
289 | model.load_weights(f'Conv2dfold{fold}.h5')
290 | proba_t += model.predict(test, verbose=0, batch_size=1024) / fold_num
291 | proba_oof[yy] += model.predict(train[yy],verbose=0,batch_size=1024)
292 |
293 | oof_y = np.argmax(proba_oof[yy], axis=1)
294 | acc = round(accuracy_score(y[yy], oof_y),3)
295 | print(acc)
296 | oof_score.append(acc)
297 | scores = sum(acc_combo(y_true, y_pred) for y_true, y_pred in zip(y[yy], oof_y)) / oof_y.shape[0]
298 | oof_comm.append(scores)
299 | print(round(scores, 5))
300 |
301 |
302 | # In[21]:
303 |
304 |
305 | for index,i in enumerate(oof_comm):
306 | print(index,i,oof_score[index])
307 |
308 | oof_dict = {
309 | "oof":proba_oof,
310 | "test":proba_t,
311 | "acc":oof_comm,
312 | }
313 | import joblib
314 | joblib.dump(oof_dict,"0721_conv2_2_net_oof_comm_%.5f_dict.pkl"% np.mean(oof_comm))
315 |
316 |
317 | # In[ ]:
318 |
319 |
320 |
321 |
322 |
323 | # In[23]:
324 |
325 |
326 | # import seaborn as sns
327 | # import matplotlib.pyplot as plt
328 | from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score
329 |
330 | def acc_combo(y, y_pred):
331 |     # mapping between numeric IDs and behavior codes
332 | mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
333 | 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
334 | 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
335 | 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
336 | 16: 'C_2', 17: 'C_5', 18: 'C_6'}
337 |     # convert behavior IDs to codes
338 |     code_y, code_y_pred = mapping[y], mapping[y_pred]
339 |     if code_y == code_y_pred: # identical codes score 1.0
340 |         return 1.0
341 |     elif code_y.split("_")[0] == code_y_pred.split("_")[0]: # only the letter part matches, score 1.0/7
342 |         return 1.0/7
343 |     elif code_y.split("_")[1] == code_y_pred.split("_")[1]: # only the digit part matches, score 1.0/3
344 | return 1.0/3
345 | else:
346 | return 0.0
347 |
348 | train_y = y
349 | labels = np.argmax(proba_t, axis=1)
350 | oof_y = np.argmax(proba_oof, axis=1)
351 | print(round(accuracy_score(train_y, oof_y), 5))
352 | scores = sum(acc_combo(y_true, y_pred) for y_true, y_pred in zip(train_y, oof_y)) / oof_y.shape[0]
353 | print(round(scores, 5))
354 | data_path = '../../data/'
355 | sub = pd.read_csv(data_path+'提交结果示例.csv')
356 | sub['behavior_id'] = labels
357 |
358 | vc = pd.Series(train_y).value_counts().sort_index()
359 | # sns.barplot(vc.index, vc.values)
360 | # plt.show()
361 |
362 | vc = pd.Series(oof_y).value_counts().sort_index()
363 | # sns.barplot(vc.index, vc.values)
364 | # plt.show()
365 |
366 | vc = sub['behavior_id'].value_counts().sort_index()
367 | # sns.barplot(vc.index, vc.values)
368 | # plt.show()
369 | sub.to_csv('0721_conv2_2_net_oof_comm_nn%.5f.csv' % scores, index=False)
370 | sub.info()
371 |
--------------------------------------------------------------------------------
/Preliminary/PKL/0725_conv2_2_net_weight_comm_0.85568/conv2d-avepooling_fc2-add_feature_template-weight.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding: utf-8
3 |
 4 | # class weights are set in fit
5 |
6 | # In[1]:
7 |
8 |
9 | seed = 0
10 | import random
11 | import tensorflow as tf
12 | import numpy as np
13 | import os
14 | random.seed(seed),
15 | np.random.seed(seed)
16 | tf.random.set_seed(seed)
17 | os.environ["CUDA_DEVICE_ORDER"] = 'PCI_BUS_ID'
18 | os.environ["CUDA_VISIBLE_DEVICES"] = '0'
19 | os.environ["PYTHONHASHSEED"] = str(seed)
20 |
21 | os.environ['TF_DETERMINISTIC_OPS'] = '1'
22 |
23 |
24 | # In[2]:
25 |
26 |
27 | import numpy as np
28 | import pandas as pd
29 | # pick the better-performing models
30 | # import seaborn as sns
31 |
32 | # import matplotlib.pyplot as plt
33 | from tqdm import tqdm
34 | from scipy.signal import resample
35 | from tensorflow.keras import layers
36 | from tensorflow.keras.layers import *
37 | from tensorflow.keras.models import Model
38 | from tensorflow.keras.optimizers import Adam
39 | from tensorflow.keras.utils import to_categorical
40 | from sklearn.model_selection import StratifiedKFold
41 | from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
42 | import os
43 | from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score
44 |
45 | from sklearn.preprocessing import StandardScaler,MinMaxScaler
46 | # %load_ext autoreload
47 | # %autoreload 2
48 |
49 |
50 | def acc_combo(y, y_pred):
51 |     # mapping between numeric IDs and behavior codes
52 | mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
53 | 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
54 | 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
55 | 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
56 | 16: 'C_2', 17: 'C_5', 18: 'C_6'}
57 |     # convert behavior IDs to codes
58 |     code_y, code_y_pred = mapping[y], mapping[y_pred]
59 |     if code_y == code_y_pred: # identical codes score 1.0
60 |         return 1.0
61 |     elif code_y.split("_")[0] == code_y_pred.split("_")[0]: # only the letter part matches, score 1.0/7
62 |         return 1.0/7
63 |     elif code_y.split("_")[1] == code_y_pred.split("_")[1]: # only the digit part matches, score 1.0/3
64 | return 1.0/3
65 | else:
66 | return 0.0
67 |
68 |
69 | sample_num = 60
70 |
71 |
72 |
73 | # In[5]:
74 |
75 |
76 | root_path = '../../data/'
77 | train = pd.read_csv(root_path+'sensor_train.csv')
78 | test = pd.read_csv(root_path+'sensor_test.csv')
79 | sub = pd.read_csv(root_path+'提交结果示例.csv')
80 | y = train.groupby('fragment_id')['behavior_id'].min()
81 |
82 |
83 | # In[6]:
84 |
85 |
86 | def add_features(df):
87 | print(df.columns)
88 | df['acc'] = (df.acc_x ** 2 + df.acc_y ** 2 + df.acc_z ** 2) ** .5
89 | df['accg'] = (df.acc_xg ** 2 + df.acc_yg ** 2 + df.acc_zg ** 2) ** .5
90 | df['thetax']=np.arctan(df.acc_xg/
91 | np.sqrt(df.acc_yg*df.acc_yg+df.acc_zg*df.acc_zg))*180/np.pi
92 | df['thetay']=np.arctan(df.acc_yg/
93 | np.sqrt(df.acc_xg*df.acc_xg+df.acc_zg*df.acc_zg))*180/np.pi
94 | df['thetaz']=np.arctan(df.acc_zg/
95 | np.sqrt(df.acc_yg*df.acc_yg+df.acc_xg*df.acc_xg))*180/np.pi
96 |
97 | df['xy'] = (df['acc_x'] ** 2 + df['acc_y'] ** 2) ** 0.5
98 | df['xy_g'] = (df['acc_xg'] ** 2 + df['acc_yg'] ** 2) ** 0.5
99 |
100 | df['g'] = ((df["acc_x"] - df["acc_xg"]) ** 2 +
101 | (df["acc_y"] - df["acc_yg"]) ** 2 + (df["acc_z"] - df["acc_zg"]) ** 2) ** 0.5
102 |
103 | print(df.columns)
104 | return df
105 |
106 |
107 | # In[7]:
108 |
109 |
110 | train=add_features(train)
111 | test=add_features(test)
112 |
113 |
114 | # In[8]:
115 |
116 |
117 | group1 = [x for x in train.columns if x not in ['fragment_id', 'time_point','behavior_id']]
118 | group1
119 |
120 |
121 | # In[9]:
122 |
123 |
124 | FEATURE_NUM=14
125 |
126 |
127 | # In[10]:
128 |
129 |
130 |
131 | x = np.zeros((7292, sample_num, FEATURE_NUM, 1))
132 | t = np.zeros((7500, sample_num, FEATURE_NUM, 1))
133 |
134 |
135 | # In[11]:
136 |
137 |
138 |
139 | train = train[['fragment_id', 'time_point', 'behavior_id']+group1]
140 | test = test[['fragment_id', 'time_point']+group1]
141 | print(train.columns)
142 |
143 | for i in tqdm(range(7292)):
144 | tmp = train[train.fragment_id == i][:sample_num]
145 | x[i,:,:,0] = resample(tmp.drop(['fragment_id', 'time_point', 'behavior_id'],
146 | axis=1)[group1], sample_num, np.array(tmp.time_point))[0].reshape(sample_num,FEATURE_NUM)
147 | for i in tqdm(range(7500)):
148 | tmp = test[test.fragment_id == i][:sample_num]
149 | t[i,:,:,0] = resample(tmp.drop(['fragment_id', 'time_point'],
150 | axis=1)[group1], sample_num, np.array(tmp.time_point))[0].reshape(sample_num,FEATURE_NUM)
151 |
152 |
153 |
154 |
155 |
156 | # In[12]:
157 |
158 |
159 | def ConvBNRelu(X,filters,kernal_size=(3,3)):
160 | X = Conv2D(filters=filters,
161 | kernel_size=kernal_size,
162 | # activation='relu',
163 | use_bias=False,
164 | padding='same')(X)
165 | X = BatchNormalization()(X)
166 | X = Activation('relu')(X)
167 | return X
168 |
169 |
170 | def ConvRelu(X,filters,kernal_size=(3,3)):
171 | X = Conv2D(filters=filters,
172 | kernel_size=kernal_size,
173 | activation='relu',
174 | use_bias=False,
175 | padding='same')(X)
176 | return X
177 |
178 |
179 | def squeeze_excitation_layer(x, out_dim,ratio=8):
180 | '''
181 | SE module performs inter-channel weighting.
182 | '''
183 | squeeze = GlobalAveragePooling2D()(x)
184 |
185 | excitation = Dense(units=out_dim // ratio)(squeeze)
186 | excitation = Activation('relu')(excitation)
187 | excitation = Dense(units=out_dim)(excitation)
188 | excitation = Activation('sigmoid')(excitation)
189 | excitation = Reshape((1,1,out_dim))(excitation)
190 | scale = multiply([x,excitation])
191 | return scale
192 |
193 | # def SE_Residual(X):
194 | # A =
195 | # X = squeeze_excitation_layer(X,128)
196 | # X = Add()([X,A])
197 |
198 |
199 | def lenet5(input):
200 | A = ConvBNRelu(input,64,kernal_size=(3,3))
201 | # B = ConvBNRelu(input,16,kernal_size=(5,1))
202 | # C = ConvBNRelu(input,16,kernal_size=(7,1))
203 | # ABC = layers.Concatenate()([A,B,C])
204 | X = ConvBNRelu(A,128)
205 | # X = squeeze_excitation_layer(X,128)
206 | X = Dropout(0.2)(X)
207 |
208 | X = AveragePooling2D()(X)
209 |
210 | X = ConvBNRelu(X,256)
211 | X = Dropout(0.3)(X)
212 | # X = squeeze_excitation_layer(X,256)
213 | X = ConvBNRelu(X,512)
214 | X = Dropout(0.5)(X)
215 | # X = squeeze_excitation_layer(X,512)
216 | # X = GlobalMaxPooling2D()(X)
217 | X = GlobalAveragePooling2D()(X)
218 |
219 | # X = BatchNormalization()(X)
220 | return X
221 | import tensorflow as tf
222 | def Net(sample_num):
223 | input1 = Input(shape=(sample_num, FEATURE_NUM, 1))
224 | part = tf.split(input1,axis=2, num_or_size_splits = [6, 2, 6])
225 | # res = tf.split(c, axis = 3, num_or_size_splits = [2, 2, 4])
226 |
227 |
228 | X1 = Concatenate(axis=-2)([part[0],part[1]])
229 | X1 = lenet5(X1)
230 | X1 = BatchNormalization()(X1)
231 | X1 = Dense(128, activation='relu')(X1)
232 | X1 = BatchNormalization()(X1)
233 | X1 = Dropout(0.2)(X1)
234 |
235 | X2 = Concatenate(axis=-2)([part[0],part[2]])
236 | X2 = lenet5(X2)
237 | X2 = BatchNormalization()(X2)
238 | # X = Dense(512, activation='relu')(X)
239 | # X = BatchNormalization()(X)
240 | X2 = Dense(128, activation='relu')(X2)
241 | X2 = BatchNormalization()(X2)
242 | X2 = Dropout(0.2)(X2)
243 |
244 | X = Concatenate(axis=-1)([X1,X2])
245 |
246 | # X = Dense(19)(X)
247 |
248 | O = Dense(19, activation='softmax')(X)
249 |
250 |
251 | return Model([input1], O)
252 |
253 | model = Net(60)
254 | model.summary()
255 |
256 |
257 | # In[18]:
258 |
259 |
260 | from sklearn.utils.class_weight import compute_class_weight
261 | # y_train_weight = compute_sample_weight("balanced", train['behavior_id'])
262 | classweights=compute_class_weight("balanced",np.array(range(19)), train['behavior_id'])
263 | classweights=pd.DataFrame(classweights)[0].to_dict()
264 | classweights
265 |
266 |
267 | # In[19]:
268 |
269 |
270 | # [:,:,:,[1]]
271 | train = x
272 | test = t
273 |
274 |
275 | def label_smooth(y,rate=0.05):
276 | y = y * (1 - rate)
277 | y = y + rate / 19
278 | return y
279 |
280 | fold_num=5
281 | kfold = StratifiedKFold(fold_num,random_state=42,shuffle=True)
282 | proba_t = np.zeros((7500, 19))
283 | proba_oof = np.zeros((7292,19))
284 |
285 | oof_score = []
286 | oof_comm = []
287 | history = []
288 | for fold, (xx, yy) in enumerate(kfold.split(train, y)):
289 | y_ = to_categorical(y, num_classes=19)
290 | y_ = label_smooth(y_)
291 | model = Net(60)
292 | model.summary()
293 | model.compile(loss='categorical_crossentropy',
294 | optimizer=Adam(),
295 | metrics=["acc"])#'',localscore
296 | plateau = ReduceLROnPlateau(monitor="acc",
297 | verbose=1,
298 | mode='max',
299 | factor=0.5,
300 | patience=18)
301 | early_stopping = EarlyStopping(monitor="val_acc",
302 | verbose=1,
303 | mode='max',
304 | patience=60)
305 |
306 | checkpoint = ModelCheckpoint(f'Conv2dfold{fold}.h5',
307 | monitor="val_acc",
308 | verbose=0,
309 | mode='max',
310 | save_best_only=True)
311 |
312 | train_res = model.fit(train[xx], y_[xx],
313 | epochs=1000,#############################1000
314 | batch_size=32,
315 | verbose=1,
316 | shuffle=True,
317 | validation_data=(train[yy], y_[yy],),
318 | callbacks=[plateau, early_stopping, checkpoint],
319 | class_weight=classweights)
320 | history.append(train_res)
321 |
322 |
323 | model.load_weights(f'Conv2dfold{fold}.h5')
324 | proba_t += model.predict(test, verbose=0, batch_size=1024) / fold_num
325 | proba_oof[yy] += model.predict(train[yy],verbose=0,batch_size=1024)
326 |
327 | oof_y = np.argmax(proba_oof[yy], axis=1)
328 | acc = round(accuracy_score(y[yy], oof_y),3)
329 | print(acc)
330 | oof_score.append(acc)
331 | scores = sum(acc_combo(y_true, y_pred) for y_true, y_pred in zip(y[yy], oof_y)) / oof_y.shape[0]
332 | oof_comm.append(scores)
333 | print(round(scores, 5))
334 |
335 |
336 | for index,i in enumerate(oof_comm):
337 | print(index,i,oof_score[index])
338 |
339 | oof_dict = {
340 | "oof":proba_oof,
341 | "test":proba_t,
342 | "acc":oof_comm,
343 | }
344 | import joblib
345 | joblib.dump(oof_dict,"0725_conv2_2_net_weight_comm_%.5f_dict.pkl"% np.mean(oof_comm))
346 |
347 |
348 | # In[ ]:
349 |
350 |
351 |
352 |
353 |
354 | # In[50]:
355 |
356 |
357 | # import seaborn as sns
358 | # import matplotlib.pyplot as plt
359 | from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score
360 |
361 | def acc_combo(y, y_pred):
362 |     # mapping between numeric IDs and behavior codes
363 | mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
364 | 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
365 | 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
366 | 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
367 | 16: 'C_2', 17: 'C_5', 18: 'C_6'}
368 |     # convert behavior IDs to codes
369 |     code_y, code_y_pred = mapping[y], mapping[y_pred]
370 |     if code_y == code_y_pred: # identical codes score 1.0
371 |         return 1.0
372 |     elif code_y.split("_")[0] == code_y_pred.split("_")[0]: # only the letter part matches, score 1.0/7
373 |         return 1.0/7
374 |     elif code_y.split("_")[1] == code_y_pred.split("_")[1]: # only the digit part matches, score 1.0/3
375 | return 1.0/3
376 | else:
377 | return 0.0
378 |
379 | train_y = y
380 | labels = np.argmax(proba_t, axis=1)
381 | oof_y = np.argmax(proba_oof, axis=1)
382 | print(round(accuracy_score(train_y, oof_y), 5))
383 | scores = sum(acc_combo(y_true, y_pred) for y_true, y_pred in zip(train_y, oof_y)) / oof_y.shape[0]
384 | print(round(scores, 5))
385 | data_path = '../../data/'
386 | sub = pd.read_csv(data_path+'提交结果示例.csv')
387 | sub['behavior_id'] = labels
388 |
389 | vc = pd.Series(train_y).value_counts().sort_index()
390 | # sns.barplot(vc.index, vc.values)
391 | # plt.show()
392 |
393 | vc = pd.Series(oof_y).value_counts().sort_index()
394 | # sns.barplot(vc.index, vc.values)
395 | # plt.show()
396 |
397 | vc = sub['behavior_id'].value_counts().sort_index()
398 | # sns.barplot(vc.index, vc.values)
399 | # plt.show()
400 | sub.to_csv('0721_conv2_2_net_oof_comm_nn%.5f.csv' % scores, index=False)
401 | sub.info()
402 |
403 |
404 | # In[51]:
405 |
406 |
--------------------------------------------------------------------------------
/Preliminary/PKL/0728_08648_online792/conv2d-avepooling_fc2-add_feature_template-multiloss-Copy1.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding: utf-8
3 |
4 | # In[1]:
5 |
6 |
7 | seed = 0
8 | import random
9 | import numpy as np
10 | import tensorflow as tf
11 | import os
12 | random.seed(seed)
13 | np.random.seed(seed)
14 | tf.random.set_seed(seed)
15 | os.environ["CUDA_DEVICE_ORDER"] = 'PCI_BUS_ID'
16 | os.environ["CUDA_VISIBLE_DEVICES"] = '0'
17 | os.environ["PYTHONHASHSEED"] = str(seed)
18 |
19 | os.environ['TF_DETERMINISTIC_OPS'] = '1'
20 |
21 |
22 | # In[2]:
23 |
24 |
25 | import numpy as np
26 | import pandas as pd
27 | # pick the better-performing models
28 | # import seaborn as sns
29 |
30 | # import matplotlib.pyplot as plt
31 | from tqdm import tqdm
32 | from scipy.signal import resample
33 | from tensorflow.keras import layers
34 | from tensorflow.keras.layers import *
35 | from tensorflow.keras.models import Model
36 | from tensorflow.keras.optimizers import Adam
37 | from tensorflow.keras.utils import to_categorical
38 | from sklearn.model_selection import StratifiedKFold
39 | from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
40 | import os
41 | from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score
42 |
43 | from sklearn.preprocessing import StandardScaler,MinMaxScaler
44 | # %load_ext autoreload
45 | # %autoreload 2
46 | # os.environ["CUDA_VISIBLE_DEVICES"] = "7"
47 |
48 | def acc_combo(y, y_pred):
49 |     # mapping between numeric IDs and behavior codes
50 | mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
51 | 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
52 | 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
53 | 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
54 | 16: 'C_2', 17: 'C_5', 18: 'C_6'}
55 |     # convert behavior IDs to codes
56 |     code_y, code_y_pred = mapping[y], mapping[y_pred]
57 |     if code_y == code_y_pred: # identical codes score 1.0
58 |         return 1.0
59 |     elif code_y.split("_")[0] == code_y_pred.split("_")[0]: # only the letter part matches, score 1.0/7
60 |         return 1.0/7
61 |     elif code_y.split("_")[1] == code_y_pred.split("_")[1]: # only the digit part matches, score 1.0/3
62 | return 1.0/3
63 | else:
64 | return 0.0
65 |
66 |
67 | sample_num = 60
68 |
69 |
70 | # In[4]:
71 |
72 |
73 | root_path = '../../data/'
74 | train = pd.read_csv(root_path+'sensor_train.csv')
75 | test = pd.read_csv(root_path+'sensor_test.csv')
76 | sub = pd.read_csv(root_path+'提交结果示例.csv')
77 | y = train.groupby('fragment_id')['behavior_id'].min()
78 |
79 |
80 | # In[5]:
81 |
82 |
83 | def add_features(df):
84 | print(df.columns)
85 | df['acc'] = (df.acc_x ** 2 + df.acc_y ** 2 + df.acc_z ** 2) ** .5
86 | df['accg'] = (df.acc_xg ** 2 + df.acc_yg ** 2 + df.acc_zg ** 2) ** .5
87 | df['thetax']=np.arctan(df.acc_xg/
88 | np.sqrt(df.acc_yg*df.acc_yg+df.acc_zg*df.acc_zg))*180/np.pi
89 | df['thetay']=np.arctan(df.acc_yg/
90 | np.sqrt(df.acc_xg*df.acc_xg+df.acc_zg*df.acc_zg))*180/np.pi
91 | df['thetaz']=np.arctan(df.acc_zg/
92 | np.sqrt(df.acc_yg*df.acc_yg+df.acc_xg*df.acc_xg))*180/np.pi
93 |
94 | df['xy'] = (df['acc_x'] ** 2 + df['acc_y'] ** 2) ** 0.5
95 | df['xy_g'] = (df['acc_xg'] ** 2 + df['acc_yg'] ** 2) ** 0.5
96 |
97 | df['g'] = ((df["acc_x"] - df["acc_xg"]) ** 2 +
98 | (df["acc_y"] - df["acc_yg"]) ** 2 + (df["acc_z"] - df["acc_zg"]) ** 2) ** 0.5
99 |
100 | print(df.columns)
101 | return df
102 |
103 |
104 | # In[6]:
105 |
106 |
107 | train=add_features(train)
108 | test=add_features(test)
109 |
110 |
111 | # In[7]:
112 |
113 |
114 | group1 = [x for x in train.columns if x not in ['fragment_id', 'time_point','behavior_id']]
115 | group1
116 |
117 |
118 | # In[8]:
119 |
120 |
121 | FEATURE_NUM=14
122 |
123 |
124 | # In[9]:
125 |
126 |
127 |
128 | x = np.zeros((7292, sample_num, FEATURE_NUM, 1))
129 | t = np.zeros((7500, sample_num, FEATURE_NUM, 1))
130 |
131 |
132 | # In[10]:
133 |
134 |
135 |
136 | train = train[['fragment_id', 'time_point', 'behavior_id']+group1]
137 | test = test[['fragment_id', 'time_point']+group1]
138 | print(train.columns)
139 |
140 | for i in tqdm(range(7292)):
141 | tmp = train[train.fragment_id == i][:sample_num]
142 | x[i,:,:,0] = resample(tmp.drop(['fragment_id', 'time_point', 'behavior_id'],
143 | axis=1)[group1], sample_num, np.array(tmp.time_point))[0].reshape(sample_num,FEATURE_NUM)
144 | for i in tqdm(range(7500)):
145 | tmp = test[test.fragment_id == i][:sample_num]
146 | t[i,:,:,0] = resample(tmp.drop(['fragment_id', 'time_point'],
147 | axis=1)[group1], sample_num, np.array(tmp.time_point))[0].reshape(sample_num,FEATURE_NUM)
148 |
149 |
150 |
151 |
152 |
153 | # In[11]:
154 |
155 |
156 | def ConvBNRelu(X,filters,kernal_size=(3,3)):
157 | X = Conv2D(filters=filters,
158 | kernel_size=kernal_size,
159 | # activation='relu',
160 | use_bias=False,
161 | padding='same')(X)
162 | X = BatchNormalization()(X)
163 | X = Activation('relu')(X)
164 | return X
165 |
166 |
167 | def ConvRelu(X,filters,kernal_size=(3,3)):
168 | X = Conv2D(filters=filters,
169 | kernel_size=kernal_size,
170 | activation='relu',
171 | use_bias=False,
172 | padding='same')(X)
173 | return X
174 |
175 |
176 | def squeeze_excitation_layer(x, out_dim,ratio=8):
177 | '''
178 | SE module performs inter-channel weighting.
179 | '''
180 | squeeze = GlobalAveragePooling2D()(x)
181 |
182 | excitation = Dense(units=out_dim // ratio)(squeeze)
183 | excitation = Activation('relu')(excitation)
184 | excitation = Dense(units=out_dim)(excitation)
185 | excitation = Activation('sigmoid')(excitation)
186 | excitation = Reshape((1,1,out_dim))(excitation)
187 | scale = multiply([x,excitation])
188 | return scale
189 |
190 | # def SE_Residual(X):
191 | # A =
192 | # X = squeeze_excitation_layer(X,128)
193 | # X = Add()([X,A])
194 |
195 |
196 | def lenet5(input):
197 | A = ConvBNRelu(input,64,kernal_size=(3,3))
198 | # B = ConvBNRelu(input,16,kernal_size=(5,1))
199 | # C = ConvBNRelu(input,16,kernal_size=(7,1))
200 | # ABC = layers.Concatenate()([A,B,C])
201 | X = ConvBNRelu(A,128)
202 | # X = squeeze_excitation_layer(X,128)
203 | X = Dropout(0.2)(X)
204 |
205 | X = AveragePooling2D()(X)
206 |
207 | X = ConvBNRelu(X,256)
208 | X = Dropout(0.3)(X)
209 | # X = squeeze_excitation_layer(X,256)
210 | X = ConvBNRelu(X,512)
211 | X = Dropout(0.5)(X)
212 | # X = squeeze_excitation_layer(X,512)
213 | # X = GlobalMaxPooling2D()(X)
214 | X = GlobalAveragePooling2D()(X)
215 |
216 | # X = BatchNormalization()(X)
217 | return X
218 | import tensorflow as tf
219 | def Net(sample_num):
220 | input1 = Input(shape=(sample_num, FEATURE_NUM, 1))
221 | part = tf.split(input1,axis=2, num_or_size_splits = [6, 2, 6])
222 | # res = tf.split(c, axis = 3, num_or_size_splits = [2, 2, 4])
223 |
224 |
225 | X1 = Concatenate(axis=-2)([part[0],part[1]])
226 | X1 = lenet5(X1)
227 | X1 = BatchNormalization()(X1)
228 | X1 = Dense(128, activation='relu')(X1)
229 | X1 = BatchNormalization()(X1)
230 | X1 = Dropout(0.2)(X1)
231 |
232 | X2 = Concatenate(axis=-2)([part[0],part[2]])
233 | X2 = lenet5(X2)
234 | X2 = BatchNormalization()(X2)
235 | # X = Dense(512, activation='relu')(X)
236 | # X = BatchNormalization()(X)
237 | X2 = Dense(128, activation='relu')(X2)
238 | X2 = BatchNormalization()(X2)
239 | X2 = Dropout(0.2)(X2)
240 |
241 | X = Concatenate(axis=-1)([X1,X2])
242 |
243 | # X = Dense(256)(X)
244 |
245 |     output1 = Dense(4, activation='softmax', name='4class')(X) # coarse class - letter
246 | # output2 = Dense(128)(X)
247 | # output2 = Dense(64)(X)
248 | X = Dense(64)(X)
249 |     output2 = Dense(7, activation='softmax', name='7class')(X) # coarse class - digit
250 | # X = Dense(32)(X)
251 | # X = Concatenate(axis=-1)([X,output1,output2])
252 | X = Dense(64)(X)
253 |     output3 = Dense(19, activation='softmax',name='19class')(X) # fine class
254 |
255 |
256 | return Model([input1], [output1,output2,output3])
257 |
258 | model = Net(60)
259 | model.summary()
260 |
261 |
262 | # In[12]:
263 |
264 |
265 | # auxiliary outputs
266 | mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
267 | 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
268 | 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
269 | 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
270 | 16: 'C_2', 17: 'C_5', 18: 'C_6'}
271 | # letter-class output, 4 classes
272 | new_mapping = {'A':0,'B':1,'C':2,'D':3}
273 |
274 | from sklearn.utils.class_weight import compute_class_weight
275 | # y_train_weight = compute_sample_weight("balanced", train['behavior_id'])
276 | classweights1=compute_class_weight("balanced",['A','B','C','D'], pd.read_csv(root_path+'sensor_train.csv')['behavior_id'].apply(lambda x:mapping[x][0]))
277 | classweights1=pd.DataFrame(classweights1)[0].to_dict()
278 |
279 |
280 |
281 | classweights2=compute_class_weight("balanced",list(range(7)), pd.read_csv(root_path+'sensor_train.csv')['behavior_id'].apply(lambda x:int(mapping[x][2])))
282 | classweights2=pd.DataFrame(classweights2)[0].to_dict()
283 |
284 |
285 |
286 | from sklearn.utils.class_weight import compute_class_weight
287 | # y_train_weight = compute_sample_weight("balanced", train['behavior_id'])
288 | classweights3=compute_class_weight("balanced",np.array(range(19)), pd.read_csv(root_path+'sensor_train.csv')['behavior_id'])
289 | classweights3=pd.DataFrame(classweights3)[0].to_dict()
290 | classweights1,classweights2,classweights3
291 |
292 |
293 | # In[13]:
294 |
295 |
296 | # [:,:,:,[1]]
297 | train = x
298 | test = t
299 |
300 |
301 | fold_num=5
302 | kfold = StratifiedKFold(fold_num,random_state=42,shuffle=True)
303 | proba_t = np.zeros((7500, 19))
304 | proba_oof = np.zeros((7292,19))
305 |
306 | oof_score = []
307 | oof_comm = []
308 | history = []
309 |
310 | from tensorflow.keras.losses import categorical_crossentropy
311 | def custom_loss(y_true, y_pred):
312 | return categorical_crossentropy(y_true, y_pred, label_smoothing=0.05)
313 |
314 | # targets for the auxiliary outputs
315 | mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
316 | 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
317 | 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
318 | 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
319 | 16: 'C_2', 17: 'C_5', 18: 'C_6'}
320 | # letter-class (coarse) label, 4 classes
321 | new_mapping = {'A':0,'B':1,'C':2,'D':3}
322 | y_1 = to_categorical([new_mapping[mapping[x][0]] for x in y], num_classes=4)
323 | # digit-class label, 7 classes
324 | new_mapping = {'A':0,'B':1,'C':2,'D':3}
325 | y_2 = to_categorical([mapping[x][2] for x in y], num_classes=7)
326 | # fine-grained label, 19 classes
327 | y_3 = to_categorical(y, num_classes=19)
328 |
329 |
330 | for fold, (xx, yy) in enumerate(kfold.split(train, y)):
331 |
332 | mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
333 | 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
334 | 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
335 | 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
336 | 16: 'C_2', 17: 'C_5', 18: 'C_6'}
337 | new_mapping = {'A':0,'B':1,'C':2,'D':3}
338 |
339 | model = Net(60)
340 | model.summary()
341 | model.compile(loss=[custom_loss,custom_loss,custom_loss],loss_weights=[3,7,21],
342 | optimizer=Adam(),
343 | metrics=["acc"])#'',localscore
344 | plateau = ReduceLROnPlateau(monitor="19class_acc",
345 | verbose=1,
346 | mode='max',
347 | factor=0.5,
348 | patience=18)
349 | early_stopping = EarlyStopping(monitor="val_19class_acc",
350 | verbose=1,
351 | mode='max',
352 | patience=60)
353 |
354 | checkpoint = ModelCheckpoint(f'Conv2d_multiloss_fold{fold}.h5',
355 | monitor="val_19class_acc",
356 | verbose=0,
357 | mode='max',
358 | save_best_only=True)
359 |
360 | train_res = model.fit(train[xx], [y_1[xx], y_2[xx], y_3[xx]],
361 | epochs=1000, #########################################3
362 | batch_size=32,
363 | verbose=1,
364 | shuffle=True,
365 | validation_data=(train[yy], [y_1[yy], y_2[yy],y_3[yy]]),
366 | callbacks=[plateau, early_stopping, checkpoint],
367 | class_weight=[classweights1,classweights2,classweights3])
368 | history.append(train_res)
369 |
370 |
371 |
372 | model.load_weights(f'Conv2d_multiloss_fold{fold}.h5')
373 | proba_t += model.predict(test, verbose=0, batch_size=1024)[2] / fold_num
374 | proba_oof[yy] += model.predict(train[yy],verbose=0,batch_size=1024) [2]
375 |
376 | oof_y = np.argmax(proba_oof[yy], axis=1)
377 | acc = round(accuracy_score(y[yy], oof_y),3)
378 | print(acc)
379 | oof_score.append(acc)
380 | scores = sum(acc_combo(y_true, y_pred) for y_true, y_pred in zip(y[yy], oof_y)) / oof_y.shape[0]
381 | oof_comm.append(scores)
382 | print(round(scores, 5))
383 |
384 |
385 | # In[ ]:
386 |
387 |
388 |
389 |
390 |
391 | # In[25]:
392 |
393 |
394 | for index,i in enumerate(oof_comm):
395 | print(index,i,oof_score[index])
396 |
397 | oof_dict = {
398 | "oof":proba_oof,
399 | "test":proba_t,
400 | "acc":oof_comm,
401 | }
402 | import joblib
403 | joblib.dump(oof_dict,"0728_conv2_2_net_multiloss_%.5f_dict.pkl"% np.mean(oof_comm))
404 |
405 |
406 | # In[26]:
407 |
408 |
409 | # import seaborn as sns
410 | # import matplotlib.pyplot as plt
411 | from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score
412 |
413 | def acc_combo(y, y_pred):
414 |     # mapping between numeric IDs and behavior codes
415 | mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
416 | 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
417 | 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
418 | 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
419 | 16: 'C_2', 17: 'C_5', 18: 'C_6'}
420 |     # convert behavior IDs to codes
421 |     code_y, code_y_pred = mapping[y], mapping[y_pred]
422 |     if code_y == code_y_pred: # identical codes score 1.0
423 |         return 1.0
424 |     elif code_y.split("_")[0] == code_y_pred.split("_")[0]: # only the letter part matches, score 1.0/7
425 |         return 1.0/7
426 |     elif code_y.split("_")[1] == code_y_pred.split("_")[1]: # only the digit part matches, score 1.0/3
427 | return 1.0/3
428 | else:
429 | return 0.0
430 |
431 | train_y = y
432 | labels = np.argmax(proba_t, axis=1)
433 | oof_y = np.argmax(proba_oof, axis=1)
434 | print(round(accuracy_score(train_y, oof_y), 5))
435 | scores = sum(acc_combo(y_true, y_pred) for y_true, y_pred in zip(train_y, oof_y)) / oof_y.shape[0]
436 | print(round(scores, 5))
437 | data_path = '../../data/'
438 | sub = pd.read_csv(data_path+'提交结果示例.csv')
439 | sub['behavior_id'] = labels
440 |
441 | vc = pd.Series(train_y).value_counts().sort_index()
442 | # sns.barplot(vc.index, vc.values)
443 | # plt.show()
444 |
445 | vc = pd.Series(oof_y).value_counts().sort_index()
446 | # sns.barplot(vc.index, vc.values)
447 | # plt.show()
448 |
449 | vc = sub['behavior_id'].value_counts().sort_index()
450 | # sns.barplot(vc.index, vc.values)
451 | # plt.show()
452 | sub.to_csv('0728_conv2_multoloss_nn%.5f.csv' % scores, index=False)
453 | sub.info()
454 |
455 |
456 | # In[27]:
457 |
458 |
459 |
460 | # %matplotlib inline
461 | # from sklearn.metrics import confusion_matrix
462 | # import matplotlib.pyplot as plt
463 | # import numpy as np
464 |
465 | # def plot_confusion_matrix(cm,classes,title='Confusion Matrix'):
466 |
467 | # plt.figure(figsize=(12, 9), dpi=100)
468 | # np.set_printoptions(precision=2)
469 |
470 | # sns.heatmap(cm,annot=True)
471 | # plt.title(title)
472 | # plt.xticks(ticks=range(19),labels=classes)
473 | # plt.yticks(ticks=range(19),labels=classes)
474 |
475 | # plt.ylabel('Actual label')
476 | # plt.xlabel('Predict label')
477 | # plt.show()
478 |
479 | # # classes: the names of the categories
480 | # num2detail_mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
481 | # 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
482 | # 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
483 | # 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
484 | # 16: 'C_2', 17: 'C_5', 18: 'C_6'}
485 |
486 | # classes = [num2detail_mapping[int(i)]for i in range(19)]
487 | # print(classes)
488 | # # compute the confusion matrix
489 | # cm = confusion_matrix(train_y, oof_y,normalize='true')
490 | # cm = np.round(cm,2)
491 | # plot_confusion_matrix(cm,classes, title='confusion matrix')
492 |
493 |
494 | # In[ ]:
495 |
496 |
497 |
498 |
499 |
--------------------------------------------------------------------------------
/Preliminary/PKL/0730_generator_one_fifth_orig_mixup_087099/conv2d-avepooling_fc2-add_feature_template-multiloss-removedecay-generator-Copy1.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding: utf-8
3 |
4 | # In[5]:
5 |
6 |
7 | seed = 0
8 | import random
9 | import numpy as np
10 | import tensorflow as tf
11 | import os
12 | random.seed(seed)
13 | np.random.seed(seed)
14 | tf.random.set_seed(seed)
15 | os.environ["CUDA_DEVICE_ORDER"] = 'PCI_BUS_ID'
16 | os.environ["CUDA_VISIBLE_DEVICES"] = '0'
17 | os.environ["PYTHONHASHSEED"] = str(seed)
18 |
19 | os.environ['TF_DETERMINISTIC_OPS'] = '1'
20 |
21 |
22 | # In[6]:
23 |
24 |
25 | import numpy as np
26 | import pandas as pd
27 | from tqdm import tqdm
28 | from scipy.signal import resample
29 | from tensorflow.keras import layers
30 | from tensorflow.keras.layers import *
31 | from tensorflow.keras.models import Model
32 | from tensorflow.keras.optimizers import Adam
33 | from tensorflow.keras.utils import to_categorical
34 | from sklearn.model_selection import StratifiedKFold
35 | from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
36 | import os
37 | from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score
38 |
39 | from sklearn.preprocessing import StandardScaler,MinMaxScaler
40 |
41 | def acc_combo(y, y_pred):
42 |     # mapping between numeric IDs and behavior codes
43 | mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
44 | 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
45 | 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
46 | 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
47 | 16: 'C_2', 17: 'C_5', 18: 'C_6'}
48 | # convert the behaviour IDs to their codes
49 | code_y, code_y_pred = mapping[y], mapping[y_pred]
50 | if code_y == code_y_pred: # codes match exactly: score 1.0
51 | return 1.0
52 | elif code_y.split("_")[0] == code_y_pred.split("_")[0]: # only the letter part matches: score 1.0/7
53 | return 1.0/7
54 | elif code_y.split("_")[1] == code_y_pred.split("_")[1]: # only the number part matches: score 1.0/3
55 | return 1.0/3
56 | else:
57 | return 0.0
58 |
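# Quick illustrative check of the metric above (a sketch, assuming the mapping shown):
#   acc_combo(4, 4)  -> 1.0      ('D_4' vs 'D_4': exact match)
#   acc_combo(0, 1)  -> 1.0/7    ('A_0' vs 'A_1': only the letter part matches)
#   acc_combo(6, 12) -> 1.0/3    ('B_1' vs 'C_1': only the number part matches)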
59 |
60 | sample_num = 60
61 |
62 |
63 | # In[7]:
64 |
65 |
66 | root_path = '../../data/'
67 | train = pd.read_csv(root_path+'sensor_train.csv')
68 | test = pd.read_csv(root_path+'sensor_test.csv')
69 | sub = pd.read_csv(root_path+'提交结果示例.csv')
70 | y = train.groupby('fragment_id')['behavior_id'].min()
71 |
72 |
73 | # In[8]:
74 |
75 |
76 | def add_features(df):
77 | print(df.columns)
78 | df['acc'] = (df.acc_x ** 2 + df.acc_y ** 2 + df.acc_z ** 2) ** .5
79 | df['accg'] = (df.acc_xg ** 2 + df.acc_yg ** 2 + df.acc_zg ** 2) ** .5
80 | df['thetax']=np.arctan(df.acc_xg/
81 | np.sqrt(df.acc_yg*df.acc_yg+df.acc_zg*df.acc_zg))*180/np.pi
82 | df['thetay']=np.arctan(df.acc_yg/
83 | np.sqrt(df.acc_xg*df.acc_xg+df.acc_zg*df.acc_zg))*180/np.pi
84 | df['thetaz']=np.arctan(df.acc_zg/
85 | np.sqrt(df.acc_yg*df.acc_yg+df.acc_xg*df.acc_xg))*180/np.pi
86 |
87 | df['xy'] = (df['acc_x'] ** 2 + df['acc_y'] ** 2) ** 0.5
88 | df['xy_g'] = (df['acc_xg'] ** 2 + df['acc_yg'] ** 2) ** 0.5
89 |
90 | df['g'] = ((df["acc_x"] - df["acc_xg"]) ** 2 +
91 | (df["acc_y"] - df["acc_yg"]) ** 2 + (df["acc_z"] - df["acc_zg"]) ** 2) ** 0.5
92 |
93 | print(df.columns)
94 | return df
95 |
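# Notes on the engineered channels above: 'acc'/'accg' are the magnitudes of the two accelerometer
# signals, thetax/thetay/thetaz are tilt angles in degrees derived from the gravity-included channels
# via arctan, 'xy'/'xy_g' are the horizontal-plane magnitudes, and 'g' is the length of the difference
# between the two accelerometer vectors (roughly the measured gravity component).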
96 |
97 | # In[9]:
98 |
99 |
100 | train=add_features(train)
101 | test=add_features(test)
102 |
103 |
104 | # In[10]:
105 |
106 |
107 | group1 = [x for x in train.columns if x not in ['fragment_id', 'time_point','behavior_id']]
108 | group1
109 |
110 |
111 | # In[11]:
112 |
113 |
114 | FEATURE_NUM=14
115 |
116 |
117 | # In[13]:
118 |
119 |
120 |
121 | x = np.zeros((7292, sample_num, FEATURE_NUM, 1))
122 | t = np.zeros((7500, sample_num, FEATURE_NUM, 1))
123 |
124 |
125 | # In[14]:
126 |
127 |
128 |
129 | train = train[['fragment_id', 'time_point', 'behavior_id']+group1]
130 | test = test[['fragment_id', 'time_point']+group1]
131 | print(train.columns)
132 |
133 | for i in tqdm(range(7292)):
134 | tmp = train[train.fragment_id == i][:sample_num]
135 | x[i,:,:,0] = resample(tmp.drop(['fragment_id', 'time_point', 'behavior_id'],
136 | axis=1)[group1], sample_num, np.array(tmp.time_point))[0].reshape(sample_num,FEATURE_NUM)
137 | for i in tqdm(range(7500)):
138 | tmp = test[test.fragment_id == i][:sample_num]
139 | t[i,:,:,0] = resample(tmp.drop(['fragment_id', 'time_point'],
140 | axis=1)[group1], sample_num, np.array(tmp.time_point))[0].reshape(sample_num,FEATURE_NUM)
141 |
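# Note on the resampling loop above: scipy.signal.resample(x, num, t) returns (resampled_x, resampled_t)
# when sample times t are supplied, hence the [0]; every fragment is resampled to a fixed 60 time steps
# so that each sample becomes a constant (60, 14, 1) tensor for the CNN.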
142 |
143 |
144 |
145 |
146 | # In[16]:
147 |
148 |
149 | # A complete data generator: with probability 2/3 it splices two same-label sequences together (a mixup-style augmentation)
150 | def data_generator(data,label,class17label,batch_size):
151 | """
152 | data: array of shape (n_samples, 60, 14, 1)
153 | label: list of the three one-hot label arrays [4-class, 7-class, 19-class]
154 | class17label: Series/array of the fine-grained class labels, used to pick a same-label sample to splice with
155 | """
156 | class17label=np.asarray(class17label)
157 | length=len(data)
158 | seq_length=len(data[0])
159 | half_seq_length=int(seq_length/2)
160 |
161 | # index2label
162 | index2label=dict(zip(range(length),class17label))
163 |
164 | label2index={}
165 | # print(class17label)
166 | for i in range(length):
167 | # print(class17label[i],label2index.get(class17label[i],[]))
168 | label2index[class17label[i]]=label2index.get(class17label[i],[])
169 | label2index[class17label[i]].append(i)
170 |
171 | count=0
172 | np.random.seed(seed)  # for reproducible results
173 |
174 | while True:
175 |
176 | if count==0 or (count + 1) * batch_size > length: # first batch, or the next batch would run past the end: reshuffle
177 | count=0
178 | shuffle_index = list(range(length))
179 | np.random.shuffle(shuffle_index) ## shuffle the sample indices
180 | 
181 | start = count * batch_size ## start index of this batch
182 | end = (count + 1) * batch_size ## end index of this batch
183 | inds=shuffle_index[start:end]
184 |
185 | count+=1
186 |
187 | if random.choice([0,1,1]):
188 | # mixup-style splice (applied with probability 2/3)
189 | # one sample's index -> its label -> all indices that share that label
190 | choice_index=[random.choice(label2index[index2label[x]]) for x in inds] # randomly pick a same-label sequence to concatenate with
191 | # first 1/2 of seq_length keeps the original points, the last 1/2 comes from the randomly chosen same-label sample
192 | res_x_orig=data[inds,:half_seq_length]
193 | res_x=data[choice_index,half_seq_length:]
194 |
195 | # print(inds)
196 | # print(data.shape,res_x_orig.shape,res_x.shape,np.concatenate((res_x_orig,res_x),axis=1).shape)
197 | yield np.concatenate((res_x_orig,res_x),axis=1), [label[0][inds],label[1][inds],label[2][inds]]
198 | else:
199 |
200 | yield data[inds],[label[0][inds],label[1][inds],label[2][inds]]
201 |
202 |
203 |
204 | count=0
205 | for a,b in data_generator(x,[y,y,y],y,32):
206 | print(a.shape,b[0].shape)
207 | count+=1
208 | if count==20:
209 | break
210 |
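# What the smoke test above should show (a sketch under the shapes assumed in the docstring): every step
# yields a (32, 60, 14, 1) batch plus the three label arrays; with probability 2/3 the first 30 time steps
# of each sequence come from the original sample and the last 30 from a randomly chosen sample with the
# same fine-grained label, which augments the data while keeping all three labels valid.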
211 |
212 | # In[17]:
213 |
214 |
215 | def ConvBNRelu(X,filters,kernal_size=(3,3)):
216 | X = Conv2D(filters=filters,
217 | kernel_size=kernal_size,
218 | # activation='relu',
219 | use_bias=False,
220 | padding='same')(X)
221 | X = BatchNormalization()(X)
222 | X = Activation('relu')(X)
223 | return X
224 |
225 |
226 | def ConvRelu(X,filters,kernal_size=(3,3)):
227 | X = Conv2D(filters=filters,
228 | kernel_size=kernal_size,
229 | activation='relu',
230 | use_bias=False,
231 | padding='same')(X)
232 | return X
233 |
234 |
235 | def squeeze_excitation_layer(x, out_dim,ratio=8):
236 | '''
237 | SE module performs inter-channel weighting.
238 | '''
239 | squeeze = GlobalAveragePooling2D()(x)
240 |
241 | excitation = Dense(units=out_dim // ratio)(squeeze)
242 | excitation = Activation('relu')(excitation)
243 | excitation = Dense(units=out_dim)(excitation)
244 | excitation = Activation('sigmoid')(excitation)
245 | excitation = Reshape((1,1,out_dim))(excitation)
246 | scale = multiply([x,excitation])
247 | return scale
248 |
249 | # def SE_Residual(X):
250 | # A =
251 | # X = squeeze_excitation_layer(X,128)
252 | # X = Add()([X,A])
253 |
254 |
255 | def lenet5(input):
256 | A = ConvBNRelu(input,64,kernal_size=(3,3))
257 | # B = ConvBNRelu(input,16,kernal_size=(5,1))
258 | # C = ConvBNRelu(input,16,kernal_size=(7,1))
259 | # ABC = layers.Concatenate()([A,B,C])
260 | X = ConvBNRelu(A,128)
261 | # X = squeeze_excitation_layer(X,128)
262 | X = Dropout(0.2)(X)
263 |
264 | X = AveragePooling2D()(X)
265 |
266 | X = ConvBNRelu(X,256)
267 | X = Dropout(0.3)(X)
268 | # X = squeeze_excitation_layer(X,256)
269 | X = ConvBNRelu(X,512)
270 | X = Dropout(0.5)(X)
271 | # X = squeeze_excitation_layer(X,512)
272 | # X = GlobalMaxPooling2D()(X)
273 | X = GlobalAveragePooling2D()(X)
274 |
275 | # X = BatchNormalization()(X)
276 | return X
277 | import tensorflow as tf
278 | def Net(sample_num):
279 | input1 = Input(shape=(sample_num, FEATURE_NUM, 1))
280 | part = tf.split(input1,axis=2, num_or_size_splits = [6, 2, 6])
281 | # res = tf.split(c, axis = 3, num_or_size_splits = [2, 2, 4])
282 |
283 |
284 | X1 = Concatenate(axis=-2)([part[0],part[1]])
285 | X1 = lenet5(X1)
286 | X1 = BatchNormalization()(X1)
287 | X1 = Dense(128, activation='relu')(X1)
288 | X1 = BatchNormalization()(X1)
289 | X1 = Dropout(0.2)(X1)
290 |
291 | X2 = Concatenate(axis=-2)([part[0],part[2]])
292 | X2 = lenet5(X2)
293 | X2 = BatchNormalization()(X2)
294 | # X = Dense(512, activation='relu')(X)
295 | # X = BatchNormalization()(X)
296 | X2 = Dense(128, activation='relu')(X2)
297 | X2 = BatchNormalization()(X2)
298 | X2 = Dropout(0.2)(X2)
299 |
300 | X = Concatenate(axis=-1)([X1,X2])
301 |
302 | # X = Dense(256)(X)
303 |
304 | output1 = Dense(4, activation='softmax', name='4class')(X) # coarse head: the letter part (A-D)
305 | # output2 = Dense(128)(X)
306 | # output2 = Dense(64)(X)
307 | X = Dense(64)(X)
308 | output2 = Dense(7, activation='softmax', name='7class')(X) # coarse head: the number part (0-6)
309 | # X = Dense(32)(X)
310 | # X = Concatenate(axis=-1)([X,output1,output2])
311 | X = Dense(64)(X)
312 | output3 = Dense(19, activation='softmax',name='19class')(X) # fine head: the 19 behaviour classes
313 |
314 |
315 | return Model([input1], [output1,output2,output3])
316 |
317 | # model = Net(60)
318 | # model.summary()
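# Architecture sketch (assuming the usual sensor_train.csv column order, i.e. group1 =
# [acc_x..acc_zg, acc, accg, thetax, thetay, thetaz, xy, xy_g, g]): the 14 channels are split [6, 2, 6]
# into raw accelerometer channels, the two magnitude features, and the derived angle/plane features;
# two lenet5-style towers process (raw + magnitudes) and (raw + derived) respectively, and their
# concatenated embeddings feed three softmax heads: 4-class letter, 7-class number, 19-class behaviour.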
319 |
320 |
321 | # In[18]:
322 |
323 |
324 | # mappings used by the multi-task output heads
325 | mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
326 | 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
327 | 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
328 | 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
329 | 16: 'C_2', 17: 'C_5', 18: 'C_6'}
330 | # the coarse letter head has 4 classes (A-D)
331 | new_mapping = {'A':0,'B':1,'C':2,'D':3}
332 |
333 | from sklearn.utils.class_weight import compute_class_weight
334 | # y_train_weight = compute_sample_weight("balanced", train['behavior_id'])
335 | classweights1=compute_class_weight("balanced",['A','B','C','D'], pd.read_csv(root_path+'sensor_train.csv')['behavior_id'].apply(lambda x:mapping[x][0]))
336 | classweights1=pd.DataFrame(classweights1)[0].to_dict()
337 |
338 |
339 |
340 | classweights2=compute_class_weight("balanced",list(range(7)), pd.read_csv(root_path+'sensor_train.csv')['behavior_id'].apply(lambda x:int(mapping[x][2])))
341 | classweights2=pd.DataFrame(classweights2)[0].to_dict()
342 |
343 |
344 |
345 | from sklearn.utils.class_weight import compute_class_weight
346 | # y_train_weight = compute_sample_weight("balanced", train['behavior_id'])
347 | classweights3=compute_class_weight("balanced",np.array(range(19)), pd.read_csv(root_path+'sensor_train.csv')['behavior_id'])
348 | classweights3=pd.DataFrame(classweights3)[0].to_dict()
349 | classweights1,classweights2,classweights3
350 |
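# compute_class_weight("balanced", classes, y) assigns each class the weight
# n_samples / (n_classes * count(class)), so rarer behaviours contribute more to the loss;
# one weight dictionary is built per output head (4-, 7- and 19-class).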
351 |
352 | # In[19]:
353 |
354 |
355 | # [:,:,:,[1]]
356 | train = x
357 | test = t
358 |
359 |
360 | fold_num=5
361 | kfold = StratifiedKFold(fold_num,random_state=42,shuffle=True)
362 | proba_t = np.zeros((7500, 19))
363 | proba_oof = np.zeros((7292,19))
364 |
365 | oof_score = []
366 | oof_comm = []
367 | history = []
368 |
369 | from tensorflow.keras.losses import categorical_crossentropy
370 | def custom_loss(y_true, y_pred):
371 | return categorical_crossentropy(y_true, y_pred, label_smoothing=0.05)
372 |
373 | # label targets for the three output heads
374 | mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
375 | 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
376 | 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
377 | 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
378 | 16: 'C_2', 17: 'C_5', 18: 'C_6'}
379 | # coarse head 1: the letter part, 4 classes
380 | new_mapping = {'A':0,'B':1,'C':2,'D':3}
381 | y_1 = to_categorical([new_mapping[mapping[x][0]] for x in y], num_classes=4)
382 | # coarse head 2: the number part, 7 classes
383 | new_mapping = {'A':0,'B':1,'C':2,'D':3}
384 | y_2 = to_categorical([mapping[x][2] for x in y], num_classes=7)
385 | # fine head: all 19 behaviour classes
386 | y_3 = to_categorical(y, num_classes=19)
387 | # y_3=y
388 |
389 |
390 | for fold, (xx, yy) in enumerate(kfold.split(train, y)):
391 |
392 | mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
393 | 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
394 | 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
395 | 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
396 | 16: 'C_2', 17: 'C_5', 18: 'C_6'}
397 | new_mapping = {'A':0,'B':1,'C':2,'D':3}
398 |
399 | model = Net(60)
400 | model.summary()
401 | model.compile(loss=[custom_loss,custom_loss,custom_loss],loss_weights=[3,7,21],
402 | optimizer=Adam(),
403 | metrics=["acc"])#'',localscore
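# Loss-weight note: the three label-smoothed cross-entropy losses are weighted 3/7/21, so the
# fine-grained 19-class head dominates the objective while the two coarse heads act as auxiliary tasks.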
404 |
405 | plateau3 = ReduceLROnPlateau(monitor="19class_acc",
406 | verbose=1,
407 | mode='max',
408 | factor=0.5,
409 | patience=18)
410 | early_stopping = EarlyStopping(monitor="val_19class_acc",
411 | verbose=1,
412 | mode='max',
413 | patience=60)
414 |
415 | checkpoint = ModelCheckpoint(f'Conv2d_multiloss_fold{fold}.h5',
416 | monitor="val_19class_acc",
417 | verbose=0,
418 | mode='max',
419 | save_best_only=True)
420 |
421 | train_res = model.fit(data_generator(train[xx], [y_1[xx], y_2[xx], y_3[xx]],y[xx],32),
422 | epochs=1000, ###################1000
423 | steps_per_epoch=len(xx) // 32,
424 | verbose=1,
425 | shuffle=True,
426 | validation_data=(train[yy], [y_1[yy], y_2[yy],y_3[yy]]),
427 | callbacks=[plateau3, early_stopping, checkpoint],
428 | class_weight=[classweights1,classweights2,classweights3])
429 |
430 | history.append(train_res)
431 |
432 | model.load_weights(f'Conv2d_multiloss_fold{fold}.h5')
433 | proba_t += model.predict(test, verbose=0, batch_size=1024)[2] / fold_num
434 | proba_oof[yy] += model.predict(train[yy], verbose=0, batch_size=1024)[2]
435 |
436 | oof_y = np.argmax(proba_oof[yy], axis=1)
437 | acc = round(accuracy_score(y[yy], oof_y),3)
438 | print(acc)
439 | oof_score.append(acc)
440 | scores = sum(acc_combo(y_true, y_pred) for y_true, y_pred in zip(y[yy], oof_y)) / oof_y.shape[0]
441 | oof_comm.append(scores)
442 | print(round(scores, 5))
443 |
444 |
445 | # In[ ]:
446 |
447 |
448 |
449 |
450 |
451 | # In[26]:
452 |
453 |
454 | for index,i in enumerate(oof_comm):
455 | print(index,i,oof_score[index])
456 |
457 | oof_dict = {
458 | "oof":proba_oof,
459 | "test":proba_t,
460 | "acc":oof_comm,
461 | }
462 | import joblib
463 | joblib.dump(oof_dict,"0729_generator_one_third_orig_mixup_%.5f_dict.pkl"% np.mean(oof_comm))
464 |
465 |
466 | from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score
467 |
468 | def acc_combo(y, y_pred):
469 | # mapping between the numeric behaviour IDs and their codes
470 | mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
471 | 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
472 | 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
473 | 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
474 | 16: 'C_2', 17: 'C_5', 18: 'C_6'}
475 | # convert the behaviour IDs to their codes
476 | code_y, code_y_pred = mapping[y], mapping[y_pred]
477 | if code_y == code_y_pred: # codes match exactly: score 1.0
478 | return 1.0
479 | elif code_y.split("_")[0] == code_y_pred.split("_")[0]: # only the letter part matches: score 1.0/7
480 | return 1.0/7
481 | elif code_y.split("_")[1] == code_y_pred.split("_")[1]: # only the number part matches: score 1.0/3
482 | return 1.0/3
483 | else:
484 | return 0.0
485 |
486 | train_y = y
487 | labels = np.argmax(proba_t, axis=1)
488 | oof_y = np.argmax(proba_oof, axis=1)
489 | print(round(accuracy_score(train_y, oof_y), 5))
490 | scores = sum(acc_combo(y_true, y_pred) for y_true, y_pred in zip(train_y, oof_y)) / oof_y.shape[0]
491 | print(round(scores, 5))
492 | data_path = '../../data/'
493 | sub = pd.read_csv(data_path+'提交结果示例.csv')
494 | sub['behavior_id'] = labels
495 |
496 | vc = pd.Series(train_y).value_counts().sort_index()
497 | # sns.barplot(vc.index, vc.values)
498 | # plt.show()
499 |
500 | vc = pd.Series(oof_y).value_counts().sort_index()
501 | # sns.barplot(vc.index, vc.values)
502 | # plt.show()
503 |
504 | vc = sub['behavior_id'].value_counts().sort_index()
505 | # sns.barplot(vc.index, vc.values)
506 | # plt.show()
507 | sub.to_csv('0729_generator_one_third_orig_mixup_%.5f.csv' % scores, index=False)
508 | sub.info()
509 |
--------------------------------------------------------------------------------
/Preliminary/PKL/0730_generator_one_fourth_orig_mixup_087765/conv2d-avepooling_fc2-add_feature_template-multiloss-removedecay-generator-Copy2.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding: utf-8
3 |
4 | # In[1]:
5 |
6 |
7 | seed = 0
8 | import random
9 | import numpy as np
10 | import tensorflow as tf
11 | import os
12 | random.seed(seed)
13 | np.random.seed(seed)
14 | tf.random.set_seed(seed)
15 | os.environ["CUDA_DEVICE_ORDER"] = 'PCI_BUS_ID'
16 | os.environ["CUDA_VISIBLE_DEVICES"] = '0'
17 | os.environ["PYTHONHASHSEED"] = str(seed)
18 |
19 | os.environ['TF_DETERMINISTIC_OPS'] = '1'
20 |
21 |
22 | # In[2]:
23 |
24 |
25 | import numpy as np
26 | import pandas as pd
27 | from tqdm import tqdm
28 | from scipy.signal import resample
29 | from tensorflow.keras import layers
30 | from tensorflow.keras.layers import *
31 | from tensorflow.keras.models import Model
32 | from tensorflow.keras.optimizers import Adam
33 | from tensorflow.keras.utils import to_categorical
34 | from sklearn.model_selection import StratifiedKFold
35 | from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
36 | import os
37 | from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score
38 |
39 | from sklearn.preprocessing import StandardScaler,MinMaxScaler
40 |
41 | def acc_combo(y, y_pred):
42 | # mapping between the numeric behaviour IDs and their codes
43 | mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
44 | 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
45 | 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
46 | 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
47 | 16: 'C_2', 17: 'C_5', 18: 'C_6'}
48 | # convert the behaviour IDs to their codes
49 | code_y, code_y_pred = mapping[y], mapping[y_pred]
50 | if code_y == code_y_pred: # codes match exactly: score 1.0
51 | return 1.0
52 | elif code_y.split("_")[0] == code_y_pred.split("_")[0]: # only the letter part matches: score 1.0/7
53 | return 1.0/7
54 | elif code_y.split("_")[1] == code_y_pred.split("_")[1]: # only the number part matches: score 1.0/3
55 | return 1.0/3
56 | else:
57 | return 0.0
58 |
59 |
60 | sample_num = 60
61 |
62 |
63 | # In[4]:
64 |
65 |
66 | root_path = '../../data/'
67 | train = pd.read_csv(root_path+'sensor_train.csv')
68 | test = pd.read_csv(root_path+'sensor_test.csv')
69 | sub = pd.read_csv(root_path+'提交结果示例.csv')
70 | y = train.groupby('fragment_id')['behavior_id'].min()
71 |
72 |
73 | # In[5]:
74 |
75 |
76 | def add_features(df):
77 | print(df.columns)
78 | df['acc'] = (df.acc_x ** 2 + df.acc_y ** 2 + df.acc_z ** 2) ** .5
79 | df['accg'] = (df.acc_xg ** 2 + df.acc_yg ** 2 + df.acc_zg ** 2) ** .5
80 | df['thetax']=np.arctan(df.acc_xg/
81 | np.sqrt(df.acc_yg*df.acc_yg+df.acc_zg*df.acc_zg))*180/np.pi
82 | df['thetay']=np.arctan(df.acc_yg/
83 | np.sqrt(df.acc_xg*df.acc_xg+df.acc_zg*df.acc_zg))*180/np.pi
84 | df['thetaz']=np.arctan(df.acc_zg/
85 | np.sqrt(df.acc_yg*df.acc_yg+df.acc_xg*df.acc_xg))*180/np.pi
86 |
87 | df['xy'] = (df['acc_x'] ** 2 + df['acc_y'] ** 2) ** 0.5
88 | df['xy_g'] = (df['acc_xg'] ** 2 + df['acc_yg'] ** 2) ** 0.5
89 |
90 | df['g'] = ((df["acc_x"] - df["acc_xg"]) ** 2 +
91 | (df["acc_y"] - df["acc_yg"]) ** 2 + (df["acc_z"] - df["acc_zg"]) ** 2) ** 0.5
92 |
93 | print(df.columns)
94 | return df
95 |
96 |
97 | # In[6]:
98 |
99 |
100 | train=add_features(train)
101 | test=add_features(test)
102 |
103 |
104 | # In[7]:
105 |
106 |
107 | group1 = [x for x in train.columns if x not in ['fragment_id', 'time_point','behavior_id']]
108 | group1
109 |
110 |
111 | # In[8]:
112 |
113 |
114 | FEATURE_NUM=14
115 |
116 |
117 | # In[10]:
118 |
119 |
120 |
121 | x = np.zeros((7292, sample_num, FEATURE_NUM, 1))
122 | t = np.zeros((7500, sample_num, FEATURE_NUM, 1))
123 |
124 |
125 | # In[11]:
126 |
127 |
128 |
129 | train = train[['fragment_id', 'time_point', 'behavior_id']+group1]
130 | test = test[['fragment_id', 'time_point']+group1]
131 | print(train.columns)
132 |
133 | for i in tqdm(range(7292)):
134 | tmp = train[train.fragment_id == i][:sample_num]
135 | x[i,:,:,0] = resample(tmp.drop(['fragment_id', 'time_point', 'behavior_id'],
136 | axis=1)[group1], sample_num, np.array(tmp.time_point))[0].reshape(sample_num,FEATURE_NUM)
137 | for i in tqdm(range(7500)):
138 | tmp = test[test.fragment_id == i][:sample_num]
139 | t[i,:,:,0] = resample(tmp.drop(['fragment_id', 'time_point'],
140 | axis=1)[group1], sample_num, np.array(tmp.time_point))[0].reshape(sample_num,FEATURE_NUM)
141 |
142 |
143 |
144 |
145 |
146 | # In[14]:
147 |
148 |
149 | # A complete data generator: with probability 2/3 it splices two same-label sequences together (a mixup-style augmentation)
150 | def data_generator(data,label,class17label,batch_size):
151 | """
152 | data: array of shape (n_samples, 60, 14, 1)
153 | label: list of the three one-hot label arrays [4-class, 7-class, 19-class]
154 | class17label: Series/array of the fine-grained class labels, used to pick a same-label sample to splice with
155 | """
156 | class17label=np.asarray(class17label)
157 | length=len(data)
158 | seq_length=len(data[0])
159 | half_seq_length=int(seq_length/2)
160 |
161 | # index2label
162 | index2label=dict(zip(range(length),class17label))
163 |
164 | label2index={}
165 | # print(class17label)
166 | for i in range(length):
167 | # print(class17label[i],label2index.get(class17label[i],[]))
168 | label2index[class17label[i]]=label2index.get(class17label[i],[])
169 | label2index[class17label[i]].append(i)
170 |
171 | count=0
172 | np.random.seed(seed)  # for reproducible results
173 |
174 | while True:
175 |
176 | if count==0 or (count + 1) * batch_size > length: # first batch, or the next batch would run past the end: reshuffle
177 | count=0
178 | shuffle_index = list(range(length))
179 | np.random.shuffle(shuffle_index) ## shuffle the sample indices
180 | 
181 | start = count * batch_size ## start index of this batch
182 | end = (count + 1) * batch_size ## end index of this batch
183 | inds=shuffle_index[start:end]
184 |
185 | count+=1
186 |
187 | if random.choice([0,1,1]):
188 | # mixup-style splice (applied with probability 2/3)
189 | # one sample's index -> its label -> all indices that share that label
190 | choice_index=[random.choice(label2index[index2label[x]]) for x in inds] # randomly pick a same-label sequence to concatenate with
191 | # first 1/2 of seq_length keeps the original points, the last 1/2 comes from the randomly chosen same-label sample
192 | res_x_orig=data[inds,:half_seq_length]
193 | res_x=data[choice_index,half_seq_length:]
194 |
195 | # print(inds)
196 | # print(data.shape,res_x_orig.shape,res_x.shape,np.concatenate((res_x_orig,res_x),axis=1).shape)
197 | yield np.concatenate((res_x_orig,res_x),axis=1), [label[0][inds],label[1][inds],label[2][inds]]
198 | else:
199 |
200 | yield data[inds],[label[0][inds],label[1][inds],label[2][inds]]
201 |
202 |
203 |
204 | count=0
205 | for a,b in data_generator(x,[y,y,y],y,32):
206 | print(a.shape,b[0].shape)
207 | count+=1
208 | if count==20:
209 | break
210 |
211 |
212 | # In[15]:
213 |
214 |
215 | def ConvBNRelu(X,filters,kernal_size=(3,3)):
216 | X = Conv2D(filters=filters,
217 | kernel_size=kernal_size,
218 | # activation='relu',
219 | use_bias=False,
220 | padding='same')(X)
221 | X = BatchNormalization()(X)
222 | X = Activation('relu')(X)
223 | return X
224 |
225 |
226 | def ConvRelu(X,filters,kernal_size=(3,3)):
227 | X = Conv2D(filters=filters,
228 | kernel_size=kernal_size,
229 | activation='relu',
230 | use_bias=False,
231 | padding='same')(X)
232 | return X
233 |
234 |
235 | def squeeze_excitation_layer(x, out_dim,ratio=8):
236 | '''
237 | SE module performs inter-channel weighting.
238 | '''
239 | squeeze = GlobalAveragePooling2D()(x)
240 |
241 | excitation = Dense(units=out_dim // ratio)(squeeze)
242 | excitation = Activation('relu')(excitation)
243 | excitation = Dense(units=out_dim)(excitation)
244 | excitation = Activation('sigmoid')(excitation)
245 | excitation = Reshape((1,1,out_dim))(excitation)
246 | scale = multiply([x,excitation])
247 | return scale
248 |
249 | # def SE_Residual(X):
250 | # A =
251 | # X = squeeze_excitation_layer(X,128)
252 | # X = Add()([X,A])
253 |
254 |
255 | def lenet5(input):
256 | A = ConvBNRelu(input,64,kernal_size=(3,3))
257 | # B = ConvBNRelu(input,16,kernal_size=(5,1))
258 | # C = ConvBNRelu(input,16,kernal_size=(7,1))
259 | # ABC = layers.Concatenate()([A,B,C])
260 | X = ConvBNRelu(A,128)
261 | # X = squeeze_excitation_layer(X,128)
262 | X = Dropout(0.2)(X)
263 |
264 | X = AveragePooling2D()(X)
265 |
266 | X = ConvBNRelu(X,256)
267 | X = Dropout(0.3)(X)
268 | # X = squeeze_excitation_layer(X,256)
269 | X = ConvBNRelu(X,512)
270 | X = Dropout(0.5)(X)
271 | # X = squeeze_excitation_layer(X,512)
272 | # X = GlobalMaxPooling2D()(X)
273 | X = GlobalAveragePooling2D()(X)
274 |
275 | # X = BatchNormalization()(X)
276 | return X
277 | import tensorflow as tf
278 | def Net(sample_num):
279 | input1 = Input(shape=(sample_num, FEATURE_NUM, 1))
280 | part = tf.split(input1,axis=2, num_or_size_splits = [6, 2, 6])
281 | # res = tf.split(c, axis = 3, num_or_size_splits = [2, 2, 4])
282 |
283 |
284 | X1 = Concatenate(axis=-2)([part[0],part[1]])
285 | X1 = lenet5(X1)
286 | X1 = BatchNormalization()(X1)
287 | X1 = Dense(128, activation='relu')(X1)
288 | X1 = BatchNormalization()(X1)
289 | X1 = Dropout(0.2)(X1)
290 |
291 | X2 = Concatenate(axis=-2)([part[0],part[2]])
292 | X2 = lenet5(X2)
293 | X2 = BatchNormalization()(X2)
294 | # X = Dense(512, activation='relu')(X)
295 | # X = BatchNormalization()(X)
296 | X2 = Dense(128, activation='relu')(X2)
297 | X2 = BatchNormalization()(X2)
298 | X2 = Dropout(0.2)(X2)
299 |
300 | X = Concatenate(axis=-1)([X1,X2])
301 |
302 | # X = Dense(256)(X)
303 |
304 | output1 = Dense(4, activation='softmax', name='4class')(X) # coarse head: the letter part (A-D)
305 | # output2 = Dense(128)(X)
306 | # output2 = Dense(64)(X)
307 | X = Dense(64)(X)
308 | output2 = Dense(7, activation='softmax', name='7class')(X) # coarse head: the number part (0-6)
309 | # X = Dense(32)(X)
310 | # X = Concatenate(axis=-1)([X,output1,output2])
311 | X = Dense(64)(X)
312 | output3 = Dense(19, activation='softmax',name='19class')(X) # fine head: the 19 behaviour classes
313 |
314 |
315 | return Model([input1], [output1,output2,output3])
316 |
317 | model = Net(60)
318 | model.summary()
319 |
320 |
321 | # In[16]:
322 |
323 |
324 | # mappings used by the multi-task output heads
325 | mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
326 | 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
327 | 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
328 | 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
329 | 16: 'C_2', 17: 'C_5', 18: 'C_6'}
330 | # the coarse letter head has 4 classes (A-D)
331 | new_mapping = {'A':0,'B':1,'C':2,'D':3}
332 |
333 | from sklearn.utils.class_weight import compute_class_weight
334 | # y_train_weight = compute_sample_weight("balanced", train['behavior_id'])
335 | classweights1=compute_class_weight("balanced",['A','B','C','D'], pd.read_csv(root_path+'sensor_train.csv')['behavior_id'].apply(lambda x:mapping[x][0]))
336 | classweights1=pd.DataFrame(classweights1)[0].to_dict()
337 |
338 |
339 |
340 | classweights2=compute_class_weight("balanced",list(range(7)), pd.read_csv(root_path+'sensor_train.csv')['behavior_id'].apply(lambda x:int(mapping[x][2])))
341 | classweights2=pd.DataFrame(classweights2)[0].to_dict()
342 |
343 |
344 |
345 | from sklearn.utils.class_weight import compute_class_weight
346 | # y_train_weight = compute_sample_weight("balanced", train['behavior_id'])
347 | classweights3=compute_class_weight("balanced",np.array(range(19)), pd.read_csv(root_path+'sensor_train.csv')['behavior_id'])
348 | classweights3=pd.DataFrame(classweights3)[0].to_dict()
349 | classweights1,classweights2,classweights3
350 |
351 |
352 | # In[17]:
353 |
354 |
355 | # [:,:,:,[1]]
356 | train = x
357 | test = t
358 |
359 |
360 | fold_num=5
361 | kfold = StratifiedKFold(fold_num,random_state=42,shuffle=True)
362 | proba_t = np.zeros((7500, 19))
363 | proba_oof = np.zeros((7292,19))
364 |
365 | oof_score = []
366 | oof_comm = []
367 | history = []
368 |
369 | from tensorflow.keras.losses import categorical_crossentropy
370 | def custom_loss(y_true, y_pred):
371 | return categorical_crossentropy(y_true, y_pred, label_smoothing=0.05)
372 |
373 | # label targets for the three output heads
374 | mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
375 | 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
376 | 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
377 | 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
378 | 16: 'C_2', 17: 'C_5', 18: 'C_6'}
379 | # coarse head 1: the letter part, 4 classes
380 | new_mapping = {'A':0,'B':1,'C':2,'D':3}
381 | y_1 = to_categorical([new_mapping[mapping[x][0]] for x in y], num_classes=4)
382 | # coarse head 2: the number part, 7 classes
383 | new_mapping = {'A':0,'B':1,'C':2,'D':3}
384 | y_2 = to_categorical([mapping[x][2] for x in y], num_classes=7)
385 | # fine head: all 19 behaviour classes
386 | y_3 = to_categorical(y, num_classes=19)
387 | # y_3=y
388 |
389 |
390 | for fold, (xx, yy) in enumerate(kfold.split(train, y)):
391 |
392 | mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
393 | 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
394 | 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
395 | 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
396 | 16: 'C_2', 17: 'C_5', 18: 'C_6'}
397 | new_mapping = {'A':0,'B':1,'C':2,'D':3}
398 |
399 | model = Net(60)
400 | model.summary()
401 | model.compile(loss=[custom_loss,custom_loss,custom_loss],loss_weights=[3,7,21],
402 | optimizer=Adam(),
403 | metrics=["acc"])#'',localscore
404 |
405 | plateau3 = ReduceLROnPlateau(monitor="19class_acc",
406 | verbose=1,
407 | mode='max',
408 | factor=0.5,
409 | patience=18)
410 | early_stopping = EarlyStopping(monitor="val_19class_acc",
411 | verbose=1,
412 | mode='max',
413 | patience=60)
414 |
415 | checkpoint = ModelCheckpoint(f'Conv2d_multiloss_fold{fold}.h5',
416 | monitor="val_19class_acc",
417 | verbose=0,
418 | mode='max',
419 | save_best_only=True)
420 |
421 | train_res = model.fit(data_generator(train[xx], [y_1[xx], y_2[xx], y_3[xx]],y[xx],32),
422 | epochs=1000, ##################################
423 | steps_per_epoch=len(xx) // 32,
424 | verbose=1,
425 | shuffle=True,
426 | validation_data=(train[yy], [y_1[yy], y_2[yy],y_3[yy]]),
427 | callbacks=[plateau3, early_stopping, checkpoint],
428 | class_weight=[classweights1,classweights2,classweights3])
429 |
430 | history.append(train_res)
431 |
432 | model.load_weights(f'Conv2d_multiloss_fold{fold}.h5')
433 | proba_t += model.predict(test, verbose=0, batch_size=1024)[2] / fold_num
434 | proba_oof[yy] += model.predict(train[yy], verbose=0, batch_size=1024)[2]
435 |
436 | oof_y = np.argmax(proba_oof[yy], axis=1)
437 | acc = round(accuracy_score(y[yy], oof_y),3)
438 | print(acc)
439 | oof_score.append(acc)
440 | scores = sum(acc_combo(y_true, y_pred) for y_true, y_pred in zip(y[yy], oof_y)) / oof_y.shape[0]
441 | oof_comm.append(scores)
442 | print(round(scores, 5))
443 |
444 |
445 | # In[ ]:
446 |
447 |
448 |
449 |
450 |
451 | # In[26]:
452 |
453 |
454 | for index,i in enumerate(oof_comm):
455 | print(index,i,oof_score[index])
456 |
457 | oof_dict = {
458 | "oof":proba_oof,
459 | "test":proba_t,
460 | "acc":oof_comm,
461 | }
462 | import joblib
463 | joblib.dump(oof_dict,"0729_generator_one_third_orig_mixup_%.5f_dict.pkl"% np.mean(oof_comm))
464 |
465 |
466 | from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score
467 |
468 | def acc_combo(y, y_pred):
469 | # mapping between the numeric behaviour IDs and their codes
470 | mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
471 | 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
472 | 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
473 | 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
474 | 16: 'C_2', 17: 'C_5', 18: 'C_6'}
475 | # convert the behaviour IDs to their codes
476 | code_y, code_y_pred = mapping[y], mapping[y_pred]
477 | if code_y == code_y_pred: # codes match exactly: score 1.0
478 | return 1.0
479 | elif code_y.split("_")[0] == code_y_pred.split("_")[0]: # only the letter part matches: score 1.0/7
480 | return 1.0/7
481 | elif code_y.split("_")[1] == code_y_pred.split("_")[1]: # only the number part matches: score 1.0/3
482 | return 1.0/3
483 | else:
484 | return 0.0
485 |
486 | train_y = y
487 | labels = np.argmax(proba_t, axis=1)
488 | oof_y = np.argmax(proba_oof, axis=1)
489 | print(round(accuracy_score(train_y, oof_y), 5))
490 | scores = sum(acc_combo(y_true, y_pred) for y_true, y_pred in zip(train_y, oof_y)) / oof_y.shape[0]
491 | print(round(scores, 5))
492 | data_path = '../../data/'
493 | sub = pd.read_csv(data_path+'提交结果示例.csv')
494 | sub['behavior_id'] = labels
495 |
496 | vc = pd.Series(train_y).value_counts().sort_index()
497 | # sns.barplot(vc.index, vc.values)
498 | # plt.show()
499 |
500 | vc = pd.Series(oof_y).value_counts().sort_index()
501 | # sns.barplot(vc.index, vc.values)
502 | # plt.show()
503 |
504 | vc = sub['behavior_id'].value_counts().sort_index()
505 | # sns.barplot(vc.index, vc.values)
506 | # plt.show()
507 | sub.to_csv('0729_generator_one_third_orig_mixup_%.5f.csv' % scores, index=False)
508 | sub.info()
509 |
510 |
--------------------------------------------------------------------------------
/Preliminary/PKL/0729_generator_one_third_orig_mixup_086223/conv2d-avepooling_fc2-add_feature_template-multiloss-removedecay-generator.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding: utf-8
3 |
4 | # In[1]:
5 |
6 |
7 | seed = 0
8 | import random
9 | import numpy as np
10 | import tensorflow as tf
11 | import os
12 | random.seed(seed)
13 | np.random.seed(seed)
14 | tf.random.set_seed(seed)
15 | os.environ["CUDA_DEVICE_ORDER"] = 'PCI_BUS_ID'
16 | os.environ["CUDA_VISIBLE_DEVICES"] = '0'
17 | os.environ["PYTHONHASHSEED"] = str(seed)
18 |
19 | os.environ['TF_DETERMINISTIC_OPS'] = '1'
20 |
21 |
22 | # In[2]:
23 |
24 |
25 | import numpy as np
26 | import pandas as pd
27 | # select the better-performing models
28 | # import seaborn as sns
29 |
30 | # import matplotlib.pyplot as plt
31 | from tqdm import tqdm
32 | from scipy.signal import resample
33 | from tensorflow.keras import layers
34 | from tensorflow.keras.layers import *
35 | from tensorflow.keras.models import Model
36 | from tensorflow.keras.optimizers import Adam
37 | from tensorflow.keras.utils import to_categorical
38 | from sklearn.model_selection import StratifiedKFold
39 | from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
40 | import os
41 | from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score
42 |
43 | from sklearn.preprocessing import StandardScaler,MinMaxScaler
44 | # get_ipython().run_line_magic('load_ext', 'autoreload')
45 | # get_ipython().run_line_magic('autoreload', '2')
46 |
47 | def acc_combo(y, y_pred):
48 | # mapping between the numeric behaviour IDs and their codes
49 | mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
50 | 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
51 | 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
52 | 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
53 | 16: 'C_2', 17: 'C_5', 18: 'C_6'}
54 | # convert the behaviour IDs to their codes
55 | code_y, code_y_pred = mapping[y], mapping[y_pred]
56 | if code_y == code_y_pred: # codes match exactly: score 1.0
57 | return 1.0
58 | elif code_y.split("_")[0] == code_y_pred.split("_")[0]: # only the letter part matches: score 1.0/7
59 | return 1.0/7
60 | elif code_y.split("_")[1] == code_y_pred.split("_")[1]: # only the number part matches: score 1.0/3
61 | return 1.0/3
62 | else:
63 | return 0.0
64 |
65 |
66 | sample_num = 60
67 |
68 |
69 | # In[3]:
70 |
71 |
72 | root_path = '../../data/'
73 | train = pd.read_csv(root_path+'sensor_train.csv')
74 | test = pd.read_csv(root_path+'sensor_test.csv')
75 | sub = pd.read_csv(root_path+'提交结果示例.csv')
76 | y = train.groupby('fragment_id')['behavior_id'].min()
77 |
78 |
79 | # In[4]:
80 |
81 |
82 | def add_features(df):
83 | print(df.columns)
84 | df['acc'] = (df.acc_x ** 2 + df.acc_y ** 2 + df.acc_z ** 2) ** .5
85 | df['accg'] = (df.acc_xg ** 2 + df.acc_yg ** 2 + df.acc_zg ** 2) ** .5
86 | df['thetax']=np.arctan(df.acc_xg/
87 | np.sqrt(df.acc_yg*df.acc_yg+df.acc_zg*df.acc_zg))*180/np.pi
88 | df['thetay']=np.arctan(df.acc_yg/
89 | np.sqrt(df.acc_xg*df.acc_xg+df.acc_zg*df.acc_zg))*180/np.pi
90 | df['thetaz']=np.arctan(df.acc_zg/
91 | np.sqrt(df.acc_yg*df.acc_yg+df.acc_xg*df.acc_xg))*180/np.pi
92 |
93 | df['xy'] = (df['acc_x'] ** 2 + df['acc_y'] ** 2) ** 0.5
94 | df['xy_g'] = (df['acc_xg'] ** 2 + df['acc_yg'] ** 2) ** 0.5
95 |
96 | df['g'] = ((df["acc_x"] - df["acc_xg"]) ** 2 +
97 | (df["acc_y"] - df["acc_yg"]) ** 2 + (df["acc_z"] - df["acc_zg"]) ** 2) ** 0.5
98 |
99 | print(df.columns)
100 | return df
101 |
102 |
103 | # In[5]:
104 |
105 |
106 | train=add_features(train)
107 | test=add_features(test)
108 |
109 |
110 | # In[6]:
111 |
112 |
113 | group1 = [x for x in train.columns if x not in ['fragment_id', 'time_point','behavior_id']]
114 | group1
115 |
116 |
117 | # In[7]:
118 |
119 |
120 | FEATURE_NUM=14
121 |
122 |
123 | # In[8]:
124 |
125 |
126 |
127 | x = np.zeros((7292, sample_num, FEATURE_NUM, 1))
128 | t = np.zeros((7500, sample_num, FEATURE_NUM, 1))
129 |
130 | from scipy.fftpack import fft
131 | from scipy.signal import resample
132 | def get_fft_values(y_values, N, f_s):
133 | f_values = np.linspace(0.0, f_s/2.0, N//2)
134 | fft_values_ = fft(y_values)
135 | # plt.plot(fft_values_)  # plotting disabled: matplotlib is not imported in this script
136 | # plt.show()
137 | print(fft_values_.shape)
138 | fft_values = 2.0/N * np.abs(fft_values_[0:N//2])
139 | print(fft_values.shape)
140 | return f_values, fft_values
141 |
142 | # tmp = train[train.fragment_id == 0][:sample_num]
143 |
144 | # get_fft_values(tmp["acc"].values,60,5)
145 |
146 |
147 | # In[9]:
148 |
149 |
150 |
151 | train = train[['fragment_id', 'time_point', 'behavior_id']+group1]
152 | test = test[['fragment_id', 'time_point']+group1]
153 | print(train.columns)
154 |
155 | for i in tqdm(range(7292)):
156 | tmp = train[train.fragment_id == i][:sample_num]
157 | x[i,:,:,0] = resample(tmp.drop(['fragment_id', 'time_point', 'behavior_id'],
158 | axis=1)[group1], sample_num, np.array(tmp.time_point))[0].reshape(sample_num,FEATURE_NUM)
159 | for i in tqdm(range(7500)):
160 | tmp = test[test.fragment_id == i][:sample_num]
161 | t[i,:,:,0] = resample(tmp.drop(['fragment_id', 'time_point'],
162 | axis=1)[group1], sample_num, np.array(tmp.time_point))[0].reshape(sample_num,FEATURE_NUM)
163 |
164 |
165 |
166 |
167 |
168 | # In[10]:
169 |
170 |
171 | # A complete data generator: with probability 2/3 it splices two same-label sequences together (a mixup-style augmentation)
172 | def data_generator(data,label,class17label,batch_size):
173 | """
174 | data: array of shape (n_samples, 60, 14, 1)
175 | label: list of the three one-hot label arrays [4-class, 7-class, 19-class]
176 | class17label: Series/array of the fine-grained class labels, used to pick a same-label sample to splice with
177 | """
178 | class17label=np.asarray(class17label)
179 | length=len(data)
180 | seq_length=len(data[0])
181 | half_seq_length=int(seq_length/2)
182 |
183 | # index2label
184 | index2label=dict(zip(range(length),class17label))
185 |
186 | label2index={}
187 | # print(class17label)
188 | for i in range(length):
189 | # print(class17label[i],label2index.get(class17label[i],[]))
190 | label2index[class17label[i]]=label2index.get(class17label[i],[])
191 | label2index[class17label[i]].append(i)
192 |
193 | count=0
194 | np.random.seed(seed)  # for reproducible results
195 |
196 | while True:
197 |
198 | if count==0 or (count + 1) * batch_size > length: # first batch, or the next batch would run past the end: reshuffle
199 | count=0
200 | shuffle_index = list(range(length))
201 | np.random.shuffle(shuffle_index) ## shuffle the sample indices
202 | 
203 | start = count * batch_size ## start index of this batch
204 | end = (count + 1) * batch_size ## end index of this batch
205 | inds=shuffle_index[start:end]
206 |
207 | count+=1
208 |
209 | if random.choice([0,1,1]):
210 | # mixup-style splice (applied with probability 2/3)
211 | # one sample's index -> its label -> all indices that share that label
212 | choice_index=[random.choice(label2index[index2label[x]]) for x in inds] # randomly pick a same-label sequence to concatenate with
213 | # first 1/2 of seq_length keeps the original points, the last 1/2 comes from the randomly chosen same-label sample
214 | res_x_orig=data[inds,:half_seq_length]
215 | res_x=data[choice_index,half_seq_length:]
216 |
217 | # print(inds)
218 | # print(data.shape,res_x_orig.shape,res_x.shape,np.concatenate((res_x_orig,res_x),axis=1).shape)
219 | yield np.concatenate((res_x_orig,res_x),axis=1), [label[0][inds],label[1][inds],label[2][inds]]
220 | else:
221 |
222 | yield data[inds],[label[0][inds],label[1][inds],label[2][inds]]
223 |
224 |
225 |
226 | count=0
227 | for a,b in data_generator(x,[y,y,y],y,32):
228 | print(a.shape,b[0].shape)
229 | count+=1
230 | if count==20:
231 | break
232 |
233 |
234 | # In[11]:
235 |
236 |
237 | def ConvBNRelu(X,filters,kernal_size=(3,3)):
238 | X = Conv2D(filters=filters,
239 | kernel_size=kernal_size,
240 | # activation='relu',
241 | use_bias=False,
242 | padding='same')(X)
243 | X = BatchNormalization()(X)
244 | X = Activation('relu')(X)
245 | return X
246 |
247 |
248 | def ConvRelu(X,filters,kernal_size=(3,3)):
249 | X = Conv2D(filters=filters,
250 | kernel_size=kernal_size,
251 | activation='relu',
252 | use_bias=False,
253 | padding='same')(X)
254 | return X
255 |
256 |
257 | def squeeze_excitation_layer(x, out_dim,ratio=8):
258 | '''
259 | SE module performs inter-channel weighting.
260 | '''
261 | squeeze = GlobalAveragePooling2D()(x)
262 |
263 | excitation = Dense(units=out_dim // ratio)(squeeze)
264 | excitation = Activation('relu')(excitation)
265 | excitation = Dense(units=out_dim)(excitation)
266 | excitation = Activation('sigmoid')(excitation)
267 | excitation = Reshape((1,1,out_dim))(excitation)
268 | scale = multiply([x,excitation])
269 | return scale
270 |
271 | # def SE_Residual(X):
272 | # A =
273 | # X = squeeze_excitation_layer(X,128)
274 | # X = Add()([X,A])
275 |
276 |
277 | def lenet5(input):
278 | A = ConvBNRelu(input,64,kernal_size=(3,3))
279 | # B = ConvBNRelu(input,16,kernal_size=(5,1))
280 | # C = ConvBNRelu(input,16,kernal_size=(7,1))
281 | # ABC = layers.Concatenate()([A,B,C])
282 | X = ConvBNRelu(A,128)
283 | # X = squeeze_excitation_layer(X,128)
284 | X = Dropout(0.2)(X)
285 |
286 | X = AveragePooling2D()(X)
287 |
288 | X = ConvBNRelu(X,256)
289 | X = Dropout(0.3)(X)
290 | # X = squeeze_excitation_layer(X,256)
291 | X = ConvBNRelu(X,512)
292 | X = Dropout(0.5)(X)
293 | # X = squeeze_excitation_layer(X,512)
294 | # X = GlobalMaxPooling2D()(X)
295 | X = GlobalAveragePooling2D()(X)
296 |
297 | # X = BatchNormalization()(X)
298 | return X
299 | import tensorflow as tf
300 | def Net(sample_num):
301 | input1 = Input(shape=(sample_num, FEATURE_NUM, 1))
302 | part = tf.split(input1,axis=2, num_or_size_splits = [6, 2, 6])
303 | # res = tf.split(c, axis = 3, num_or_size_splits = [2, 2, 4])
304 |
305 |
306 | X1 = Concatenate(axis=-2)([part[0],part[1]])
307 | X1 = lenet5(X1)
308 | X1 = BatchNormalization()(X1)
309 | X1 = Dense(128, activation='relu')(X1)
310 | X1 = BatchNormalization()(X1)
311 | X1 = Dropout(0.2)(X1)
312 |
313 | X2 = Concatenate(axis=-2)([part[0],part[2]])
314 | X2 = lenet5(X2)
315 | X2 = BatchNormalization()(X2)
316 | # X = Dense(512, activation='relu')(X)
317 | # X = BatchNormalization()(X)
318 | X2 = Dense(128, activation='relu')(X2)
319 | X2 = BatchNormalization()(X2)
320 | X2 = Dropout(0.2)(X2)
321 |
322 | X = Concatenate(axis=-1)([X1,X2])
323 |
324 | # X = Dense(256)(X)
325 |
326 | output1 = Dense(4, activation='softmax', name='4class')(X) # coarse head: the letter part (A-D)
327 | # output2 = Dense(128)(X)
328 | # output2 = Dense(64)(X)
329 | X = Dense(64)(X)
330 | output2 = Dense(7, activation='softmax', name='7class')(X) # coarse head: the number part (0-6)
331 | # X = Dense(32)(X)
332 | # X = Concatenate(axis=-1)([X,output1,output2])
333 | X = Dense(64)(X)
334 | output3 = Dense(19, activation='softmax',name='19class')(X) # fine head: the 19 behaviour classes
335 |
336 |
337 | return Model([input1], [output1,output2,output3])
338 |
339 | model = Net(60)
340 | model.summary()
341 |
342 |
343 | # In[12]:
344 |
345 |
346 | # mappings used by the multi-task output heads
347 | mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
348 | 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
349 | 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
350 | 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
351 | 16: 'C_2', 17: 'C_5', 18: 'C_6'}
352 | # the coarse letter head has 4 classes (A-D)
353 | new_mapping = {'A':0,'B':1,'C':2,'D':3}
354 |
355 | from sklearn.utils.class_weight import compute_class_weight
356 | # y_train_weight = compute_sample_weight("balanced", train['behavior_id'])
357 | classweights1=compute_class_weight("balanced",['A','B','C','D'], pd.read_csv(root_path+'sensor_train.csv')['behavior_id'].apply(lambda x:mapping[x][0]))
358 | classweights1=pd.DataFrame(classweights1)[0].to_dict()
359 |
360 |
361 |
362 | classweights2=compute_class_weight("balanced",list(range(7)), pd.read_csv(root_path+'sensor_train.csv')['behavior_id'].apply(lambda x:int(mapping[x][2])))
363 | classweights2=pd.DataFrame(classweights2)[0].to_dict()
364 |
365 |
366 |
367 | from sklearn.utils.class_weight import compute_class_weight
368 | # y_train_weight = compute_sample_weight("balanced", train['behavior_id'])
369 | classweights3=compute_class_weight("balanced",np.array(range(19)), pd.read_csv(root_path+'sensor_train.csv')['behavior_id'])
370 | classweights3=pd.DataFrame(classweights3)[0].to_dict()
371 | classweights1,classweights2,classweights3
372 |
373 |
374 | # In[13]:
375 |
376 |
377 | # [:,:,:,[1]]
378 | train = x
379 | test = t
380 |
381 |
382 | fold_num=5
383 | kfold = StratifiedKFold(fold_num,random_state=42,shuffle=True)
384 | proba_t = np.zeros((7500, 19))
385 | proba_oof = np.zeros((7292,19))
386 |
387 | oof_score = []
388 | oof_comm = []
389 | history = []
390 |
391 | from tensorflow.keras.losses import categorical_crossentropy
392 | def custom_loss(y_true, y_pred):
393 | return categorical_crossentropy(y_true, y_pred, label_smoothing=0.05)
394 |
395 | # label targets for the three output heads
396 | mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
397 | 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
398 | 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
399 | 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
400 | 16: 'C_2', 17: 'C_5', 18: 'C_6'}
401 | # coarse head 1: the letter part, 4 classes
402 | new_mapping = {'A':0,'B':1,'C':2,'D':3}
403 | y_1 = to_categorical([new_mapping[mapping[x][0]] for x in y], num_classes=4)
404 | # coarse head 2: the number part, 7 classes
405 | new_mapping = {'A':0,'B':1,'C':2,'D':3}
406 | y_2 = to_categorical([mapping[x][2] for x in y], num_classes=7)
407 | # fine head: all 19 behaviour classes
408 | y_3 = to_categorical(y, num_classes=19)
409 | # y_3=y
410 |
411 |
412 | for fold, (xx, yy) in enumerate(kfold.split(train, y)):
413 |
414 | mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
415 | 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
416 | 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
417 | 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
418 | 16: 'C_2', 17: 'C_5', 18: 'C_6'}
419 | new_mapping = {'A':0,'B':1,'C':2,'D':3}
420 |
421 | model = Net(60)
422 | model.summary()
423 | model.compile(loss=[custom_loss,custom_loss,custom_loss],loss_weights=[3,7,21],
424 | optimizer=Adam(),
425 | metrics=["acc"])#'',localscore
426 |
427 | plateau3 = ReduceLROnPlateau(monitor="19class_acc",
428 | verbose=1,
429 | mode='max',
430 | factor=0.5,
431 | patience=18)
432 | early_stopping = EarlyStopping(monitor="val_19class_acc",
433 | verbose=1,
434 | mode='max',
435 | patience=60)
436 |
437 | checkpoint = ModelCheckpoint(f'Conv2d_multiloss_fold{fold}.h5',
438 | monitor="val_19class_acc",
439 | verbose=0,
440 | mode='max',
441 | save_best_only=True)
442 |
443 | train_res = model.fit(data_generator(train[xx], [y_1[xx], y_2[xx], y_3[xx]],y[xx],32),
444 | epochs=1000, ############################3
445 | steps_per_epoch=len(xx) // 32,
446 | verbose=1,
447 | shuffle=True,
448 | validation_data=(train[yy], [y_1[yy], y_2[yy],y_3[yy]]),
449 | callbacks=[plateau3, early_stopping, checkpoint],
450 | class_weight=[classweights1,classweights2,classweights3])
451 |
452 | history.append(train_res)
453 |
454 | model.load_weights(f'Conv2d_multiloss_fold{fold}.h5')
455 | proba_t += model.predict(test, verbose=0, batch_size=1024)[2] / fold_num
456 | proba_oof[yy] += model.predict(train[yy], verbose=0, batch_size=1024)[2]
457 |
458 | oof_y = np.argmax(proba_oof[yy], axis=1)
459 | acc = round(accuracy_score(y[yy], oof_y),3)
460 | print(acc)
461 | oof_score.append(acc)
462 | scores = sum(acc_combo(y_true, y_pred) for y_true, y_pred in zip(y[yy], oof_y)) / oof_y.shape[0]
463 | oof_comm.append(scores)
464 | print(round(scores, 5))
465 |
466 |
467 | # In[ ]:
468 |
469 |
470 |
471 |
472 |
473 | # In[26]:
474 |
475 |
476 | for index,i in enumerate(oof_comm):
477 | print(index,i,oof_score[index])
478 |
479 | oof_dict = {
480 | "oof":proba_oof,
481 | "test":proba_t,
482 | "acc":oof_comm,
483 | }
484 | import joblib
485 | joblib.dump(oof_dict,"0729_generator_one_third_orig_mixup_%.5f_dict.pkl"% np.mean(oof_comm))
486 |
487 |
488 | # In[27]:
489 |
490 |
491 | # import seaborn as sns
492 | # import matplotlib.pyplot as plt
493 | from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score
494 |
495 | def acc_combo(y, y_pred):
496 | # mapping between the numeric behaviour IDs and their codes
497 | mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
498 | 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
499 | 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
500 | 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
501 | 16: 'C_2', 17: 'C_5', 18: 'C_6'}
502 | # convert the behaviour IDs to their codes
503 | code_y, code_y_pred = mapping[y], mapping[y_pred]
504 | if code_y == code_y_pred: # codes match exactly: score 1.0
505 | return 1.0
506 | elif code_y.split("_")[0] == code_y_pred.split("_")[0]: # only the letter part matches: score 1.0/7
507 | return 1.0/7
508 | elif code_y.split("_")[1] == code_y_pred.split("_")[1]: # only the number part matches: score 1.0/3
509 | return 1.0/3
510 | else:
511 | return 0.0
512 |
513 | train_y = y
514 | labels = np.argmax(proba_t, axis=1)
515 | oof_y = np.argmax(proba_oof, axis=1)
516 | print(round(accuracy_score(train_y, oof_y), 5))
517 | scores = sum(acc_combo(y_true, y_pred) for y_true, y_pred in zip(train_y, oof_y)) / oof_y.shape[0]
518 | print(round(scores, 5))
519 | data_path = '../../data/'
520 | sub = pd.read_csv(data_path+'提交结果示例.csv')
521 | sub['behavior_id'] = labels
522 |
523 | vc = pd.Series(train_y).value_counts().sort_index()
524 | # sns.barplot(vc.index, vc.values)
525 | # plt.show()
526 |
527 | vc = pd.Series(oof_y).value_counts().sort_index()
528 | # sns.barplot(vc.index, vc.values)
529 | # plt.show()
530 |
531 | vc = sub['behavior_id'].value_counts().sort_index()
532 | # sns.barplot(vc.index, vc.values)
533 | # plt.show()
534 | sub.to_csv('0729_generator_one_third_orig_mixup_%.5f.csv' % scores, index=False)
535 | sub.info()
536 |
--------------------------------------------------------------------------------
/Preliminary/ensemble_1_to_allin088681.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding: utf-8
3 |
4 | # In[1]:
5 |
6 |
7 | # rank
8 | # 0.86602
9 | # 0.88681
10 |
11 |
12 | # In[2]:
13 |
14 |
15 | import numpy as np
16 | import pandas as pd
17 | import lightgbm as lgb
18 | from sklearn.model_selection import StratifiedKFold, train_test_split
19 | from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score
20 | from sklearn.preprocessing import LabelEncoder
21 | import seaborn as sns
22 | import matplotlib.pyplot as plt
23 | from scipy.stats import entropy
24 | import gc
25 | import os
26 | from tqdm import tqdm
27 | # pd.set_option('display.max_columns', 600)
28 | # pd.set_option('display.max_rows', 600)
29 | # from IPython.core.interactiveshell import InteractiveShell
30 | # InteractiveShell.ast_node_interactivity = "all"
31 | def acc_combo(y, y_pred):
32 | # mapping between the numeric behaviour IDs and their codes
33 | mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
34 | 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
35 | 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
36 | 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
37 | 16: 'C_2', 17: 'C_5', 18: 'C_6'}
38 | # convert the behaviour IDs to their codes
39 | code_y, code_y_pred = mapping[y], mapping[y_pred]
40 | if code_y == code_y_pred: # codes match exactly: score 1.0
41 | return 1.0
42 | elif code_y.split("_")[0] == code_y_pred.split("_")[0]: # only the letter part matches: score 1.0/7
43 | return 1.0/7
44 | elif code_y.split("_")[1] == code_y_pred.split("_")[1]: # only the number part matches: score 1.0/3
45 | return 1.0/3
46 | else:
47 | return 0.0
48 | num2detail_mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
49 | 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
50 | 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
51 | 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
52 | 16: 'C_2', 17: 'C_5', 18: 'C_6'}
53 |
54 | def feature_test_with_cv(X, y, params=None, cate_feas='auto', nfold=3):
55 | """
56 | [For LightGBM ONLY]
57 | Use cross validation to test if the feature distribution is the same in both train and test sets.
58 | y: 'istest' column with values of 0-1
59 | Example:
60 | df_fea_auc = get_feature_report_by_covariate_shift_test(df, features=gcn_feas, all_cate_feas=[], params=None, nfold=3)['df_fea_auc']
61 | """
62 | if params is None:
63 | params = {
64 | 'n_estimators': 100,
65 | 'learning_rate': 0.1,
66 | 'boosting_type': 'gbdt',
67 | 'objective': 'binary',
68 | 'early_stopping_rounds': 25,
69 | 'metric': 'auc',
70 | 'n_jobs': -1,
71 | 'num_leaves': 31,
72 | 'seed': 2020
73 | }
74 | sfold = StratifiedKFold(n_splits=nfold, shuffle=True, random_state=2020)
75 | models = []
76 | val_auc = 0
77 | train_auc = 0
78 | oof = np.zeros(len(X))
79 | for _, (train_idx, val_idx) in enumerate(sfold.split(X, y)):
80 | train_set = lgb.Dataset(X.iloc[train_idx],
81 | y.iloc[train_idx],
82 | categorical_feature=cate_feas)
83 | val_set = lgb.Dataset(X.iloc[val_idx],
84 | y.iloc[val_idx],
85 | categorical_feature=cate_feas)
86 | model = lgb.train(params,
87 | train_set,
88 | valid_sets=[train_set, val_set],
89 | verbose_eval=20)
90 | val_auc += model.best_score['valid_1']['auc'] / nfold
91 | train_auc += model.best_score['training']['auc'] / nfold
92 | oof[val_idx] = model.predict(X.iloc[val_idx])
93 | models.append(model)
94 | return train_auc, val_auc, models, oof
95 |
96 | def get_feature_report_by_covariate_shift_test(df_raw,
97 | features=None,
98 | all_cate_feas=[],
99 | params=None,
100 | nfold=3,
101 | y2test='istrain',
102 | train_all_feas=False):
103 | """
104 | Use cross validation to test if the feature distribution is the same in both train and test sets.
105 | Args:
106 | y2test: target to test, 'istrain' or 'istest'
107 | train_all_feas: if True, the model will be trained with all features together
108 | Return:
109 | result_dict: dict
110 | """
111 | df = df_raw.copy()
112 | del df_raw
113 | gc.collect()
114 | if features is None:
115 | # logger.info(
116 | # "features is none, all cols will be used except 'istrain' or 'istest'!"
117 | # )
118 | features = [
119 | col for col in df.columns if col not in ['istrain', 'istest']
120 | ]
121 | if train_all_feas:
122 | train_auc, val_auc, models, oof = feature_test_with_cv(
123 | X=df[features],
124 | y=df[y2test],
125 | params=params,
126 | cate_feas=[col for col in all_cate_feas if col in features],
127 | nfold=nfold)
128 | df['pred'] = oof
129 | if y2test == 'istrain':
130 | weights = df[df['istrain'] == 1]['pred'].values
131 | weights = (1. / weights) - 1.
132 | weights /= np.mean(weights)
133 | elif y2test == 'istest':
134 | weights = df[df['istest'] == 0]['pred'].values
135 | weights = (1. / (1 - weights)) - 1.
136 | weights /= np.mean(weights)
137 | else:
138 | raise NotImplementedError(
139 | "y2test should be in ['istrain','istest'] !")
140 | result_dict = {
141 | 'train_auc': train_auc,
142 | 'val_auc': val_auc,
143 | 'models': models,
144 | 'weights': weights
145 | }
146 | else:
147 | score_lst = []
148 | fea_lst = []
149 | for fea in features:
150 | if fea in all_cate_feas:
151 | cate_feas = [fea]
152 | else:
153 | cate_feas = 'auto'
154 | # logger.info("=" * 30)
155 | # logger.info(f"Testing: <{fea}> ...")
156 | # logger.info("=" * 30)
157 | train_auc, val_auc, _, _ = feature_test_with_cv(
158 | X=df[[fea]],
159 | y=df[y2test],
160 | params=params,
161 | cate_feas=cate_feas,
162 | nfold=nfold)
163 | fea_lst.append(fea)
164 | score_lst.append((train_auc, val_auc))
165 | df_fea_auc = pd.DataFrame(score_lst, columns=['train_auc', 'val_auc'])
166 | df_fea_auc['feat'] = fea_lst
167 | df_fea_auc = df_fea_auc.sort_values(by='val_auc', ascending=False)
168 | result_dict = {'df_fea_auc': df_fea_auc}
169 | return result_dict
170 |
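# Minimal usage sketch (hypothetical, not executed in this script): after tagging rows with an 'istrain'
# 0/1 column, something like
#   report = get_feature_report_by_covariate_shift_test(df_tagged, features=stack_cols, nfold=3)
#   report['df_fea_auc']
# would rank each stacking feature by how well it separates train from test
# (val_auc close to 0.5 means the feature's distribution looks consistent across the two sets).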
171 |
172 | # In[3]:
173 |
174 |
175 | data_path = 'data/'
176 | data_train = pd.read_csv(data_path+'sensor_train.csv')
177 | data_test = pd.read_csv(data_path+'sensor_test.csv')
178 | data_test['fragment_id'] += 10000
179 | label = 'behavior_id'
180 | data = pd.concat([data_train, data_test], sort=False).sort_values(["fragment_id","time_point"])
181 | data.head()
182 | df = data.drop_duplicates(subset=['fragment_id']).reset_index(drop=True)[['fragment_id', 'behavior_id']]
183 |
184 |
185 | # In[4]:
186 |
187 |
188 | import joblib
189 | data_path = "PKL/"
190 |
191 | now_filepath=data_path + "0730_generator_one_fourth_orig_mixup_087765"
192 | oof_test_data = joblib.load(os.path.join(now_filepath,[x for x in os.listdir(now_filepath) if 'pkl' in x][0]))
193 | oof = oof_test_data["oof"]
194 | preds = oof_test_data["test"]
195 | for i in oof_test_data:
196 | print(i)
197 | stacknp = np.concatenate([oof,preds],axis=0)
198 | print(stacknp.shape)
199 | stackpd = pd.DataFrame(data=stacknp,columns=["0730_generator_one_fourth_orig_mixup_087765"+str(i)for i in range(19)])
200 | stackpd["fragment_id"] = df["fragment_id"]
201 | df = df.merge(stackpd,how='left',on='fragment_id')
202 | C5 = ["0730_generator_one_fourth_orig_mixup_087765"+str(i)for i in range(19)]
203 |
204 | # ####################################
205 | now_filepath=data_path + "0730_generator_one_fifth_orig_mixup_087099"
206 | oof_test_data = joblib.load(os.path.join(now_filepath,[x for x in os.listdir(now_filepath) if 'pkl' in x][0]))
207 | oof = oof_test_data["oof"]
208 | preds = oof_test_data["test"]
209 | for i in oof_test_data:
210 | print(i)
211 | stacknp = np.concatenate([oof,preds],axis=0)
212 | print(stacknp.shape)
213 | stackpd = pd.DataFrame(data=stacknp,columns=["0730_generator_one_fifth_orig_mixup_087099"+str(i)for i in range(19)])
214 | stackpd["fragment_id"] = df["fragment_id"]
215 | df = df.merge(stackpd,how='left',on='fragment_id')
216 | C2 = ["0730_generator_one_fifth_orig_mixup_087099"+str(i)for i in range(19)]
217 |
218 | ###########################################
219 | now_filepath=data_path + "0729_generator_one_third_orig_mixup_086223"
220 | oof_test_data = joblib.load(os.path.join(now_filepath,[x for x in os.listdir(now_filepath) if 'pkl' in x][0]))
221 | oof = oof_test_data["oof"]
222 | preds = oof_test_data["test"]
223 | for i in oof_test_data:
224 | print(i)
225 | stacknp = np.concatenate([oof,preds],axis=0)
226 | print(stacknp.shape)
227 | stackpd = pd.DataFrame(data=stacknp,columns=["0729_generator_one_third_orig_mixup_086223"+str(i)for i in range(19)])
228 | stackpd["fragment_id"] = df["fragment_id"]
229 | df = df.merge(stackpd,how='left',on='fragment_id')
230 | C3 = ["0729_generator_one_third_orig_mixup_086223"+str(i)for i in range(19)]
231 |
232 | ##############################################3
233 | now_filepath=data_path + "0729_generator_one_sixth_orig_mixup_086686"
234 | oof_test_data = joblib.load(os.path.join(now_filepath,[x for x in os.listdir(now_filepath) if 'pkl' in x][0]))
235 | oof = oof_test_data["oof"]
236 | preds = oof_test_data["test"]
237 | for i in oof_test_data:
238 | print(i)
239 | stacknp = np.concatenate([oof,preds],axis=0)
240 | print(stacknp.shape)
241 | stackpd = pd.DataFrame(data=stacknp,columns=["0729_generator_one_sixth_orig_mixup_086686"+str(i)for i in range(19)])
242 | stackpd["fragment_id"] = df["fragment_id"]
243 | df = df.merge(stackpd,how='left',on='fragment_id')
244 | C4 = ["0729_generator_one_sixth_orig_mixup_086686"+str(i)for i in range(19)]
245 | import joblib
246 |
247 | #################################################
248 | now_filepath=data_path + "0728_08648_online792"
249 | oof_test_data = joblib.load(os.path.join(now_filepath,[x for x in os.listdir(now_filepath) if 'pkl' in x][0]))
250 | oof = oof_test_data["oof"]
251 | preds = oof_test_data["test"]
252 | stacknp = np.concatenate([oof,preds],axis=0)
253 | print(stacknp.shape)
254 | stackpd = pd.DataFrame(data=stacknp,columns=["0728_08648_online792"+str(i)for i in range(19)])
255 | stackpd["fragment_id"] = df["fragment_id"]
256 | df = df.merge(stackpd,how='left',on='fragment_id')
257 | # # oof_test_data
258 | C1 = ["0728_08648_online792"+str(i)for i in range(19)]
259 |
260 | ###################################################
261 | now_filepath=data_path + "0725_conv2_2_net_weight_comm_0.85568"
262 | oof_test_data = joblib.load(os.path.join(now_filepath,[x for x in os.listdir(now_filepath) if 'pkl' in x][0]))
263 | oof = oof_test_data["oof"]
264 | preds = oof_test_data["test"]
265 | stacknp = np.concatenate([oof,preds],axis=0)
266 | print(stacknp.shape)
267 | stackpd = pd.DataFrame(data=stacknp,columns=["0725_conv2_2_net_weight_comm_0"+str(i)for i in range(19)])
268 | stackpd["fragment_id"] = df["fragment_id"]
269 | df = df.merge(stackpd,how='left',on='fragment_id')
270 | C6 = ["0725_conv2_2_net_weight_comm_0"+str(i)for i in range(19)]
271 |
272 | ####################################################
273 | now_filepath=data_path + "0721_conv2_2_net_oof_comm_nn0.84665"
274 | oof_test_data = joblib.load(os.path.join(now_filepath,[x for x in os.listdir(now_filepath) if 'pkl' in x][0]))
275 | oof = oof_test_data["oof"]
276 | preds = oof_test_data["test"]
277 | stacknp = np.concatenate([oof,preds],axis=0)
278 | print(stacknp.shape)
279 | stackpd = pd.DataFrame(data=stacknp,columns=["0721_conv2_2_net_oof_comm_nn0"+str(i)for i in range(19)])
280 | stackpd["fragment_id"] = df["fragment_id"]
281 | df = df.merge(stackpd,how='left',on='fragment_id')
282 | C7 = ["0721_conv2_2_net_oof_comm_nn0"+str(i)for i in range(19)]
283 |
284 | ####################################################
285 | now_filepath=data_path + "spetron_cnn"
286 | oof_test_data = joblib.load(os.path.join(now_filepath,[x for x in os.listdir(now_filepath) if 'pkl' in x][0]))
287 | oof = oof_test_data["oof"]
288 | preds = oof_test_data["test"]
289 | stacknp = np.concatenate([oof,preds],axis=0)
290 | print(stacknp.shape)
291 | stackpd = pd.DataFrame(data=stacknp,columns=["spetron0728_conv2_2_net_multiloss_0"+str(i)for i in range(19)])
292 | stackpd["fragment_id"] = df["fragment_id"]
293 | df = df.merge(stackpd,how='left',on='fragment_id')
294 |
295 | ###################################################
296 | now_filepath=data_path + "multi_lstm"
297 | oof_test_data = joblib.load(os.path.join(now_filepath,[x for x in os.listdir(now_filepath) if 'pkl' in x][0]))
298 | oof = oof_test_data["oof"]
299 | preds = oof_test_data["test"]
300 | stacknp = np.concatenate([oof,preds],axis=0)
301 | stackpd = pd.DataFrame(data=stacknp,columns=["lstm_mutiloss_4sub_bs32"+str(i)for i in range(19)])
302 | stackpd["fragment_id"] = df["fragment_id"]
303 | df = df.merge(stackpd,how='left',on='fragment_id')
304 |
305 |
306 | # In[5]:
307 |
308 |
309 | train_df = df[df[label].isna()==False].reset_index(drop=True)
310 | test_df = df[df[label].isna()==True].reset_index(drop=True)
311 |
312 | drop_feat = ["istrain"]
313 | used_feat = [f for f in train_df.columns if f not in (['fragment_id', label] + drop_feat)]
314 | print(used_feat)
315 | df["istrain"] = (df[label].isna()==False).astype(np.int8)
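# Adversarial validation: for every meta-feature, a small LightGBM classifier tries to tell
# train rows from test rows; a high validation AUC means that feature's distribution shifts
# between the two sets.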
316 | result_dict = get_feature_report_by_covariate_shift_test(df,
317 | features=used_feat,
318 | all_cate_feas=[],
319 | params=None,
320 | nfold=3,
321 | y2test='istrain',
322 | train_all_feas=False)
323 |
324 | result_df = result_dict["df_fea_auc"]
325 | result_df
326 |
327 |
328 | # In[6]:
329 |
330 |
331 | result_df = result_dict["df_fea_auc"]
332 | drop_bylgb = list(result_df[result_df["val_auc"] > 0.7]["feat"])
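# Meta-features whose train-vs-test AUC exceeds 0.7 are treated as shifted and dropped from the stacker.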
333 | result_df
334 |
335 | train_df = df[df[label].isna()==False].reset_index(drop=True)
336 | test_df = df[df[label].isna()==True].reset_index(drop=True)
337 |
338 | drop_feat = ['acc_median','acc_y_min','acc_y_max', 'acc_std','acc_y_mean','acc_min', 'acc_x_mean']  # selected by the LGB feature-AUC screening (overridden below)
339 | drop_feat = ["istrain"] + drop_bylgb
340 | used_feat = [f for f in train_df.columns if f not in (['fragment_id', label] + drop_feat)and 'id'not in f]
341 | print(len(used_feat))
342 | print(used_feat)
343 |
344 | train_x = train_df[used_feat]
345 | train_y = train_df[label]
346 | test_x = test_df[used_feat]
347 |
348 |
349 | # In[7]:
350 |
351 |
352 | scores = []
353 | imp = pd.DataFrame()
354 | imp['feat'] = used_feat
355 | from sklearn.linear_model import RidgeClassifier,LogisticRegression
356 | params = {
357 | 'learning_rate': 0.1,
358 | 'metric': 'multi_error',
359 | 'objective': 'multiclass',
360 | 'num_class': 19,
361 | 'feature_fraction': 0.80,
362 | 'bagging_fraction': 0.75,
363 | 'bagging_freq': 2,
364 | 'n_jobs': -1,
365 | 'max_depth': 6,
366 | 'num_leaves': 64,
367 | # 'lambda_l1': 0.5,
368 | # 'lambda_l2': 0.5,
369 | }
370 |
371 | oof_train = np.zeros((len(train_x), 19))
372 | preds = np.zeros((len(test_x), 19))
373 | folds = 5
374 | # seeds = [
375 | # seeds = []
376 | seeds = [1111, 1024, 1314, 6666, 9999, 6969,44, 2020, 527, 1527,404,721]
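# Bag the stacking LightGBM over multiple seeds x 5 stratified folds; oof and test
# probabilities are averaged over every seed/fold model.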
377 | for seed in seeds:
378 | kfold = StratifiedKFold(n_splits=folds, shuffle=True, random_state=seed)
379 | for fold, (trn_idx, val_idx) in enumerate(kfold.split(train_x, train_y)):
380 | x_trn, y_trn, x_val, y_val = train_x.iloc[trn_idx], train_y.iloc[trn_idx], train_x.iloc[val_idx], train_y.iloc[val_idx]
381 | train_set = lgb.Dataset(x_trn, y_trn)
382 | val_set = lgb.Dataset(x_val, y_val)
383 | print(str(fold)*10)
384 | #
385 | model = lgb.train(params, train_set, num_boost_round=100,
386 | valid_sets=(train_set, val_set), early_stopping_rounds=50,
387 | verbose_eval=50)
388 | oof_train[val_idx] += model.predict(x_val) / len(seeds)
389 | preds += model.predict(test_x) / folds / len(seeds)
390 | scores.append(model.best_score['valid_1']['multi_error'])
391 |
392 | imp['gain' + str(fold + 1)] = model.feature_importance(importance_type='gain')
393 | imp['split' + str(fold + 1)] = model.feature_importance(importance_type='split')
394 | del x_trn, y_trn, x_val, y_val, model, train_set, val_set
395 | gc.collect()
396 | imp['gain'] = imp[[f for f in imp.columns if 'gain' in f]].sum(axis=1)/folds
397 | imp['split'] = imp[[f for f in imp.columns if 'split' in f]].sum(axis=1)
398 | imp = imp.sort_values(by=['gain'], ascending=False)
399 | # imp[['feat', 'gain', 'split']]
400 | imp = imp.sort_values(by=['split'], ascending=False)
401 | imp = imp.merge(result_df,on='feat',how='left')
402 | imp[['feat', 'gain', 'split',"train_auc","val_auc"]]
403 |
404 |
405 | # In[8]:
406 |
407 |
408 | import seaborn as sns
409 | import matplotlib.pyplot as plt
410 | from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score
411 |
412 | def acc_combo(y, y_pred):
413 | # mapping from numeric behavior ID to behavior code
414 | mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
415 | 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
416 | 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
417 | 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
418 | 16: 'C_2', 17: 'C_5', 18: 'C_6'}
419 | # convert behavior IDs to codes
420 | code_y, code_y_pred = mapping[y], mapping[y_pred]
421 | if code_y == code_y_pred: # identical codes score 1.0
422 | return 1.0
423 | elif code_y.split("_")[0] == code_y_pred.split("_")[0]: # only the letter part matches, score 1.0/7
424 | return 1.0/7
425 | elif code_y.split("_")[1] == code_y_pred.split("_")[1]: # only the digit part matches, score 1.0/3
426 | return 1.0/3
427 | else:
428 | return 0.0
429 |
430 | labels = np.argmax(preds, axis=1)
431 | oof_y = np.argmax(oof_train, axis=1)
432 | print(round(accuracy_score(train_y, oof_y), 5))
433 | scores = sum(acc_combo(y_true, y_pred) for y_true, y_pred in zip(train_y, oof_y)) / oof_y.shape[0]
434 | print(round(scores, 5))
435 | data_path2 = 'data/'
436 | sub = pd.read_csv(data_path2+'提交结果示例.csv')
437 | sub['behavior_id'] = labels
438 | sub.to_csv('sub/allin%.5f.csv' % scores, index=False)
439 | print('file has been saved!!!!!!!!!!!!!!!!!!!!!!!!!')
440 | sub.info()
441 |
442 |
443 | # In[ ]:
444 |
445 |
446 |
447 |
448 |
--------------------------------------------------------------------------------
/Preliminary/ensemble_2_to_0806allin088716.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding: utf-8
3 |
4 | # In[1]:
5 |
6 |
7 | # submission scores
8 | # 0.85861
9 | # 0.88053
10 |
11 |
12 | # In[2]:
13 |
14 |
15 | import numpy as np
16 | import pandas as pd
17 | import lightgbm as lgb
18 | from sklearn.model_selection import StratifiedKFold, train_test_split
19 | from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score
20 | from sklearn.preprocessing import LabelEncoder
21 | # import seaborn as sns
22 | # import matplotlib.pyplot as plt
23 | from scipy.stats import entropy
24 | import gc
25 | import os
26 | from tqdm import tqdm
27 | # pd.set_option('display.max_columns', 300)
28 | # pd.set_option('display.max_rows', 50)
29 | # from IPython.core.interactiveshell import InteractiveShell
30 | # InteractiveShell.ast_node_interactivity = "all"
31 |
32 |
33 | # In[3]:
34 |
35 |
36 | def acc_combo(y, y_pred):
37 | # mapping from numeric behavior ID to behavior code
38 | mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
39 | 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
40 | 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
41 | 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
42 | 16: 'C_2', 17: 'C_5', 18: 'C_6'}
43 | # convert behavior IDs to codes
44 | code_y, code_y_pred = mapping[y], mapping[y_pred]
45 | if code_y == code_y_pred: # identical codes score 1.0
46 | return 1.0
47 | elif code_y.split("_")[0] == code_y_pred.split("_")[0]: # only the letter part matches, score 1.0/7
48 | return 1.0/7
49 | elif code_y.split("_")[1] == code_y_pred.split("_")[1]: # only the digit part matches, score 1.0/3
50 | return 1.0/3
51 | else:
52 | return 0.0
53 |
54 |
55 | # In[4]:
56 |
57 |
58 | num2detail_mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
59 | 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
60 | 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
61 | 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
62 | 16: 'C_2', 17: 'C_5', 18: 'C_6'}
63 |
64 |
65 | # In[5]:
66 |
67 |
68 | def feature_test_with_cv(X, y, params=None, cate_feas='auto', nfold=3):
69 | """
70 | [For LightGBM ONLY]
71 | Use cross validation to test if the feature distribution is the same in both train and test sets.
72 | y: 'istrain' / 'istest' indicator column with values of 0-1
73 | Example:
74 | result_dict = get_feature_report_by_covariate_shift_test(df, features=gcn_feas, all_cate_feas=[], params=None, nfold=3)
75 | """
76 | if params is None:
77 | params = {
78 | 'n_estimators': 100,
79 | 'learning_rate': 0.1,
80 | 'boosting_type': 'gbdt',
81 | 'objective': 'binary',
82 | 'early_stopping_rounds': 25,
83 | 'metric': 'auc',
84 | 'n_jobs': -1,
85 | 'num_leaves': 31,
86 | 'seed': 2020
87 | }
88 | sfold = StratifiedKFold(n_splits=nfold, shuffle=True, random_state=2020)
89 | models = []
90 | val_auc = 0
91 | train_auc = 0
92 | oof = np.zeros(len(X))
93 | for _, (train_idx, val_idx) in enumerate(sfold.split(X, y)):
94 | train_set = lgb.Dataset(X.iloc[train_idx],
95 | y.iloc[train_idx],
96 | categorical_feature=cate_feas)
97 | val_set = lgb.Dataset(X.iloc[val_idx],
98 | y.iloc[val_idx],
99 | categorical_feature=cate_feas)
100 | model = lgb.train(params,
101 | train_set,
102 | valid_sets=[train_set, val_set],
103 | verbose_eval=20)
104 | val_auc += model.best_score['valid_1']['auc'] / nfold
105 | train_auc += model.best_score['training']['auc'] / nfold
106 | oof[val_idx] = model.predict(X.iloc[val_idx])
107 | models.append(model)
108 | return train_auc, val_auc, models, oof
109 |
110 | def get_feature_report_by_covariate_shift_test(df_raw,
111 | features=None,
112 | all_cate_feas=[],
113 | params=None,
114 | nfold=3,
115 | y2test='istrain',
116 | train_all_feas=False):
117 | """
118 | Use cross validation to test if the feature distribution is the same in both train and test sets.
119 | Args:
120 | y2test: target to test, 'istrain' or 'istest'
121 | train_all_feas: if True, the model will be trained with all features together
122 | Return:
123 | result_dict: dict
124 | """
125 | df = df_raw.copy()
126 | del df_raw
127 | gc.collect()
128 | if features is None:
129 | # logger.info(
130 | # "features is none, all cols will be used except 'istrain' or 'istest'!"
131 | # )
132 | features = [
133 | col for col in df.columns if col not in ['istrain', 'istest']
134 | ]
135 | if train_all_feas:
136 | train_auc, val_auc, models, oof = feature_test_with_cv(
137 | X=df[features],
138 | y=df[y2test],
139 | params=params,
140 | cate_feas=[col for col in all_cate_feas if col in features],
141 | nfold=nfold)
142 | df['pred'] = oof
143 | if y2test == 'istrain':
144 | weights = df[df['istrain'] == 1]['pred'].values
145 | weights = (1. / weights) - 1.
146 | weights /= np.mean(weights)
147 | elif y2test == 'istest':
148 | weights = df[df['istest'] == 0]['pred'].values
149 | weights = (1. / (1 - weights)) - 1.
150 | weights /= np.mean(weights)
151 | else:
152 | raise NotImplementedError(
153 | "y2test should be in ['istrain','istest'] !")
154 | result_dict = {
155 | 'train_auc': train_auc,
156 | 'val_auc': val_auc,
157 | 'models': models,
158 | 'weights': weights
159 | }
160 | else:
161 | score_lst = []
162 | fea_lst = []
163 | for fea in features:
164 | if fea in all_cate_feas:
165 | cate_feas = [fea]
166 | else:
167 | cate_feas = 'auto'
168 | # logger.info("=" * 30)
169 | # logger.info(f"Testing: <{fea}> ...")
170 | # logger.info("=" * 30)
171 | train_auc, val_auc, _, _ = feature_test_with_cv(
172 | X=df[[fea]],
173 | y=df[y2test],
174 | params=params,
175 | cate_feas=cate_feas,
176 | nfold=nfold)
177 | fea_lst.append(fea)
178 | score_lst.append((train_auc, val_auc))
179 | df_fea_auc = pd.DataFrame(score_lst, columns=['train_auc', 'val_auc'])
180 | df_fea_auc['feat'] = fea_lst
181 | df_fea_auc = df_fea_auc.sort_values(by='val_auc', ascending=False)
182 | result_dict = {'df_fea_auc': df_fea_auc}
183 | return result_dict
184 |
185 |
186 | # In[6]:
187 |
188 |
189 | data_path = 'data/'
190 | data_train = pd.read_csv(data_path+'sensor_train.csv')
191 | data_test = pd.read_csv(data_path+'sensor_test.csv')
192 | data_test['fragment_id'] += 10000
193 | label = 'behavior_id'
194 | data = pd.concat([data_train, data_test], sort=False).sort_values(["fragment_id","time_point"])
195 | data.head()
196 | df = data.drop_duplicates(subset=['fragment_id']).reset_index(drop=True)[['fragment_id', 'behavior_id']]
197 | df.head()
198 |
199 |
200 | # In[7]:
201 |
202 |
203 | import joblib
204 | data_path = "PKL/"
205 |
206 |
207 | now_filepath=data_path + "0730_generator_one_fourth_orig_mixup_087765"
208 | oof_test_data = joblib.load(os.path.join(now_filepath,[x for x in os.listdir(now_filepath) if 'pkl' in x][0]))
209 | oof = oof_test_data["oof"]
210 | preds = oof_test_data["test"]
211 | for i in oof_test_data:
212 | print(i)
213 | stacknp = np.concatenate([oof,preds],axis=0)
214 | print(stacknp.shape)
215 | stackpd = pd.DataFrame(data=stacknp,columns=["0730_generator_one_fourth_orig_mixup_087765_class"+str(i)for i in range(19)])
216 | stackpd["fragment_id"] = df["fragment_id"]
217 | df = df.merge(stackpd,how='left',on='fragment_id')
218 | C5 = ["0730_generator_one_fourth_orig_mixup_087765_class"+str(i)for i in range(19)]
219 |
220 | ####################################################################
221 | now_filepath=data_path + "0730_generator_one_fifth_orig_mixup_087099"
222 | oof_test_data = joblib.load(os.path.join(now_filepath,[x for x in os.listdir(now_filepath) if 'pkl' in x][0]))
223 | oof = oof_test_data["oof"]
224 | preds = oof_test_data["test"]
225 | for i in oof_test_data:
226 | print(i)
227 | stacknp = np.concatenate([oof,preds],axis=0)
228 | print(stacknp.shape)
229 | stackpd = pd.DataFrame(data=stacknp,columns=["0730_generator_one_fifth_orig_mixup_087099_class"+str(i)for i in range(19)])
230 | stackpd["fragment_id"] = df["fragment_id"]
231 | df = df.merge(stackpd,how='left',on='fragment_id')
232 | C2 = ["0730_generator_one_fifth_orig_mixup_087099_class"+str(i)for i in range(19)]
233 |
234 | ####################################################################
235 | now_filepath=data_path + "0729_generator_one_third_orig_mixup_086223"
236 | oof_test_data = joblib.load(os.path.join(now_filepath,[x for x in os.listdir(now_filepath) if 'pkl' in x][0]))
237 | oof = oof_test_data["oof"]
238 | preds = oof_test_data["test"]
239 | for i in oof_test_data:
240 | print(i)
241 | stacknp = np.concatenate([oof,preds],axis=0)
242 | print(stacknp.shape)
243 | stackpd = pd.DataFrame(data=stacknp,columns=["0729_generator_one_third_orig_mixup_086223_class"+str(i)for i in range(19)])
244 | stackpd["fragment_id"] = df["fragment_id"]
245 | df = df.merge(stackpd,how='left',on='fragment_id')
246 | C3 = ["0729_generator_one_third_orig_mixup_086223_class"+str(i)for i in range(19)]
247 |
248 | ####################################################################
249 | now_filepath=data_path + "0729_generator_one_sixth_orig_mixup_086686"
250 | oof_test_data = joblib.load(os.path.join(now_filepath,[x for x in os.listdir(now_filepath) if 'pkl' in x][0]))
251 | oof = oof_test_data["oof"]
252 | preds = oof_test_data["test"]
253 | for i in oof_test_data:
254 | print(i)
255 | stacknp = np.concatenate([oof,preds],axis=0)
256 | print(stacknp.shape)
257 | stackpd = pd.DataFrame(data=stacknp,columns=["0729_generator_one_sixth_orig_mixup_086686_class"+str(i)for i in range(19)])
258 | stackpd["fragment_id"] = df["fragment_id"]
259 | df = df.merge(stackpd,how='left',on='fragment_id')
260 | C4 = ["0729_generator_one_sixth_orig_mixup_086686_class"+str(i)for i in range(19)]
261 |
262 | ####################################################################
263 | now_filepath=data_path + "0728_08648_online792"
264 | oof_test_data = joblib.load(os.path.join(now_filepath,[x for x in os.listdir(now_filepath) if 'pkl' in x][0]))
265 | oof = oof_test_data["oof"]
266 | preds = oof_test_data["test"]
267 | stacknp = np.concatenate([oof,preds],axis=0)
268 | print(stacknp.shape)
269 | stackpd = pd.DataFrame(data=stacknp,columns=["0728_08648_online792_class"+str(i)for i in range(19)])
270 | stackpd["fragment_id"] = df["fragment_id"]
271 | df = df.merge(stackpd,how='left',on='fragment_id')
272 | # # oof_test_data
273 | C1 = ["0728_08648_online792_class"+str(i)for i in range(19)]
274 |
275 | ####################################################################
276 | now_filepath=data_path + "0725_conv2_2_net_weight_comm_0.85568"
277 | oof_test_data = joblib.load(os.path.join(now_filepath,[x for x in os.listdir(now_filepath) if 'pkl' in x][0]))
278 | oof = oof_test_data["oof"]
279 | preds = oof_test_data["test"]
280 | stacknp = np.concatenate([oof,preds],axis=0)
281 | print(stacknp.shape)
282 | stackpd = pd.DataFrame(data=stacknp,columns=["0725_conv2_2_net_weight_comm_0_class"+str(i)for i in range(19)])
283 | stackpd["fragment_id"] = df["fragment_id"]
284 | df = df.merge(stackpd,how='left',on='fragment_id')
285 | C6 = ["0725_conv2_2_net_weight_comm_0_class"+str(i)for i in range(19)]
286 |
287 |
288 | now_filepath=data_path + "0721_conv2_2_net_oof_comm_nn0.84665"
289 | oof_test_data = joblib.load(os.path.join(now_filepath,[x for x in os.listdir(now_filepath) if 'pkl' in x][0]))
290 | oof = oof_test_data["oof"]
291 | preds = oof_test_data["test"]
292 | stacknp = np.concatenate([oof,preds],axis=0)
293 | print(stacknp.shape)
294 | stackpd = pd.DataFrame(data=stacknp,columns=["0721_conv2_2_net_oof_comm_nn0_class"+str(i)for i in range(19)])
295 | stackpd["fragment_id"] = df["fragment_id"]
296 | df = df.merge(stackpd,how='left',on='fragment_id')
297 | C7 = ["0721_conv2_2_net_oof_comm_nn0_class"+str(i)for i in range(19)]
298 |
299 |
300 | now_filepath=data_path + "spetron_cnn"
301 | oof_test_data = joblib.load(os.path.join(now_filepath,[x for x in os.listdir(now_filepath) if 'pkl' in x][0]))
302 | oof = oof_test_data["oof"]
303 | preds = oof_test_data["test"]
304 | stacknp = np.concatenate([oof,preds],axis=0)
305 | print(stacknp.shape)
306 | stackpd = pd.DataFrame(data=stacknp,columns=["spetron0728_conv2_2_net_multiloss_0_class"+str(i)for i in range(19)])
307 | stackpd["fragment_id"] = df["fragment_id"]
308 | df = df.merge(stackpd,how='left',on='fragment_id')
309 |
310 | now_filepath=data_path + "multi_lstm"
311 | oof_test_data = joblib.load(os.path.join(now_filepath,[x for x in os.listdir(now_filepath) if 'pkl' in x][0]))
312 | oof = oof_test_data["oof"]
313 | preds = oof_test_data["test"]
314 | stacknp = np.concatenate([oof,preds],axis=0)
315 | stackpd = pd.DataFrame(data=stacknp,columns=["lstm_mutiloss_4sub_bs32_class"+str(i)for i in range(19)])
316 | stackpd["fragment_id"] = df["fragment_id"]
317 | df = df.merge(stackpd,how='left',on='fragment_id')
318 |
319 |
320 | # In[8]:
321 |
322 |
323 | train_df = df[df[label].isna()==False].reset_index(drop=True)
324 | test_df = df[df[label].isna()==True].reset_index(drop=True)
325 |
326 | drop_feat = ["istrain"]
327 | used_feat = [f for f in train_df.columns if f not in (['fragment_id', label] + drop_feat)]
328 | print(used_feat)
329 | df["istrain"] = (df[label].isna()==False).astype(np.int8)
330 | result_dict = get_feature_report_by_covariate_shift_test(df,
331 | features=used_feat,
332 | all_cate_feas=[],
333 | params=None,
334 | nfold=3,
335 | y2test='istrain',
336 | train_all_feas=False)
337 |
338 | result_df = result_dict["df_fea_auc"]
339 | result_df
340 |
341 |
342 | # In[9]:
343 |
344 |
345 | result_df = result_dict["df_fea_auc"]
346 | drop_bylgb = list(result_df[result_df["val_auc"] > 0.7]["feat"])
347 | result_df
348 | print('len of drop',len(drop_bylgb))
349 |
350 |
351 | # In[10]:
352 |
353 |
354 |
355 |
356 | train_df = df[df[label].isna()==False].reset_index(drop=True)
357 | test_df = df[df[label].isna()==True].reset_index(drop=True)
358 |
359 | drop_feat = ['acc_median','acc_y_min','acc_y_max', 'acc_std','acc_y_mean','acc_min', 'acc_x_mean']  # selected by the LGB feature-AUC screening (overridden below)
360 | drop_feat = ["istrain"] + drop_bylgb
361 | used_feat = [f for f in train_df.columns if f not in (['fragment_id', label] + drop_feat)and 'id'not in f]
362 | print(len(used_feat))
363 | print(used_feat)
364 |
365 | train_x = train_df[used_feat]
366 | train_y = train_df[label]
367 | test_x = test_df[used_feat]
368 |
369 |
370 | # In[11]:
371 |
372 |
373 | scores = []
374 | imp = pd.DataFrame()
375 | imp['feat'] = used_feat
376 | from sklearn.linear_model import RidgeClassifier,LogisticRegression
377 | params = {
378 | 'learning_rate': 0.03,
379 | 'metric': 'multi_error',
380 | 'objective': 'multiclass',
381 | 'num_class': 19,
382 | 'feature_fraction': 0.80,
383 | 'bagging_fraction': 0.75,
384 | 'bagging_freq': 2,
385 | 'n_jobs': -1,
386 | # 'max_depth': 6,
387 | 'num_leaves': 64,
388 | 'lambda_l1': 0.6,
389 | 'lambda_l2': 0.6,
390 | }
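# Compared with ensemble_1, this stacker uses a lower learning rate (0.03) and L1/L2
# regularisation instead of an explicit depth limit.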
391 |
392 |
393 | oof_train = np.zeros((len(train_x), 19))
394 | preds = np.zeros((len(test_x), 19))
395 | folds = 5
396 | # seeds = [44, 2020, 527, 1527,404,721]
397 | seeds = [1111, 1024, 1314, 6666, 9999, 6969]
398 | for seed in seeds:
399 | kfold = StratifiedKFold(n_splits=folds, shuffle=True, random_state=seed)
400 | for fold, (trn_idx, val_idx) in enumerate(kfold.split(train_x, train_y)):
401 | x_trn, y_trn, x_val, y_val = train_x.iloc[trn_idx], train_y.iloc[trn_idx], train_x.iloc[val_idx], train_y.iloc[val_idx]
402 | train_set = lgb.Dataset(x_trn, y_trn)
403 | val_set = lgb.Dataset(x_val, y_val)
404 | print(str(fold)*10)
405 | #
406 | model = lgb.train(params, train_set, num_boost_round=100,
407 | valid_sets=(train_set, val_set), early_stopping_rounds=50,
408 | verbose_eval=50)
409 | oof_train[val_idx] += model.predict(x_val) / len(seeds)
410 | preds += model.predict(test_x) / folds / len(seeds)
411 | scores.append(model.best_score['valid_1']['multi_error'])
412 |
413 | imp['gain' + str(fold + 1)] = model.feature_importance(importance_type='gain')
414 | imp['split' + str(fold + 1)] = model.feature_importance(importance_type='split')
415 | del x_trn, y_trn, x_val, y_val, model, train_set, val_set
416 | gc.collect()
417 | imp['gain'] = imp[[f for f in imp.columns if 'gain' in f]].sum(axis=1)/folds
418 | imp['split'] = imp[[f for f in imp.columns if 'split' in f]].sum(axis=1)
419 | imp = imp.sort_values(by=['gain'], ascending=False)
420 | # imp[['feat', 'gain', 'split']]
421 | imp = imp.sort_values(by=['split'], ascending=False)
422 | imp = imp.merge(result_df,on='feat',how='left')
423 | imp[['feat', 'gain', 'split',"train_auc","val_auc"]]
424 |
425 |
426 | # In[12]:
427 |
428 |
429 | import seaborn as sns
430 | import matplotlib.pyplot as plt
431 | from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score
432 |
433 | def acc_combo(y, y_pred):
434 | # mapping from numeric behavior ID to behavior code
435 | mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
436 | 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
437 | 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
438 | 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
439 | 16: 'C_2', 17: 'C_5', 18: 'C_6'}
441 | # convert behavior IDs to codes
442 | code_y, code_y_pred = mapping[y], mapping[y_pred]
443 | if code_y == code_y_pred: # identical codes score 1.0
444 | return 1.0
445 | elif code_y.split("_")[0] == code_y_pred.split("_")[0]: # only the letter part matches, score 1.0/7
446 | return 1.0/7
447 | elif code_y.split("_")[1] == code_y_pred.split("_")[1]: # only the digit part matches, score 1.0/3
447 | return 1.0/3
448 | else:
449 | return 0.0
450 |
451 | labels = np.argmax(preds, axis=1)
452 | oof_y = np.argmax(oof_train, axis=1)
453 | print(round(accuracy_score(train_y, oof_y), 5))
454 | scores = sum(acc_combo(y_true, y_pred) for y_true, y_pred in zip(train_y, oof_y)) / oof_y.shape[0]
455 | print(round(scores, 5))
456 | data_path2 = 'data/'
457 | sub = pd.read_csv(data_path2+'提交结果示例.csv')
458 | sub['behavior_id'] = labels
459 |
460 | sub.to_csv('sub/0806allin%.5f.csv' % scores, index=False)
461 | print('file has been saved!!!!!!!!!!!!!!!!!!!!!!!!!')
462 | sub.info()
463 |
464 |
465 | # In[ ]:
466 |
467 |
468 |
469 |
470 |
--------------------------------------------------------------------------------
/Preliminary/PKL/0729_generator_one_sixth_orig_mixup_086686/conv2d-avepooling_fc2-add_feature_template-multiloss-generator-Copy1.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding: utf-8
3 |
4 | # In[1]:
5 |
6 |
7 | seed = 0
8 | import random
9 | import numpy as np
10 | import tensorflow as tf
11 | import os
12 | random.seed(seed)
13 | np.random.seed(seed)
14 | tf.random.set_seed(seed)
15 | os.environ["CUDA_DEVICE_ORDER"] = 'PCI_BUS_ID'
16 | os.environ["CUDA_VISIBLE_DEVICES"] = '0'
17 | os.environ["PYTHONHASHSEED"] = str(seed)
18 |
19 | os.environ['TF_DETERMINISTIC_OPS'] = '1'
20 |
21 |
22 | # In[2]:
23 |
24 |
25 | import numpy as np
26 | import pandas as pd
27 | # select the better-performing models
28 | # import seaborn as sns
29 |
30 | # import matplotlib.pyplot as plt
31 | from tqdm import tqdm
32 | from scipy.signal import resample
33 | from tensorflow.keras import layers
34 | from tensorflow.keras.layers import *
35 | from tensorflow.keras.models import Model
36 | from tensorflow.keras.optimizers import Adam
37 | from tensorflow.keras.utils import to_categorical
38 | from sklearn.model_selection import StratifiedKFold
39 | from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
40 | import os
41 | from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score
42 |
43 | from sklearn.preprocessing import StandardScaler,MinMaxScaler
44 |
45 |
46 | def acc_combo(y, y_pred):
47 | # mapping from numeric behavior ID to behavior code
48 | mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
49 | 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
50 | 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
51 | 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
52 | 16: 'C_2', 17: 'C_5', 18: 'C_6'}
53 | # convert behavior IDs to codes
54 | code_y, code_y_pred = mapping[y], mapping[y_pred]
55 | if code_y == code_y_pred: # identical codes score 1.0
56 | return 1.0
57 | elif code_y.split("_")[0] == code_y_pred.split("_")[0]: # only the letter part matches, score 1.0/7
58 | return 1.0/7
59 | elif code_y.split("_")[1] == code_y_pred.split("_")[1]: # only the digit part matches, score 1.0/3
60 | return 1.0/3
61 | else:
62 | return 0.0
63 |
64 |
65 | sample_num = 60
66 |
67 |
68 | # In[10]:
69 |
70 |
71 | root_path = '../../data/'
72 | train = pd.read_csv(root_path+'sensor_train.csv')
73 | test = pd.read_csv(root_path+'sensor_test.csv')
74 | sub = pd.read_csv(root_path+'提交结果示例.csv')
75 | y = train.groupby('fragment_id')['behavior_id'].min()
76 |
77 |
78 | # In[11]:
79 |
80 |
81 | def add_features(df):
82 | print(df.columns)
83 | df['acc'] = (df.acc_x ** 2 + df.acc_y ** 2 + df.acc_z ** 2) ** .5
84 | df['accg'] = (df.acc_xg ** 2 + df.acc_yg ** 2 + df.acc_zg ** 2) ** .5
85 | df['thetax']=np.arctan(df.acc_xg/
86 | np.sqrt(df.acc_yg*df.acc_yg+df.acc_zg*df.acc_zg))*180/np.pi
87 | df['thetay']=np.arctan(df.acc_yg/
88 | np.sqrt(df.acc_xg*df.acc_xg+df.acc_zg*df.acc_zg))*180/np.pi
89 | df['thetaz']=np.arctan(df.acc_zg/
90 | np.sqrt(df.acc_yg*df.acc_yg+df.acc_xg*df.acc_xg))*180/np.pi
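# thetax/thetay/thetaz: per-axis tilt angles in degrees, derived from the gravity-inclusive
# accelerometer channels (acc_*g).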
91 |
92 | df['xy'] = (df['acc_x'] ** 2 + df['acc_y'] ** 2) ** 0.5
93 | df['xy_g'] = (df['acc_xg'] ** 2 + df['acc_yg'] ** 2) ** 0.5
94 |
95 | df['g'] = ((df["acc_x"] - df["acc_xg"]) ** 2 +
96 | (df["acc_y"] - df["acc_yg"]) ** 2 + (df["acc_z"] - df["acc_zg"]) ** 2) ** 0.5
97 |
98 | print(df.columns)
99 | return df
100 |
101 |
102 | # In[12]:
103 |
104 |
105 | train=add_features(train)
106 | test=add_features(test)
107 |
108 |
109 | # In[13]:
110 |
111 |
112 | group1 = [x for x in train.columns if x not in ['fragment_id', 'time_point','behavior_id']]
113 | group1
114 |
115 |
116 | # In[14]:
117 |
118 |
119 | FEATURE_NUM=14
120 |
121 |
122 | # In[15]:
123 |
124 |
125 |
126 | x = np.zeros((7292, sample_num, FEATURE_NUM, 1))
127 | t = np.zeros((7500, sample_num, FEATURE_NUM, 1))
128 |
129 |
130 | # In[ ]:
131 |
132 |
133 |
134 |
135 |
136 | # In[16]:
137 |
138 |
139 |
140 | train = train[['fragment_id', 'time_point', 'behavior_id']+group1]
141 | test = test[['fragment_id', 'time_point']+group1]
142 | print(train.columns)
143 |
144 | for i in tqdm(range(7292)):
145 | tmp = train[train.fragment_id == i][:sample_num]
146 | x[i,:,:,0] = resample(tmp.drop(['fragment_id', 'time_point', 'behavior_id'],
147 | axis=1)[group1], sample_num, np.array(tmp.time_point))[0].reshape(sample_num,FEATURE_NUM)
148 | for i in tqdm(range(7500)):
149 | tmp = test[test.fragment_id == i][:sample_num]
150 | t[i,:,:,0] = resample(tmp.drop(['fragment_id', 'time_point'],
151 | axis=1)[group1], sample_num, np.array(tmp.time_point))[0].reshape(sample_num,FEATURE_NUM)
152 |
153 |
154 |
155 |
156 |
157 | # In[18]:
158 |
159 |
160 | # data generator with same-class mixup augmentation
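# With probability 6/7 a batch is augmented: the first half of each sequence is kept and the
# second half is taken from a randomly chosen sample of the same class, while the labels are
# left unchanged; this is a within-class cut-and-concatenate variant of mixup.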
161 | def data_generator(data,label,class17label,batch_size):
162 | """
163 | data:array (7292, 60, 14, 1)
164 | label: list of target arrays, e.g. [y_1, y_2, y_3], each indexed per batch
165 | class17label: Series of the raw behavior_id, used to group samples of the same class
166 | """
167 | class17label=np.asarray(class17label)
168 | length=len(data)
169 | seq_length=len(data[0])
170 | half_seq_length=int(seq_length/2)
171 |
172 | # index2label
173 | index2label=dict(zip(range(length),class17label))
174 |
175 | label2index={}
176 | # print(class17label)
177 | for i in range(length):
178 | # print(class17label[i],label2index.get(class17label[i],[]))
179 | label2index[class17label[i]]=label2index.get(class17label[i],[])
180 | label2index[class17label[i]].append(i)
181 |
182 | count=0
183 | np.random.seed(seed)  # keep the results reproducible
184 |
185 | while True:
186 |
187 | if count==0 or (count + 1) * batch_size > length: # first batch, or not enough samples left for a full batch
188 | count=0
189 | shuffle_index = list(range(length))
190 | np.random.shuffle(shuffle_index) ## shuffle the indices
191 |
192 | start = count * batch_size ## start of this batch
193 | end = (count + 1) * batch_size ## end of this batch
194 | inds=shuffle_index[start:end]
195 |
196 | count+=1
197 |
198 | if random.choice([0,1,1,1,1,1,1]):
199 | # mixup-style augmentation
200 | # one index -> its label -> all indices belonging to that label
201 | choice_index=[random.choice(label2index[index2label[x]]) for x in inds] # pick a random same-class index for each item (to be concatenated)
202 | # first half of seq_length from the original sample, second half from the randomly chosen same-class sample
203 | res_x_orig=data[inds,:half_seq_length]
204 | res_x=data[choice_index,half_seq_length:]
205 |
206 | # print(inds)
207 | # print(data.shape,res_x_orig.shape,res_x.shape,np.concatenate((res_x_orig,res_x),axis=1).shape)
208 | yield np.concatenate((res_x_orig,res_x),axis=1), [label[0][inds],label[1][inds],label[2][inds]]
209 | else:
210 |
211 | yield data[inds],[label[0][inds],label[1][inds],label[2][inds]]
212 |
213 |
214 |
215 | count=0
216 | for a,b in data_generator(x,[y,y,y],y,32):
217 | print(a.shape,b[0].shape)
218 | count+=1
219 | if count==20:
220 | break
221 |
222 |
223 | # In[19]:
224 |
225 |
226 | def ConvBNRelu(X,filters,kernal_size=(3,3)):
227 | X = Conv2D(filters=filters,
228 | kernel_size=kernal_size,
229 | # activation='relu',
230 | use_bias=False,
231 | padding='same')(X)
232 | X = BatchNormalization()(X)
233 | X = Activation('relu')(X)
234 | return X
235 |
236 |
237 | def ConvRelu(X,filters,kernal_size=(3,3)):
238 | X = Conv2D(filters=filters,
239 | kernel_size=kernal_size,
240 | activation='relu',
241 | use_bias=False,
242 | padding='same')(X)
243 | return X
244 |
245 |
246 | def squeeze_excitation_layer(x, out_dim,ratio=8):
247 | '''
248 | SE module performs inter-channel weighting.
249 | '''
250 | squeeze = GlobalAveragePooling2D()(x)
251 |
252 | excitation = Dense(units=out_dim // ratio)(squeeze)
253 | excitation = Activation('relu')(excitation)
254 | excitation = Dense(units=out_dim)(excitation)
255 | excitation = Activation('sigmoid')(excitation)
256 | excitation = Reshape((1,1,out_dim))(excitation)
257 | scale = multiply([x,excitation])
258 | return scale
259 |
260 | # def SE_Residual(X):
261 | # A =
262 | # X = squeeze_excitation_layer(X,128)
263 | # X = Add()([X,A])
264 |
265 |
266 | def lenet5(input):
267 | A = ConvBNRelu(input,64,kernal_size=(3,3))
268 | # B = ConvBNRelu(input,16,kernal_size=(5,1))
269 | # C = ConvBNRelu(input,16,kernal_size=(7,1))
270 | # ABC = layers.Concatenate()([A,B,C])
271 | X = ConvBNRelu(A,128)
272 | # X = squeeze_excitation_layer(X,128)
273 | X = Dropout(0.2)(X)
274 |
275 | X = AveragePooling2D()(X)
276 |
277 | X = ConvBNRelu(X,256)
278 | X = Dropout(0.3)(X)
279 | # X = squeeze_excitation_layer(X,256)
280 | X = ConvBNRelu(X,512)
281 | X = Dropout(0.5)(X)
282 | # X = squeeze_excitation_layer(X,512)
283 | # X = GlobalMaxPooling2D()(X)
284 | X = GlobalAveragePooling2D()(X)
285 |
286 | # X = BatchNormalization()(X)
287 | return X
288 | import tensorflow as tf
289 | def Net(sample_num):
290 | input1 = Input(shape=(sample_num, FEATURE_NUM, 1))
291 | part = tf.split(input1,axis=2, num_or_size_splits = [6, 2, 6])
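# Assuming the column order produced by add_features, the [6, 2, 6] split gives roughly:
# part[0] = the six raw accelerometer channels, part[1] = the acc/accg magnitudes,
# part[2] = the remaining derived features, so the two branches below see a
# "raw + magnitude" view and a "raw + derived" view of the signal.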
292 | # res = tf.split(c, axis = 3, num_or_size_splits = [2, 2, 4])
293 |
294 |
295 | X1 = Concatenate(axis=-2)([part[0],part[1]])
296 | X1 = lenet5(X1)
297 | X1 = BatchNormalization()(X1)
298 | X1 = Dense(128, activation='relu')(X1)
299 | X1 = BatchNormalization()(X1)
300 | X1 = Dropout(0.2)(X1)
301 |
302 | X2 = Concatenate(axis=-2)([part[0],part[2]])
303 | X2 = lenet5(X2)
304 | X2 = BatchNormalization()(X2)
305 | # X = Dense(512, activation='relu')(X)
306 | # X = BatchNormalization()(X)
307 | X2 = Dense(128, activation='relu')(X2)
308 | X2 = BatchNormalization()(X2)
309 | X2 = Dropout(0.2)(X2)
310 |
311 | X = Concatenate(axis=-1)([X1,X2])
312 |
313 | # X = Dense(256)(X)
314 |
315 | output1 = Dense(4, activation='softmax', name='4class')(X) # coarse class: letter (A/B/C/D)
316 | # output2 = Dense(128)(X)
317 | # output2 = Dense(64)(X)
318 | X = Dense(64)(X)
319 | output2 = Dense(7, activation='softmax', name='7class')(X) # coarse class: digit (0-6)
320 | # X = Dense(32)(X)
321 | # X = Concatenate(axis=-1)([X,output1,output2])
322 | X = Dense(64)(X)
323 | output3 = Dense(19, activation='softmax',name='19class')(X) # fine class (19 behaviors)
324 |
325 |
326 | return Model([input1], [output1,output2,output3])
327 |
328 | # model = Net(60)
329 | # model.summary()
330 |
331 |
332 | # In[20]:
333 |
334 |
335 | # label mappings for the multi-task outputs
336 | mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
337 | 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
338 | 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
339 | 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
340 | 16: 'C_2', 17: 'C_5', 18: 'C_6'}
341 | # letter-level output: 4 classes
342 | new_mapping = {'A':0,'B':1,'C':2,'D':3}
343 |
344 | from sklearn.utils.class_weight import compute_class_weight
345 | # y_train_weight = compute_sample_weight("balanced", train['behavior_id'])
346 | classweights1=compute_class_weight("balanced",['A','B','C','D'], pd.read_csv(root_path+'sensor_train.csv')['behavior_id'].apply(lambda x:mapping[x][0]))
347 | classweights1=pd.DataFrame(classweights1)[0].to_dict()
348 |
349 |
350 |
351 | classweights2=compute_class_weight("balanced",list(range(7)), pd.read_csv(root_path+'sensor_train.csv')['behavior_id'].apply(lambda x:int(mapping[x][2])))
352 | classweights2=pd.DataFrame(classweights2)[0].to_dict()
353 |
354 |
355 |
356 | from sklearn.utils.class_weight import compute_class_weight
357 | # y_train_weight = compute_sample_weight("balanced", train['behavior_id'])
358 | classweights3=compute_class_weight("balanced",np.array(range(19)), pd.read_csv(root_path+'sensor_train.csv')['behavior_id'])
359 | classweights3=pd.DataFrame(classweights3)[0].to_dict()
360 | classweights1,classweights2,classweights3
361 |
362 |
363 | # In[21]:
364 |
365 |
366 | # [:,:,:,[1]]
367 | train = x
368 | test = t
369 |
370 |
371 | fold_num=5
372 | kfold = StratifiedKFold(fold_num,random_state=42,shuffle=True)
373 | proba_t = np.zeros((7500, 19))
374 | proba_oof = np.zeros((7292,19))
375 |
376 | oof_score = []
377 | oof_comm = []
378 | history = []
379 |
380 | from tensorflow.keras.losses import categorical_crossentropy
381 | def custom_loss(y_true, y_pred):
382 | return categorical_crossentropy(y_true, y_pred, label_smoothing=0.05)
383 |
384 | # label mappings for the multi-task outputs
385 | mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
386 | 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
387 | 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
388 | 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
389 | 16: 'C_2', 17: 'C_5', 18: 'C_6'}
390 | # letter-level target: 4 classes
391 | new_mapping = {'A':0,'B':1,'C':2,'D':3}
392 | y_1 = to_categorical([new_mapping[mapping[x][0]] for x in y], num_classes=4)
393 | # digit-level target: 7 classes
394 | new_mapping = {'A':0,'B':1,'C':2,'D':3}
395 | y_2 = to_categorical([mapping[x][2] for x in y], num_classes=7)
396 | # fine-grained target: 19 classes
397 | y_3 = to_categorical(y, num_classes=19)
398 | # y_3=y
399 |
400 |
401 | for fold, (xx, yy) in enumerate(kfold.split(train, y)):
402 |
403 | mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
404 | 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
405 | 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
406 | 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
407 | 16: 'C_2', 17: 'C_5', 18: 'C_6'}
408 | new_mapping = {'A':0,'B':1,'C':2,'D':3}
409 |
410 | model = Net(60)
411 | model.summary()
412 | model.compile(loss=[custom_loss,custom_loss,custom_loss],loss_weights=[3,7,21],
413 | optimizer=Adam(),
414 | metrics=["acc"])
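# The three softmax heads (4-class letter, 7-class digit, 19-class fine label) are trained
# jointly with label smoothing; loss_weights=[3, 7, 21] puts most of the weight on the
# fine-grained 19-class head.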
415 |
416 | plateau3 = ReduceLROnPlateau(monitor="19class_acc",
417 | verbose=1,
418 | mode='max',
419 | factor=0.5,
420 | patience=18)
421 | early_stopping = EarlyStopping(monitor="val_19class_acc",
422 | verbose=1,
423 | mode='max',
424 | patience=60)
425 |
426 | checkpoint = ModelCheckpoint(f'Conv2d_multiloss_mixup_fold{fold}.h5',
427 | monitor="val_19class_acc",
428 | verbose=0,
429 | mode='max',
430 | save_best_only=True)
431 |
432 | train_res = model.fit(data_generator(train[xx], [y_1[xx], y_2[xx], y_3[xx]],y[xx],32),
433 | epochs=1000,
434 | steps_per_epoch=len(xx) // 32,
435 | verbose=1,
436 | shuffle=True,
437 | validation_data=(train[yy], [y_1[yy], y_2[yy],y_3[yy]]),
438 | callbacks=[plateau3, early_stopping, checkpoint],
439 | class_weight=[classweights1,classweights2,classweights3])
440 |
441 | history.append(train_res)
442 |
443 | model.load_weights(f'Conv2d_multiloss_mixup_fold{fold}.h5')
444 | proba_t += model.predict(test, verbose=0, batch_size=1024)[2] / fold_num
445 | proba_oof[yy] += model.predict(train[yy],verbose=0,batch_size=1024) [2]
446 |
447 | oof_y = np.argmax(proba_oof[yy], axis=1)
448 | acc = round(accuracy_score(y[yy], oof_y),3)
449 | print(acc)
450 | oof_score.append(acc)
451 | scores = sum(acc_combo(y_true, y_pred) for y_true, y_pred in zip(y[yy], oof_y)) / oof_y.shape[0]
452 | oof_comm.append(scores)
453 | print(round(scores, 5))
454 |
455 |
456 | # In[ ]:
457 |
458 |
459 | for index,i in enumerate(oof_comm):
460 | print(index,i,oof_score[index])
461 |
462 | oof_dict = {
463 | "oof":proba_oof,
464 | "test":proba_t,
465 | "acc":oof_comm,
466 | }
467 | import joblib
468 | joblib.dump(oof_dict,"0730_generator_one_sixth_orig_mixup_%.5f_dict.pkl"% np.mean(oof_comm))
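# The dumped dict ("oof", "test", "acc") is what the ensemble_*.py scripts later reload from
# PKL/ to build their stacking features.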
469 |
470 |
471 | # In[ ]:
472 |
473 |
474 | # import seaborn as sns
475 | # import matplotlib.pyplot as plt
476 | from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score
477 |
478 | def acc_combo(y, y_pred):
479 | # 数值ID与行为编码的对应关系
480 | mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
481 | 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
482 | 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
483 | 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
484 | 16: 'C_2', 17: 'C_5', 18: 'C_6'}
485 | # 将行为ID转为编码
486 | code_y, code_y_pred = mapping[y], mapping[y_pred]
487 | if code_y == code_y_pred: #编码完全相同得分1.0
488 | return 1.0
489 | elif code_y.split("_")[0] == code_y_pred.split("_")[0]: #编码仅字母部分相同得分1.0/7
490 | return 1.0/7
491 | elif code_y.split("_")[1] == code_y_pred.split("_")[1]: #编码仅数字部分相同得分1.0/3
492 | return 1.0/3
493 | else:
494 | return 0.0
495 |
496 | train_y = y
497 | labels = np.argmax(proba_t, axis=1)
498 | oof_y = np.argmax(proba_oof, axis=1)
499 | print(round(accuracy_score(train_y, oof_y), 5))
500 | scores = sum(acc_combo(y_true, y_pred) for y_true, y_pred in zip(train_y, oof_y)) / oof_y.shape[0]
501 | print(round(scores, 5))
502 | data_path = '../../data/'
503 | sub = pd.read_csv(data_path+'提交结果示例.csv')
504 | sub['behavior_id'] = labels
505 |
506 | vc = pd.Series(train_y).value_counts().sort_index()
507 | # sns.barplot(vc.index, vc.values)
508 | # plt.show()
509 |
510 | vc = pd.Series(oof_y).value_counts().sort_index()
511 | # sns.barplot(vc.index, vc.values)
512 | # plt.show()
513 |
514 | vc = sub['behavior_id'].value_counts().sort_index()
515 | # sns.barplot(vc.index, vc.values)
516 | # plt.show()
517 | sub.to_csv('0729_generator_one_third_orig_mixup_%.5f.csv' % scores, index=False)
518 | sub.info()
519 |
520 |
521 | # In[22]:
522 |
523 |
524 |
525 | # %matplotlib inline
526 | # from sklearn.metrics import confusion_matrix
527 | # import matplotlib.pyplot as plt
528 | # import numpy as np
529 |
530 | # def plot_confusion_matrix(cm,classes,title='Confusion Matrix'):
531 |
532 | # plt.figure(figsize=(12, 9), dpi=100)
533 | # np.set_printoptions(precision=2)
534 |
535 | # sns.heatmap(cm,annot=True)
536 | # plt.title(title)
537 | # plt.xticks(ticks=range(19),labels=classes)
538 | # plt.yticks(ticks=range(19),labels=classes)
539 |
540 | # plt.ylabel('Actual label')
541 | # plt.xlabel('Predict label')
542 | # plt.show()
543 |
544 | # # classes holds the names of the different categories (19 here)
545 | # num2detail_mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
546 | # 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
547 | # 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
548 | # 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
549 | # 16: 'C_2', 17: 'C_5', 18: 'C_6'}
550 |
551 | # classes = [num2detail_mapping[int(i)]for i in range(19)]
552 | # print(classes)
553 | # # compute the confusion matrix
554 | # cm = confusion_matrix(train_y, oof_y,normalize='true')
555 | # cm = np.round(cm,2)
556 | # plot_confusion_matrix(cm,classes, title='confusion matrix')
557 |
558 |
559 | # In[ ]:
560 |
561 |
562 |
563 |
564 |
--------------------------------------------------------------------------------
/Preliminary/PKL/multi_lstm/mutil_loss_Lstm.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding: utf-8
3 |
4 | # In[1]:
5 |
6 |
7 | import numpy as np
8 | import pandas as pd
9 |
10 | from tqdm import tqdm
11 | from scipy.signal import resample
12 | from tensorflow.keras import layers
13 | from tensorflow.keras.layers import *
14 | from tensorflow.keras.models import Model
15 | from tensorflow.keras.optimizers import Adam
16 | from tensorflow.keras.utils import to_categorical
17 | from sklearn.model_selection import StratifiedKFold
18 | from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
19 | import os
20 | from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score
21 |
22 | from sklearn.preprocessing import StandardScaler,MinMaxScaler
23 |
24 | os.environ["CUDA_VISIBLE_DEVICES"] = "0"
25 | os.environ["TF_KERAS"] = "1"
26 |
27 | def acc_combo(y, y_pred):
28 | # mapping from numeric behavior ID to behavior code
29 | mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
30 | 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
31 | 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
32 | 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
33 | 16: 'C_2', 17: 'C_5', 18: 'C_6'}
34 | # convert behavior IDs to codes
35 | code_y, code_y_pred = mapping[y], mapping[y_pred]
36 | if code_y == code_y_pred: # identical codes score 1.0
37 | return 1.0
38 | elif code_y.split("_")[0] == code_y_pred.split("_")[0]: # only the letter part matches, score 1.0/7
39 | return 1.0/7
40 | elif code_y.split("_")[1] == code_y_pred.split("_")[1]: # only the digit part matches, score 1.0/3
41 | return 1.0/3
42 | else:
43 | return 0.0
44 |
45 |
46 | sample_num = 60
47 |
48 |
49 | # In[2]:
50 |
51 |
52 | root_path = '../../data/'
53 | train = pd.read_csv(root_path+'sensor_train.csv')
54 | test = pd.read_csv(root_path+'sensor_test.csv')
55 | sub = pd.read_csv(root_path+'提交结果示例.csv')
56 | y = train.groupby('fragment_id')['behavior_id'].min()
57 |
58 | def add_features(df):
59 | print(df.columns)
60 | df['acc'] = (df.acc_x ** 2 + df.acc_y ** 2 + df.acc_z ** 2) ** .5
61 | df['accg'] = (df.acc_xg ** 2 + df.acc_yg ** 2 + df.acc_zg ** 2) ** .5
62 | # df['thetax']=np.arctan(df.acc_xg/
63 | # np.sqrt(df.acc_yg*df.acc_yg+df.acc_zg*df.acc_zg))*180/np.pi
64 | # df['thetay']=np.arctan(df.acc_yg/
65 | # np.sqrt(df.acc_xg*df.acc_xg+df.acc_zg*df.acc_zg))*180/np.pi
66 | # df['thetaz']=np.arctan(df.acc_zg/
67 | # np.sqrt(df.acc_yg*df.acc_yg+df.acc_xg*df.acc_xg))*180/np.pi
68 |
69 | # df['xy'] = (df['acc_x'] ** 2 + df['acc_y'] ** 2) ** 0.5
70 | # df['xy_g'] = (df['acc_xg'] ** 2 + df['acc_yg'] ** 2) ** 0.5
71 |
72 | df['g'] = ((df["acc_x"] - df["acc_xg"]) ** 2 +
73 | (df["acc_y"] - df["acc_yg"]) ** 2 + (df["acc_z"] - df["acc_zg"]) ** 2) ** 0.5
74 |
75 | print(df.columns)
76 | return df
77 |
78 |
79 | train=add_features(train)
80 | test=add_features(test)
81 |
82 | group1 = [x for x in train.columns if x not in ['fragment_id', 'time_point','behavior_id']]
83 | # group1 = ["acc_x","acc_y","acc_z","acc","acc_xg","acc_yg","acc_zg","accg"]
84 |
85 |
86 | test['fragment_id'] += 10000
87 | data = pd.concat([train, test], sort=False)
88 | ss_tool = StandardScaler()
89 | data[group1] = ss_tool.fit_transform(data[group1])
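# Standardise every channel jointly over train + test before the sequences are resampled into
# the (samples, 60, channels, 1) tensors below.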
90 |
91 |
92 | train = data[data["behavior_id"].isna()==False].reset_index(drop=True)
93 | test = data[data["behavior_id"].isna()==True].reset_index(drop=True)
94 | test['fragment_id'] -= 10000
95 | print(test.shape)
96 |
97 |
98 |
99 | x = np.zeros((7292, sample_num, len(group1), 1))
100 | t = np.zeros((7500, sample_num, len(group1), 1))
101 | print(x.shape)
102 |
103 |
104 | # In[3]:
105 |
106 |
107 |
108 | train = train[['fragment_id', 'time_point', 'behavior_id']+group1]
109 | test = test[['fragment_id', 'time_point']+group1]
110 | print(train.columns)
111 |
112 | for i in tqdm(range(7292)):
113 | tmp = train[train.fragment_id == i][:sample_num]
114 | x[i,:,:,0] = resample(tmp.drop(['fragment_id', 'time_point', 'behavior_id'],
115 | axis=1)[group1], sample_num, np.array(tmp.time_point))[0].reshape(sample_num,len(group1))
116 | for i in tqdm(range(7500)):
117 | tmp = test[test.fragment_id == i][:sample_num]
118 | t[i,:,:,0] = resample(tmp.drop(['fragment_id', 'time_point'],
119 | axis=1)[group1], sample_num, np.array(tmp.time_point))[0].reshape(sample_num,len(group1))
120 |
121 |
122 |
123 |
124 |
125 | # In[7]:
126 |
127 |
128 | def ConvBNRelu(X,filters,kernal_size=(3,3)):
129 | X = Conv2D(filters=filters,
130 | kernel_size=kernal_size,
131 | # activation='relu',
132 | use_bias=False,
133 | padding='same')(X)
134 | X = BatchNormalization()(X)
135 | X = Activation('relu')(X)
136 | return X
137 |
138 |
139 | def ConvRelu(X,filters,kernal_size=(3,3)):
140 | X = Conv2D(filters=filters,
141 | kernel_size=kernal_size,
142 | activation='relu',
143 | use_bias=False,
144 | padding='same')(X)
145 | return X
146 |
147 |
148 | def squeeze_excitation_layer(x, out_dim,ratio=8):
149 | '''
150 | SE module performs inter-channel weighting.
151 | '''
152 | squeeze = GlobalAveragePooling2D()(x)
153 |
154 | excitation = Dense(units=out_dim // ratio)(squeeze)
155 | excitation = Activation('relu')(excitation)
156 | excitation = Dense(units=out_dim)(excitation)
157 | excitation = Activation('sigmoid')(excitation)
158 | excitation = Reshape((1,1,out_dim))(excitation)
159 | scale = multiply([x,excitation])
160 | return scale
161 |
162 |
163 | def lenet5(input):
164 | X = ConvBNRelu(input,64)
165 | # X = squeeze_excitation_layer(X,64)
166 | X = ConvBNRelu(X,128)
167 | X = Dropout(0.2)(X)
168 | # X = squeeze_excitation_layer(X,128)
169 | X = MaxPooling2D()(X)
170 |
171 | X = ConvBNRelu(X,256)
172 | X = Dropout(0.3)(X)
173 | X = squeeze_excitation_layer(X,256)
174 | X = ConvBNRelu(X,512)
175 | X = Dropout(0.5)(X)
176 | # X = squeeze_excitation_layer(X,512)
177 | X = GlobalMaxPooling2D()(X)
178 | # X = BatchNormalization()(X)
179 | return X
180 |
181 | def Net():
182 | input1 = Input(shape=(sample_num, 8, 1))
183 | X = lenet5(input1)
184 | O = Dense(19,)(X)
185 | # O = Dense(19, activation='softmax')(X)
186 | return Model([input1], O)
187 |
188 | # from keras_self_attention import SeqSelfAttention
189 | def LSTM_A(input,INPUT_SIZE = 8,CELL_SIZE = 64):
190 | TIME_STEPS = 60
191 | OUTPUT_SIZE = 19
192 |
193 | activateion_fun = 'tanh'
194 | # inputs = Input(shape=[TIME_STEPS,INPUT_SIZE])
195 | x = LSTM(CELL_SIZE, input_shape = (TIME_STEPS,INPUT_SIZE), return_sequences=True, activation=activateion_fun)(input)
196 | x = LayerNormalization()(x)
197 | x = Dropout(0.2)(x)
198 | x = LSTM(CELL_SIZE, input_shape = (TIME_STEPS,CELL_SIZE), return_sequences=True,activation=activateion_fun)(x)
199 | x = LayerNormalization()(x)
200 | # x = Attention()([x,x])
201 | x = Dropout(0.3)(x)
202 | x = LSTM(CELL_SIZE, input_shape = (TIME_STEPS,CELL_SIZE), return_sequences=True)(x)
203 | x = Attention()([x,x])
204 | x = LayerNormalization()(x)
205 | x = Dropout(0.5)(x)
206 | x = GlobalAveragePooling1D()(x)
207 | return x
208 |
209 | def LSTM_B(input,INPUT_SIZE = 8):
210 | TIME_STEPS = 60
211 | OUTPUT_SIZE = 19
212 | CELL_SIZE = 128
213 | activateion_fun = 'tanh'
214 | x = LSTM(CELL_SIZE, input_shape = (TIME_STEPS,INPUT_SIZE), return_sequences=True, activation=activateion_fun)(input)
215 | x = LayerNormalization()(x)
216 | x = Dropout(0.2)(x)
217 | x = LSTM(CELL_SIZE*2, input_shape = (TIME_STEPS,CELL_SIZE), return_sequences=True,activation=activateion_fun)(x)
218 | x = LayerNormalization()(x)
219 | x = Dropout(0.3)(x)
220 | x = LSTM(CELL_SIZE, input_shape = (TIME_STEPS,CELL_SIZE*2), return_sequences=False)(x)
221 | x = LayerNormalization()(x)
222 | return x
223 |
224 | def LSTM_C(input,INPUT_SIZE = 8):
225 | TIME_STEPS = 60
226 | INPUT_SIZE = 8
227 | OUTPUT_SIZE = 19
228 | CELL_SIZE = 64
229 | activateion_fun = 'tanh'
230 | # inputs = Input(shape=[TIME_STEPS,INPUT_SIZE])
231 | x = LSTM(CELL_SIZE, input_shape = (TIME_STEPS,INPUT_SIZE), return_sequences=True, activation=activateion_fun)(input)
232 | x = LayerNormalization()(x)
233 | x = Dropout(0.2)(x)
234 | x = LSTM(CELL_SIZE, input_shape = (TIME_STEPS,INPUT_SIZE), return_sequences=True, activation=activateion_fun)(x) # stack on the output of the previous LSTM layer
235 | x = LayerNormalization()(x)
236 | x = Dropout(0.3)(x)
237 | x = LSTM(CELL_SIZE*2, input_shape = (TIME_STEPS,CELL_SIZE), return_sequences=True,activation=activateion_fun)(x)
238 | x = LayerNormalization()(x)
239 | x = Dropout(0.3)(x)
240 | x = LSTM(CELL_SIZE, input_shape = (TIME_STEPS,CELL_SIZE*2), return_sequences=False)(x)
241 | x = LayerNormalization()(x)
242 | return x
243 | import tensorflow as tf
244 | def LSTM_Model():
245 |
246 | TIME_STEPS = 60
247 | INPUT_SIZE = len(group1)
248 | OUTPUT_SIZE = 19
249 | activateion_fun = 'tanh'
250 | inputs = Input(shape=[TIME_STEPS,INPUT_SIZE])
251 | part = tf.split(inputs,axis=2, num_or_size_splits = [3,3,1,1,1])
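# Assuming the column order from add_features, the [3, 3, 1, 1, 1] split is roughly the raw
# acc_x/y/z block, the acc_xg/yg/zg block, and the derived acc, accg and g magnitudes;
# LSTM_A is applied to the full input and to several of these channel subsets.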
252 | A = LSTM_A(inputs,CELL_SIZE = 32)
253 | A1 = LSTM_A(part[0],3,CELL_SIZE = 16)
254 | A2 = LSTM_A(part[1],3,CELL_SIZE = 16)
255 | A3 = LSTM_A(Concatenate()([part[2],part[3],part[4]]),3,CELL_SIZE = 16)
256 | A4 = LSTM_A(Concatenate()([part[0],part[2]]),4,CELL_SIZE = 32)
257 | A5 = LSTM_A(Concatenate()([part[1],part[3]]),4,CELL_SIZE = 32)
258 |
259 | # A4 = LSTM_A(part[3],6,CELL_SIZE = 16)
260 | # B = LSTM_B(inputs,INPUT_SIZE=9)
261 | # C = LSTM_C(inputs,CELL_SIZE=46)
262 | x = Concatenate()([A,A1,A2,A3,A4,A5])
263 | x = BatchNormalization()(x)
264 | x = Dense(256, activation='relu')(x)
265 | X = BatchNormalization()(x)
266 | output1 = Dense(4, activation='softmax', name='4class')(X) # coarse class: letter (A/B/C/D)
267 | print(X.shape)
268 | # output2 = Dense(128)(X)
269 | # output2 = Dense(64)(X)
270 | X = Dense(64)(X)
271 | output2 = Dense(7, activation='softmax', name='7class')(X) # coarse class: digit (0-6)
272 | # X = Dense(64)(X)
273 | # X = Dense(32)(X)
274 | # X = Concatenate(axis=-1)([X,output1,output2])
275 | X = Dense(64)(X)
276 | output3 = Dense(19, activation='softmax',name='19class')(X) # fine class (19 behaviors)
277 | print(output3.shape)
278 | return Model([inputs], [output1,output2,output3])
279 |
280 | LSTM_Model().summary()
281 |
282 |
283 | # In[9]:
284 |
285 |
286 | mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
287 | 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
288 | 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
289 | 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
290 | 16: 'C_2', 17: 'C_5', 18: 'C_6'}
291 | from sklearn.utils.class_weight import compute_class_weight
292 | # y_train_weight = compute_sample_weight("balanced", train['behavior_id'])
293 | classweights1=compute_class_weight("balanced",['A','B','C','D'],\
294 | pd.read_csv(root_path+'sensor_train.csv')['behavior_id'].apply(lambda x:mapping[x][0]))
295 | classweights1=pd.DataFrame(classweights1)[0].to_dict()
296 |
297 | classweights2=compute_class_weight("balanced",list(range(7)),\
298 | pd.read_csv(root_path+'sensor_train.csv')['behavior_id'].apply(lambda x:int(mapping[x][2])))
299 | classweights2=pd.DataFrame(classweights2)[0].to_dict()
300 | classweights3=compute_class_weight("balanced",np.array(range(19)), pd.read_csv(root_path+'sensor_train.csv')['behavior_id'])
301 | classweights3=pd.DataFrame(classweights3)[0].to_dict()
302 | print(classweights1,classweights2,classweights3)
303 | print('#'*20)
304 | print(classweights1,classweights2,classweights3)
305 |
306 | # In[19]:
307 |
308 |
309 | # [:,:,:,[1]]
310 | train = x
311 | test = t
312 |
313 |
314 | fold_num=5
315 | kfold = StratifiedKFold(fold_num,random_state=42,shuffle=True)
316 | proba_t = np.zeros((7500, 19))
317 | proba_oof = np.zeros((7292,19))
318 |
319 | oof_score = []
320 | oof_comm = []
321 | history = []
322 |
323 | from tensorflow.keras.losses import categorical_crossentropy
324 | def custom_loss(y_true, y_pred):
325 | return categorical_crossentropy(y_true, y_pred, label_smoothing=0.05)
326 |
327 | # label mappings for the multi-task outputs
328 | mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
329 | 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
330 | 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
331 | 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
332 | 16: 'C_2', 17: 'C_5', 18: 'C_6'}
333 | # letter-level target: 4 classes
334 | new_mapping = {'A':0,'B':1,'C':2,'D':3}
335 | y_1 = to_categorical([new_mapping[mapping[x][0]] for x in y], num_classes=4)
336 | # digit-level target: 7 classes
337 | new_mapping = {'A':0,'B':1,'C':2,'D':3}
338 | y_2 = to_categorical([mapping[x][2] for x in y], num_classes=7)
339 | # fine-grained target: 19 classes
340 | y_3 = to_categorical(y, num_classes=19)
341 |
342 |
343 | for fold, (xx, yy) in enumerate(kfold.split(train, y)):
344 |
345 | mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
346 | 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
347 | 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
348 | 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
349 | 16: 'C_2', 17: 'C_5', 18: 'C_6'}
350 | new_mapping = {'A':0,'B':1,'C':2,'D':3}
351 |
352 | model = LSTM_Model()
353 | model.summary()
354 | model.compile(loss=[custom_loss,custom_loss,custom_loss],loss_weights=[3,7,21],
355 | optimizer=Adam(),
356 | metrics=["acc"])
357 | plateau = ReduceLROnPlateau(monitor="19class_acc",
358 | verbose=1,
359 | mode='max',
360 | factor=0.7,
361 | patience=18)
362 | early_stopping = EarlyStopping(monitor="val_19class_acc",
363 | verbose=1,
364 | mode='max',
365 | patience=60)
366 |
367 | checkpoint = ModelCheckpoint(f'Conv2d_multiloss_fold{fold}.h5',
368 | monitor="val_19class_acc",
369 | verbose=0,
370 | mode='max',
371 | save_best_only=True)
372 |
373 | print('fold'*50)
374 | print(classweights1,classweights2,classweights3)
375 |
376 | train_res = model.fit(train[xx][:,:,:,0], [y_1[xx], y_2[xx], y_3[xx]],
377 |                           epochs=400,
378 | batch_size=32,
379 | verbose=1,
380 | shuffle=True,
381 | validation_data=(train[yy][:,:,:,0], [y_1[yy], y_2[yy],y_3[yy]]),
382 | callbacks=[plateau, early_stopping,checkpoint],
383 | class_weight=[classweights1,classweights2,classweights3])
384 | history.append(train_res)
385 |
386 |
387 |
388 | model.load_weights(f'Conv2d_multiloss_fold{fold}.h5')
389 | proba_t += model.predict(test[:,:,:,0], verbose=0, batch_size=1024)[2] / fold_num
390 |     proba_oof[yy] += model.predict(train[yy][:,:,:,0], verbose=0, batch_size=1024)[2]
391 |
392 | oof_y = np.argmax(proba_oof[yy], axis=1)
393 | acc = round(accuracy_score(y[yy], oof_y),3)
394 | print(acc)
395 | oof_score.append(acc)
396 | scores = sum(acc_combo(y_true, y_pred) for y_true, y_pred in zip(y[yy], oof_y)) / oof_y.shape[0]
397 | oof_comm.append(scores)
398 | print(round(scores, 5))
399 |
400 |
401 | # In[20]:
402 |
403 |
404 | for index,i in enumerate(oof_comm):
405 | print(index,i,oof_score[index])
406 |
407 |
408 | oof_dict = {
409 | "oof":proba_oof,
410 | "test":proba_t,
411 | "acc":oof_comm,
412 | }
413 | import joblib
414 | joblib.dump(oof_dict,"lstm_mutiloss_4sub_bs32.pkl")
415 |
416 | print(oof_score)
417 | print(oof_comm)
418 |
419 |
420 | # In[ ]:
421 |
422 |
423 | '''lstm baseline
424 | 0 0.7871666829857358 0.754
425 | 1 0.7745357224452475 0.738
426 | 2 0.7666078777189876 0.724
427 | 3 0.7799660330524517 0.74
428 | 4 0.784669148866678 0.747
429 | '''
430 | '''lstm_NB_layerNorm_AvePooling
431 | 0 0.7825647051143947 0.746
432 | 1 0.7841966121609696 0.751
433 | 2 0.788490430465738 0.754
434 | 3 0.7902867594225609 0.752
435 | 4 0.8051472989744576 0.773
436 | '''
437 | '''remove bidirectional LSTM
438 | 0 0.7993733476941134 0.768
439 | 1 0.7980025457749909 0.764
440 | 2 0.8037428963354872 0.77
441 | 3 0.7997256515775022 0.767
442 | 4 0.802730420014369 0.77
443 | '''
444 | '''more wide
445 | 0 0.8240477822383224 0.796
446 | 1 0.8095564476647393 0.776
447 | 2 0.8125938990136501 0.783
448 | 3 0.7985825331504338 0.763
449 | 4 0.8233718727545867 0.794
450 | '''
451 | '''split channels
452 | 0 0.8186624889846258 0.79
453 | 1 0.8136362152811761 0.781
454 | 2 0.8075315174080595 0.778
455 | 3 0.7980926252531181 0.767
456 | 4 0.8239271017048774 0.796
457 |
458 | '''
459 | '''add attention
460 | 0 0.8186951271255575 0.79
461 | 1 0.7945102646953215 0.761
462 | 2 0.8192566464171388 0.789
463 | 3 0.8061924358220645 0.77
464 | 4 0.8307531517408047 0.803
465 |
466 | '''
467 | '''tune dropout and the training strategy, ReduceLROnPlateau
468 |
469 | 0 0.8272789581905398 0.802
470 | 1 0.8231991905741032 0.796
471 | 2 0.823829120125415 0.794
472 | 3 0.8362401201907365 0.808
473 | 4 0.840583970213598 0.814
474 |
475 | '''
476 |
477 | '''lstm_NB_4sub_bs32
478 | 0 0.843369561669766 0.819
479 | 1 0.8388002219393567 0.808
480 | 2 0.8508393755307321 0.824
481 | 3 0.8372852570383419 0.81
482 | 4 0.8516885492194121 0.826
483 |
484 | '''
485 |
486 |
487 | # In[18]:
488 |
489 |
490 | # import seaborn as sns
491 | # import matplotlib.pyplot as plt
492 | from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score
493 |
494 | def acc_combo(y, y_pred):
495 |     # mapping between numeric IDs and behavior codes
496 | mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
497 | 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
498 | 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
499 | 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
500 | 16: 'C_2', 17: 'C_5', 18: 'C_6'}
501 |     # convert behavior IDs to their codes
502 |     code_y, code_y_pred = mapping[y], mapping[y_pred]
503 |     if code_y == code_y_pred: # identical codes score 1.0
504 |         return 1.0
505 |     elif code_y.split("_")[0] == code_y_pred.split("_")[0]: # only the letter part matches: score 1.0/7
506 |         return 1.0/7
507 |     elif code_y.split("_")[1] == code_y_pred.split("_")[1]: # only the digit part matches: score 1.0/3
508 | return 1.0/3
509 | else:
510 | return 0.0
511 |
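# Illustrative note (added; not in the original script): a few worked examples of
# the competition metric above, using the `mapping` dict shown in acc_combo:
#   acc_combo(0, 0)  -> 1.0   ('A_0' vs 'A_0', exact match)
#   acc_combo(0, 1)  -> 1/7   ('A_0' vs 'A_1', only the letter matches)
#   acc_combo(6, 12) -> 1/3   ('B_1' vs 'C_1', only the digit matches)
#   acc_combo(0, 4)  -> 0.0   ('A_0' vs 'D_4', nothing matches)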
512 | train_y = y
513 | labels = np.argmax(proba_t, axis=1)
514 | oof_y = np.argmax(proba_oof, axis=1)
515 | print(round(accuracy_score(train_y, oof_y), 5))
516 | scores = sum(acc_combo(y_true, y_pred) for y_true, y_pred in zip(train_y, oof_y)) / oof_y.shape[0]
517 | print(round(scores, 5))
518 | data_path = '../../data/'
519 | sub = pd.read_csv(data_path+'提交结果示例.csv')
520 | sub['behavior_id'] = labels
521 |
522 | vc = pd.Series(train_y).value_counts().sort_index()
523 | # sns.barplot(vc.index, vc.values)
524 | # plt.show()
525 |
526 | vc = pd.Series(oof_y).value_counts().sort_index()
527 | # sns.barplot(vc.index, vc.values)
528 | # plt.show()
529 |
530 | vc = sub['behavior_id'].value_counts().sort_index()
531 | # sns.barplot(vc.index, vc.values)
532 | # plt.show()
533 | sub.to_csv('lstm%.5f.csv' % scores, index=False)
534 | sub.info()
535 |
536 |
--------------------------------------------------------------------------------
/Final/code/cos_dense_attention.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 | """Attention layers that can be used in sequence DNN/CNN models.
16 | This file follows the terminology of https://arxiv.org/abs/1706.03762 Figure 2.
17 | Attention is formed by three tensors: Query, Key and Value.
18 | """
19 |
20 | from __future__ import absolute_import
21 | from __future__ import division
22 | from __future__ import print_function
23 |
24 | from tensorflow.python.framework import dtypes
25 | from tensorflow.python.framework import ops
26 | from tensorflow.python.framework import tensor_shape
27 | from tensorflow.python.keras import backend as K
28 | from tensorflow.python.keras.engine.base_layer import Layer
29 | from tensorflow.python.ops import array_ops
30 | from tensorflow.python.ops import init_ops
31 | from tensorflow.python.ops import math_ops
32 | from tensorflow.python.ops import nn
33 | from tensorflow.python.util.tf_export import keras_export
34 |
35 |
36 | class BaseDenseAttention(Layer):
37 | """Base Attention class for Dense networks.
38 | This class is suitable for Dense or CNN networks, and not for RNN networks.
39 | Implementations of attention mechanisms should inherit from this class, and
40 |   reuse the `_apply_scores()` method.
41 | Args:
42 | causal: Boolean. Set to `True` for decoder self-attention. Adds a mask such
43 | that position `i` cannot attend to positions `j > i`. This prevents the
44 | flow of information from the future towards the past.
45 | Call Arguments:
46 | inputs: List of the following tensors:
47 | * query: Query `Tensor` of shape `[batch_size, Tq, dim]`.
48 | * value: Value `Tensor` of shape `[batch_size, Tv, dim]`.
49 | * key: Optional key `Tensor` of shape `[batch_size, Tv, dim]`. If not
50 | given, will use `value` for both `key` and `value`, which is the
51 | most common case.
52 | mask: List of the following tensors:
53 | * query_mask: A boolean mask `Tensor` of shape `[batch_size, Tq]`.
54 | If given, the output will be zero at the positions where
55 | `mask==False`.
56 | * value_mask: A boolean mask `Tensor` of shape `[batch_size, Tv]`.
57 | If given, will apply the mask such that values at positions where
58 | `mask==False` do not contribute to the result.
59 | Output shape:
60 | Attention outputs of shape `[batch_size, Tq, dim]`.
61 | """
62 |
63 | def __init__(self, causal=False, **kwargs):
64 | super(BaseDenseAttention, self).__init__(**kwargs)
65 | self.causal = causal
66 | self.supports_masking = True
67 |
68 | def _calculate_scores(self, query, key):
69 | """Calculates attention scores.
70 | Args:
71 | query: Query tensor of shape `[batch_size, Tq, dim]`.
72 | key: Key tensor of shape `[batch_size, Tv, dim]`.
73 | Returns:
74 | Tensor of shape `[batch_size, Tq, Tv]`.
75 | """
76 |     raise NotImplementedError
77 |
78 | def _apply_scores(self, scores, value, scores_mask=None):
79 | """Applies attention scores to the given value tensor.
80 | To use this method in your attention layer, follow the steps:
81 | * Use `query` tensor of shape `[batch_size, Tq]` and `key` tensor of shape
82 | `[batch_size, Tv]` to calculate the attention `scores`.
83 | * Pass `scores` and `value` tensors to this method. The method applies
84 | `scores_mask`, calculates `attention_distribution = softmax(scores)`, then
85 |       returns `matmul(attention_distribution, value)`.
86 | * Apply `query_mask` and return the result.
87 | Args:
88 | scores: Scores float tensor of shape `[batch_size, Tq, Tv]`.
89 | value: Value tensor of shape `[batch_size, Tv, dim]`.
90 | scores_mask: A boolean mask `Tensor` of shape `[batch_size, 1, Tv]` or
91 | `[batch_size, Tq, Tv]`. If given, scores at positions where
92 | `scores_mask==False` do not contribute to the result. It must contain
93 | at least one `True` value in each line along the last dimension.
94 | Returns:
95 | Tensor of shape `[batch_size, Tq, dim]`.
96 | """
97 | if scores_mask is not None:
98 | padding_mask = math_ops.logical_not(scores_mask)
99 | # Bias so padding positions do not contribute to attention distribution.
100 | scores -= 1.e9 * math_ops.cast(padding_mask, dtype=K.floatx())
101 | attention_distribution = nn.softmax(scores)
102 | return math_ops.matmul(attention_distribution, value)
103 |
104 | # TODO(b/125916026): Consider exposing a __call__ method with named args.
105 | def call(self, inputs, mask=None):
106 | self._validate_call_args(inputs=inputs, mask=mask)
107 | q = inputs[0]
108 | v = inputs[1]
109 | k = inputs[2] if len(inputs) > 2 else v
110 | q_mask = mask[0] if mask else None
111 | v_mask = mask[1] if mask else None
112 | scores = self._calculate_scores(query=q, key=k)
113 | if v_mask is not None:
114 | # Mask of shape [batch_size, 1, Tv].
115 | v_mask = array_ops.expand_dims(v_mask, axis=-2)
116 | if self.causal:
117 | # Creates a lower triangular mask, so position i cannot attend to
118 | # positions j>i. This prevents the flow of information from the future
119 | # into the past.
120 | scores_shape = array_ops.shape(scores)
121 | # causal_mask_shape = [1, Tq, Tv].
122 | causal_mask_shape = array_ops.concat(
123 | [array_ops.ones_like(scores_shape[:-2]), scores_shape[-2:]],
124 | axis=0)
125 | causal_mask = _lower_triangular_mask(causal_mask_shape)
126 | else:
127 | causal_mask = None
128 | scores_mask = _merge_masks(v_mask, causal_mask)
129 | result = self._apply_scores(scores=scores, value=v, scores_mask=scores_mask)
130 | if q_mask is not None:
131 | # Mask of shape [batch_size, Tq, 1].
132 | q_mask = array_ops.expand_dims(q_mask, axis=-1)
133 | result *= math_ops.cast(q_mask, dtype=result.dtype)
134 | return result
135 |
136 | def compute_mask(self, inputs, mask=None):
137 | self._validate_call_args(inputs=inputs, mask=mask)
138 | if mask:
139 | q_mask = mask[0]
140 | if q_mask is None:
141 | return None
142 | return ops.convert_to_tensor(q_mask)
143 | return None
144 |
145 | def _validate_call_args(self, inputs, mask):
146 | """Validates arguments of the call method."""
147 | class_name = self.__class__.__name__
148 | if not isinstance(inputs, list):
149 | raise ValueError(
150 | '{} layer must be called on a list of inputs, namely [query, value] '
151 | 'or [query, value, key].'.format(class_name))
152 | if len(inputs) < 2 or len(inputs) > 3:
153 | raise ValueError(
154 | '{} layer accepts inputs list of length 2 or 3, '
155 | 'namely [query, value] or [query, value, key]. '
156 | 'Given length: {}'.format(class_name, len(inputs)))
157 | if mask:
158 | if not isinstance(mask, list):
159 | raise ValueError(
160 | '{} layer mask must be a list, '
161 | 'namely [query_mask, value_mask].'.format(class_name))
162 | if len(mask) != 2:
163 | raise ValueError(
164 | '{} layer mask must be a list of length 2, namely [query_mask, '
165 | 'value_mask]. Given length: {}'.format(class_name, len(mask)))
166 |
167 | def get_config(self):
168 | config = {'causal': self.causal}
169 | base_config = super(BaseDenseAttention, self).get_config()
170 | return dict(list(base_config.items()) + list(config.items()))
171 |
172 |
173 | @keras_export('keras.layers.Attention')
174 | class Attention(BaseDenseAttention):
175 | """Dot-product attention layer, a.k.a. Luong-style attention.
176 | Inputs are `query` tensor of shape `[batch_size, Tq, dim]`, `value` tensor of
177 | shape `[batch_size, Tv, dim]` and `key` tensor of shape
178 | `[batch_size, Tv, dim]`. The calculation follows the steps:
179 | 1. Calculate scores with shape `[batch_size, Tq, Tv]` as a `query`-`key` dot
180 | product: `scores = tf.matmul(query, key, transpose_b=True)`.
181 | 2. Use scores to calculate a distribution with shape
182 | `[batch_size, Tq, Tv]`: `distribution = tf.nn.softmax(scores)`.
183 | 3. Use `distribution` to create a linear combination of `value` with
184 |      shape `[batch_size, Tq, dim]`:
185 | `return tf.matmul(distribution, value)`.
186 | Args:
187 | use_scale: If `True`, will create a scalar variable to scale the attention
188 | scores.
189 | causal: Boolean. Set to `True` for decoder self-attention. Adds a mask such
190 | that position `i` cannot attend to positions `j > i`. This prevents the
191 | flow of information from the future towards the past.
192 | Call Arguments:
193 | inputs: List of the following tensors:
194 | * query: Query `Tensor` of shape `[batch_size, Tq, dim]`.
195 | * value: Value `Tensor` of shape `[batch_size, Tv, dim]`.
196 | * key: Optional key `Tensor` of shape `[batch_size, Tv, dim]`. If not
197 | given, will use `value` for both `key` and `value`, which is the
198 | most common case.
199 | mask: List of the following tensors:
200 | * query_mask: A boolean mask `Tensor` of shape `[batch_size, Tq]`.
201 | If given, the output will be zero at the positions where
202 | `mask==False`.
203 | * value_mask: A boolean mask `Tensor` of shape `[batch_size, Tv]`.
204 | If given, will apply the mask such that values at positions where
205 | `mask==False` do not contribute to the result.
206 | Output shape:
207 | Attention outputs of shape `[batch_size, Tq, dim]`.
208 |   The meaning of `query`, `value` and `key` depends on the application. In the
209 | case of text similarity, for example, `query` is the sequence embeddings of
210 | the first piece of text and `value` is the sequence embeddings of the second
211 | piece of text. `key` is usually the same tensor as `value`.
212 | Here is a code example for using `Attention` in a CNN+Attention network:
213 | ```python
214 | # Variable-length int sequences.
215 | query_input = tf.keras.Input(shape=(None,), dtype='int32')
216 | value_input = tf.keras.Input(shape=(None,), dtype='int32')
217 | # Embedding lookup.
218 | token_embedding = tf.keras.layers.Embedding(max_tokens, dimension)
219 | # Query embeddings of shape [batch_size, Tq, dimension].
220 | query_embeddings = token_embedding(query_input)
221 | # Value embeddings of shape [batch_size, Tv, dimension].
222 |   value_embeddings = token_embedding(value_input)
223 | # CNN layer.
224 | cnn_layer = tf.keras.layers.Conv1D(
225 | filters=100,
226 | kernel_size=4,
227 | # Use 'same' padding so outputs have the same shape as inputs.
228 | padding='same')
229 | # Query encoding of shape [batch_size, Tq, filters].
230 | query_seq_encoding = cnn_layer(query_embeddings)
231 | # Value encoding of shape [batch_size, Tv, filters].
232 | value_seq_encoding = cnn_layer(value_embeddings)
233 | # Query-value attention of shape [batch_size, Tq, filters].
234 | query_value_attention_seq = tf.keras.layers.Attention()(
235 | [query_seq_encoding, value_seq_encoding])
236 | # Reduce over the sequence axis to produce encodings of shape
237 | # [batch_size, filters].
238 | query_encoding = tf.keras.layers.GlobalAveragePooling1D()(
239 | query_seq_encoding)
240 | query_value_attention = tf.keras.layers.GlobalAveragePooling1D()(
241 | query_value_attention_seq)
242 | # Concatenate query and document encodings to produce a DNN input layer.
243 | input_layer = tf.keras.layers.Concatenate()(
244 | [query_encoding, query_value_attention])
245 | # Add DNN layers, and create Model.
246 | # ...
247 | ```
248 | """
249 |
250 | def __init__(self, use_scale=False, **kwargs):
251 | super(Attention, self).__init__(**kwargs)
252 | self.use_scale = use_scale
253 |
254 | def build(self, input_shape):
255 | """Creates scale variable if use_scale==True."""
256 | if self.use_scale:
257 | self.scale = self.add_weight(
258 | name='scale',
259 | shape=(),
260 | initializer=init_ops.ones_initializer(),
261 | dtype=self.dtype,
262 | trainable=True)
263 | else:
264 | self.scale = None
265 | super(Attention, self).build(input_shape)
266 |
267 | def _calculate_scores(self, query, key):
268 | """Calculates attention scores as a query-key dot product.
269 | Args:
270 | query: Query tensor of shape `[batch_size, Tq, dim]`.
271 | key: Key tensor of shape `[batch_size, Tv, dim]`.
272 | Returns:
273 | Tensor of shape `[batch_size, Tq, Tv]`.
274 | """
275 | scores = math_ops.matmul(query, key, transpose_b=True)
276 | if self.scale is not None:
277 | scores *= self.scale
278 | return scores
279 |
280 | def get_config(self):
281 | config = {'use_scale': self.use_scale}
282 | base_config = super(Attention, self).get_config()
283 | return dict(list(base_config.items()) + list(config.items()))
284 |
285 |
286 | @keras_export('keras.layers.AdditiveAttention')
287 | class AdditiveAttention(BaseDenseAttention):
288 | """Additive attention layer, a.k.a. Bahdanau-style attention.
289 | Inputs are `query` tensor of shape `[batch_size, Tq, dim]`, `value` tensor of
290 | shape `[batch_size, Tv, dim]` and `key` tensor of shape
291 | `[batch_size, Tv, dim]`. The calculation follows the steps:
292 | 1. Reshape `query` and `value` into shapes `[batch_size, Tq, 1, dim]`
293 | and `[batch_size, 1, Tv, dim]` respectively.
294 | 2. Calculate scores with shape `[batch_size, Tq, Tv]` as a non-linear
295 | sum: `scores = tf.reduce_sum(tf.tanh(query + value), axis=-1)`
296 | 3. Use scores to calculate a distribution with shape
297 | `[batch_size, Tq, Tv]`: `distribution = tf.nn.softmax(scores)`.
298 | 4. Use `distribution` to create a linear combination of `value` with
299 |      shape `[batch_size, Tq, dim]`:
300 | `return tf.matmul(distribution, value)`.
301 | Args:
302 | use_scale: If `True`, will create a variable to scale the attention scores.
303 | causal: Boolean. Set to `True` for decoder self-attention. Adds a mask such
304 | that position `i` cannot attend to positions `j > i`. This prevents the
305 | flow of information from the future towards the past.
306 | Call Arguments:
307 | inputs: List of the following tensors:
308 | * query: Query `Tensor` of shape `[batch_size, Tq, dim]`.
309 | * value: Value `Tensor` of shape `[batch_size, Tv, dim]`.
310 | * key: Optional key `Tensor` of shape `[batch_size, Tv, dim]`. If not
311 | given, will use `value` for both `key` and `value`, which is the
312 | most common case.
313 | mask: List of the following tensors:
314 | * query_mask: A boolean mask `Tensor` of shape `[batch_size, Tq]`.
315 | If given, the output will be zero at the positions where
316 | `mask==False`.
317 | * value_mask: A boolean mask `Tensor` of shape `[batch_size, Tv]`.
318 | If given, will apply the mask such that values at positions where
319 | `mask==False` do not contribute to the result.
320 | Output shape:
321 | Attention outputs of shape `[batch_size, Tq, dim]`.
322 |   The meaning of `query`, `value` and `key` depends on the application. In the
323 | case of text similarity, for example, `query` is the sequence embeddings of
324 | the first piece of text and `value` is the sequence embeddings of the second
325 | piece of text. `key` is usually the same tensor as `value`.
326 | Here is a code example for using `AdditiveAttention` in a CNN+Attention
327 | network:
328 | ```python
329 | # Variable-length int sequences.
330 | query_input = tf.keras.Input(shape=(None,), dtype='int32')
331 | value_input = tf.keras.Input(shape=(None,), dtype='int32')
332 | # Embedding lookup.
333 | token_embedding = tf.keras.layers.Embedding(max_tokens, dimension)
334 | # Query embeddings of shape [batch_size, Tq, dimension].
335 | query_embeddings = token_embedding(query_input)
336 | # Value embeddings of shape [batch_size, Tv, dimension].
337 |   value_embeddings = token_embedding(value_input)
338 | # CNN layer.
339 | cnn_layer = tf.keras.layers.Conv1D(
340 | filters=100,
341 | kernel_size=4,
342 | # Use 'same' padding so outputs have the same shape as inputs.
343 | padding='same')
344 | # Query encoding of shape [batch_size, Tq, filters].
345 | query_seq_encoding = cnn_layer(query_embeddings)
346 | # Value encoding of shape [batch_size, Tv, filters].
347 | value_seq_encoding = cnn_layer(value_embeddings)
348 | # Query-value attention of shape [batch_size, Tq, filters].
349 | query_value_attention_seq = tf.keras.layers.AdditiveAttention()(
350 | [query_seq_encoding, value_seq_encoding])
351 | # Reduce over the sequence axis to produce encodings of shape
352 | # [batch_size, filters].
353 | query_encoding = tf.keras.layers.GlobalAveragePooling1D()(
354 | query_seq_encoding)
355 | query_value_attention = tf.keras.layers.GlobalAveragePooling1D()(
356 | query_value_attention_seq)
357 | # Concatenate query and document encodings to produce a DNN input layer.
358 | input_layer = tf.keras.layers.Concatenate()(
359 | [query_encoding, query_value_attention])
360 | # Add DNN layers, and create Model.
361 | # ...
362 | ```
363 | """
364 |
365 | def __init__(self, use_scale=True, **kwargs):
366 | super(AdditiveAttention, self).__init__(**kwargs)
367 | self.use_scale = use_scale
368 |
369 | def build(self, input_shape):
370 | v_shape = tensor_shape.TensorShape(input_shape[1])
371 | dim = v_shape[-1]
372 | if isinstance(dim, tensor_shape.Dimension):
373 | dim = dim.value
374 | if self.use_scale:
375 | self.scale = self.add_weight(
376 | name='scale',
377 | shape=[dim],
378 | initializer=init_ops.glorot_uniform_initializer(),
379 | dtype=self.dtype,
380 | trainable=True)
381 | else:
382 | self.scale = None
383 | super(AdditiveAttention, self).build(input_shape)
384 |
385 | def _calculate_scores(self, query, key):
386 | """Calculates attention scores as a nonlinear sum of query and key.
387 | Args:
388 | query: Query tensor of shape `[batch_size, Tq, dim]`.
389 | key: Key tensor of shape `[batch_size, Tv, dim]`.
390 | Returns:
391 | Tensor of shape `[batch_size, Tq, Tv]`.
392 | """
393 | # Reshape tensors to enable broadcasting.
394 | # Reshape into [batch_size, Tq, 1, dim].
395 | q_reshaped = array_ops.expand_dims(query, axis=-2)
396 | # Reshape into [batch_size, 1, Tv, dim].
397 | k_reshaped = array_ops.expand_dims(key, axis=-3)
398 | if self.use_scale:
399 | scale = self.scale
400 | else:
401 | scale = 1.
402 | return math_ops.reduce_sum(
403 | scale * math_ops.tanh(q_reshaped + k_reshaped), axis=-1)
404 |
405 | def get_config(self):
406 | config = {'use_scale': self.use_scale}
407 | base_config = super(AdditiveAttention, self).get_config()
408 | return dict(list(base_config.items()) + list(config.items()))
409 |
410 |
411 | def _lower_triangular_mask(shape):
412 | """Creates a lower-triangular boolean mask over the last 2 dimensions."""
413 | row_index = math_ops.cumsum(
414 | array_ops.ones(shape=shape, dtype=dtypes.int32), axis=-2)
415 | col_index = math_ops.cumsum(
416 | array_ops.ones(shape=shape, dtype=dtypes.int32), axis=-1)
417 | return math_ops.greater_equal(row_index, col_index)
418 |
419 |
420 | def _merge_masks(x, y):
421 | if x is None:
422 | return y
423 | if y is None:
424 | return x
425 | return math_ops.logical_and(x, y)
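# ---------------------------------------------------------------------------
# Illustrative sketch (added by the editor; not part of the original file):
# BaseDenseAttention is designed to be subclassed by overriding
# `_calculate_scores`, so a cosine-similarity variant (in the spirit of the
# file name `cos_dense_attention.py`) could look like the class below.
# The name `CosineAttention` is hypothetical.


class CosineAttention(BaseDenseAttention):
  """Scores queries against keys with cosine similarity.

  Query and key are L2-normalized along the feature axis before the dot
  product, so scores lie in [-1, 1].
  """

  def _calculate_scores(self, query, key):
    q_norm = nn.l2_normalize(query, axis=-1)
    k_norm = nn.l2_normalize(key, axis=-1)
    # Cosine similarities of shape [batch_size, Tq, Tv].
    return math_ops.matmul(q_norm, k_norm, transpose_b=True)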
--------------------------------------------------------------------------------
/Preliminary/PKL/spetron_cnn/spetron_cnn.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding: utf-8
3 |
4 | # In[1]:
5 |
6 |
7 | seed = 0
8 | import random
9 | import numpy as np
10 | import tensorflow as tf
11 | import os
12 | random.seed(seed)
13 | np.random.seed(seed)
14 | tf.random.set_seed(seed)
15 | os.environ["CUDA_DEVICE_ORDER"] = 'PCI_BUS_ID'
16 | os.environ["CUDA_VISIBLE_DEVICES"] = '0'
17 | os.environ["PYTHONHASHSEED"] = str(seed)
18 |
19 | os.environ['TF_DETERMINISTIC_OPS'] = '1'
20 |
21 |
22 | # In[2]:
23 |
24 |
25 | import numpy as np
26 | import pandas as pd
27 | from tqdm import tqdm
28 | from scipy.signal import resample
29 | from tensorflow.keras import layers
30 | from tensorflow.keras.layers import *
31 | from tensorflow.keras.models import Model
32 | from tensorflow.keras.optimizers import Adam
33 | from tensorflow.keras.utils import to_categorical
34 | from sklearn.model_selection import StratifiedKFold
35 | from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
36 | import os
37 | from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score
38 |
39 | from sklearn.preprocessing import StandardScaler,MinMaxScaler
40 |
41 |
42 | def acc_combo(y, y_pred):
43 |     # mapping between numeric IDs and behavior codes
44 | mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
45 | 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
46 | 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
47 | 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
48 | 16: 'C_2', 17: 'C_5', 18: 'C_6'}
49 |     # convert behavior IDs to their codes
50 |     code_y, code_y_pred = mapping[y], mapping[y_pred]
51 |     if code_y == code_y_pred: # identical codes score 1.0
52 |         return 1.0
53 |     elif code_y.split("_")[0] == code_y_pred.split("_")[0]: # only the letter part matches: score 1.0/7
54 |         return 1.0/7
55 |     elif code_y.split("_")[1] == code_y_pred.split("_")[1]: # only the digit part matches: score 1.0/3
56 | return 1.0/3
57 | else:
58 | return 0.0
59 |
60 |
61 | sample_num = 60
62 |
63 |
64 | # In[4]:
65 |
66 |
67 | root_path = '../../data/'
68 | train = pd.read_csv(root_path+'sensor_train.csv')
69 | test = pd.read_csv(root_path+'sensor_test.csv')
70 | sub = pd.read_csv(root_path+'提交结果示例.csv')
71 | y = train.groupby('fragment_id')['behavior_id'].min()
72 |
73 |
74 | # In[5]:
75 |
76 |
77 | def add_features(df):
78 | print(df.columns)
79 | df['acc'] = (df.acc_x ** 2 + df.acc_y ** 2 + df.acc_z ** 2) ** .5
80 | df['accg'] = (df.acc_xg ** 2 + df.acc_yg ** 2 + df.acc_zg ** 2) ** .5
81 | df['thetax']=np.arctan(df.acc_xg/
82 | np.sqrt(df.acc_yg*df.acc_yg+df.acc_zg*df.acc_zg))*180/np.pi
83 | df['thetay']=np.arctan(df.acc_yg/
84 | np.sqrt(df.acc_xg*df.acc_xg+df.acc_zg*df.acc_zg))*180/np.pi
85 | df['thetaz']=np.arctan(df.acc_zg/
86 | np.sqrt(df.acc_yg*df.acc_yg+df.acc_xg*df.acc_xg))*180/np.pi
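    # Note (added for clarity): thetax/thetay/thetaz above are the tilt angles, in
    # degrees, of each axis relative to the horizontal plane, computed from the
    # gravity-including channels acc_*g via arctan(axis / norm of the other two axes).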
87 |
88 | df['xy'] = (df['acc_x'] ** 2 + df['acc_y'] ** 2) ** 0.5
89 | df['xy_g'] = (df['acc_xg'] ** 2 + df['acc_yg'] ** 2) ** 0.5
90 |
91 | df['g'] = ((df["acc_x"] - df["acc_xg"]) ** 2 +
92 | (df["acc_y"] - df["acc_yg"]) ** 2 + (df["acc_z"] - df["acc_zg"]) ** 2) ** 0.5
93 |
94 | print(df.columns)
95 | return df
96 |
97 |
98 | # In[6]:
99 |
100 |
101 | train=add_features(train)
102 | test=add_features(test)
103 |
104 |
105 | # In[7]:
106 |
107 |
108 | group1 = [x for x in train.columns if x not in ['fragment_id', 'time_point','behavior_id']]
109 | group1
110 |
111 |
112 | # In[8]:
113 |
114 |
115 | FEATURE_NUM=14
116 |
117 |
118 | # In[9]:
119 |
120 |
121 |
122 | x = np.zeros((7292, sample_num, FEATURE_NUM, 1))
123 | t = np.zeros((7500, sample_num, FEATURE_NUM, 1))
124 |
125 | from scipy.fftpack import fft
126 | from scipy.signal import resample
import matplotlib.pyplot as plt  # added: plt is used by the debug plot in get_fft_values but was never imported
127 | def get_fft_values(y_values, N, f_s):
128 | f_values = np.linspace(0.0, f_s/2.0, N//2)
129 | fft_values_ = fft(y_values)
130 | plt.plot(fft_values_)
131 | plt.show()
132 | print(fft_values_.shape)
133 | fft_values = 2.0/N * np.abs(fft_values_[0:N//2])
134 | print(fft_values.shape)
135 | return f_values, fft_values
136 |
137 | # tmp = train[train.fragment_id == 0][:sample_num]
138 |
139 | # get_fft_values(tmp["acc"].values,60,5)
140 |
141 |
142 | # In[14]:
143 |
144 |
145 |
146 | test['fragment_id'] += 10000
147 | data = pd.concat([train, test], sort=False)
148 | ss_tool = StandardScaler()
149 | data[group1] = ss_tool.fit_transform(data[group1])
150 | train = data[data["behavior_id"].isna()==False].reset_index(drop=True)
151 | test = data[data["behavior_id"].isna()==True].reset_index(drop=True)
152 | test['fragment_id'] -= 10000
153 | train = train[['fragment_id', 'time_point', 'behavior_id']+group1]
154 | test = test[['fragment_id', 'time_point']+group1]
155 | print(train.columns)
156 |
157 | for i in tqdm(range(7292)):
158 | tmp = train[train.fragment_id == i][:sample_num]
159 | x[i,:,:,0] = fft(resample(tmp.drop(['fragment_id', 'time_point', 'behavior_id'],
160 | axis=1)[group1], sample_num, np.array(tmp.time_point))[0]).reshape(sample_num,FEATURE_NUM)
161 | for i in tqdm(range(7500)):
162 | tmp = test[test.fragment_id == i][:sample_num]
163 | t[i,:,:,0] = fft(resample(tmp.drop(['fragment_id', 'time_point'],
164 | axis=1)[group1], sample_num, np.array(tmp.time_point))[0]).reshape(sample_num,FEATURE_NUM)
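# Note (added for clarity): scipy's fft returns complex values; assigning them into
# the float arrays x/t above silently keeps only the real part (numpy emits a
# ComplexWarning). Using np.abs(fft(...)) would store the magnitude spectrum instead.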
165 |
166 |
167 |
168 |
169 |
170 | # In[15]:
171 |
172 |
173 | def ConvBNRelu(X,filters,kernal_size=(3,3)):
174 | X = Conv2D(filters=filters,
175 | kernel_size=kernal_size,
176 | # activation='relu',
177 | use_bias=False,
178 | padding='same')(X)
179 | X = BatchNormalization()(X)
180 | X = Activation('relu')(X)
181 | return X
182 |
183 |
184 | def ConvRelu(X,filters,kernal_size=(3,3)):
185 | X = Conv2D(filters=filters,
186 | kernel_size=kernal_size,
187 | activation='relu',
188 | use_bias=False,
189 | padding='same')(X)
190 | return X
191 |
192 |
193 | def squeeze_excitation_layer(x, out_dim,ratio=8):
194 | '''
195 | SE module performs inter-channel weighting.
196 | '''
197 | squeeze = GlobalAveragePooling2D()(x)
198 |
199 | excitation = Dense(units=out_dim // ratio)(squeeze)
200 | excitation = Activation('relu')(excitation)
201 | excitation = Dense(units=out_dim)(excitation)
202 | excitation = Activation('sigmoid')(excitation)
203 | excitation = Reshape((1,1,out_dim))(excitation)
204 | scale = multiply([x,excitation])
205 | return scale
206 |
207 | # def SE_Residual(X):
208 | # A =
209 | # X = squeeze_excitation_layer(X,128)
210 | # X = Add()([X,A])
211 |
212 |
213 | def lenet5(input):
214 | A = ConvBNRelu(input,64,kernal_size=(3,3))
215 | # B = ConvBNRelu(input,16,kernal_size=(5,1))
216 | # C = ConvBNRelu(input,16,kernal_size=(7,1))
217 | # ABC = layers.Concatenate()([A,B,C])
218 | X = ConvBNRelu(A,128)
219 | # X = squeeze_excitation_layer(X,128)
220 | X = Dropout(0.2)(X)
221 |
222 | X = AveragePooling2D()(X)
223 |
224 | X = ConvBNRelu(X,256)
225 | X = Dropout(0.3)(X)
226 | # X = squeeze_excitation_layer(X,256)
227 | X = ConvBNRelu(X,512)
228 | X = Dropout(0.5)(X)
229 | # X = squeeze_excitation_layer(X,512)
230 | # X = GlobalMaxPooling2D()(X)
231 | X = GlobalAveragePooling2D()(X)
232 |
233 | # X = BatchNormalization()(X)
234 | return X
235 | import tensorflow as tf
236 | def Net(sample_num):
237 | input1 = Input(shape=(sample_num, FEATURE_NUM, 1))
238 | part = tf.split(input1,axis=2, num_or_size_splits = [6, 2, 6])
239 | # res = tf.split(c, axis = 3, num_or_size_splits = [2, 2, 4])
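    # Note (added for clarity): given the column order produced by add_features,
    # part[0] holds the 6 raw accelerometer channels, part[1] the two magnitude
    # features (acc, accg), and part[2] the 6 derived features
    # (thetax, thetay, thetaz, xy, xy_g, g).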
240 |
241 |
242 | X1 = Concatenate(axis=-2)([part[0],part[1]])
243 | X1 = lenet5(X1)
244 | X1 = BatchNormalization()(X1)
245 | X1 = Dense(128, activation='relu')(X1)
246 | X1 = BatchNormalization()(X1)
247 | X1 = Dropout(0.2)(X1)
248 |
249 | X2 = Concatenate(axis=-2)([part[0],part[2]])
250 | X2 = lenet5(X2)
251 | X2 = BatchNormalization()(X2)
252 | # X = Dense(512, activation='relu')(X)
253 | # X = BatchNormalization()(X)
254 | X2 = Dense(128, activation='relu')(X2)
255 | X2 = BatchNormalization()(X2)
256 | X2 = Dropout(0.2)(X2)
257 |
258 | X = Concatenate(axis=-1)([X1,X2])
259 |
260 | # X = Dense(256)(X)
261 |
262 |     output1 = Dense(4, activation='softmax', name='4class')(X) # major class: letter (A-D)
263 | # output2 = Dense(128)(X)
264 | # output2 = Dense(64)(X)
265 | X = Dense(64)(X)
266 |     output2 = Dense(7, activation='softmax', name='7class')(X) # major class: digit (0-6)
267 | # X = Dense(32)(X)
268 | # X = Concatenate(axis=-1)([X,output1,output2])
269 | X = Dense(64)(X)
270 |     output3 = Dense(19, activation='softmax',name='19class')(X) # fine-grained class (19)
271 |
272 |
273 | return Model([input1], [output1,output2,output3])
274 |
275 | model = Net(60)
276 | model.summary()
277 |
278 |
279 | # # dense
280 | # ```
281 | # dense_158 (Dense) (None, 256) 65792 concatenate_155[0][0]
282 | # __________________________________________________________________________________________________
283 | # dense_159 (Dense) (None, 128) 32896 dense_158[0][0]
284 | # __________________________________________________________________________________________________
285 | # dense_160 (Dense) (None, 256) 33024 dense_159[0][0]
286 | # __________________________________________________________________________________________________
287 | # dense_161 (Dense) (None, 128) 32896 dense_160[0][0]
288 | # _________________________________________________________________________________________________
289 | # ```
290 | # [0.845, 0.838, 0.812, 0.822, 0.812] 0.8258000000000001
291 | # [0.8671627664088236, 0.8639642285975372, 0.8386569991508248, 0.851459925533998, 0.8411065386374014] 0.8524700916657171
292 | #
293 | # -------------
294 | # ```
295 | # output1 = Dense(4, activation='softmax', name='4class')(X) # major class: letter (A-D)
296 | # X = Dense(128)(X)
297 | # X = Dense(64)(X)
298 | # output2 = Dense(7, activation='softmax', name='7class')(X) # major class: digit (0-6)
299 | # X = Dense(64)(X)
300 | # X = Dense(32)(X)
301 | # output3 = Dense(19, activation='softmax',name='19class')(X) # fine-grained class (19)
302 | #
303 | # ```
304 | # 0 val_4class_loss0.47757295val_4class_acc0.90472925
305 | # 1 val_4class_loss0.42935628val_4class_acc0.91089785
306 | # 2 val_4class_loss0.4260101val_4class_acc0.9238683
307 | # 3 val_4class_loss0.4555293val_4class_acc0.8978052
308 | # 4 val_4class_loss0.4219553val_4class_acc0.89711934
309 | # [0.819, 0.837, 0.857, 0.809, 0.817] 0.8278000000000001
310 | # [0.8447077254479575, 0.8621038545644417, 0.877914951989024, 0.8381344307270219, 0.8430988307531503] 0.8531919586963191
311 | #
312 | #
313 | # -------
314 | #
315 | # ```
316 | # # X = Dense(19)(X)
317 | #
318 | # output1 = Dense(4, activation='softmax', name='4class')(X) # major class: letter (A-D)
319 | # # X = Dense(128)(X)
320 | # X = Dense(64)(X)
321 | # output2 = Dense(7, activation='softmax', name='7class')(X) # major class: digit (0-6)
322 | # X = Dense(64)(X)
323 | # # X = Dense(32)(X)
324 | # output3 = Dense(19, activation='softmax',name='19class')(X) # fine-grained class (19)
325 | # ```
326 | # [0.808, 0.836, 0.855, 0.814, 0.824] 0.8274000000000001
327 | # [0.8357322366917967, 0.8609941577727713, 0.875628715134886, 0.8447971781305103, 0.8481612123587412] 0.8530627000177411
328 | # leaderboard (online) 0.796
329 | #
330 | # ------------------
331 | #
332 | # ```
333 | # X = Concatenate(axis=-1)([X1,X2])
334 | #
335 | # # X = Dense(256)(X)
336 | #
337 | # output1 = Dense(4, activation='softmax', name='4class')(X) # major class: letter (A-D)
338 | # # output2 = Dense(128)(X)
339 | # # output2 = Dense(64)(X)
340 | # X = Dense(64)(X)
341 | # output2 = Dense(7, activation='softmax', name='7class')(X) # major class: digit (0-6)
342 | # # X = Dense(64)(X)
343 | # # X = Dense(32)(X)
344 | # # X = Concatenate(axis=-1)([X,output1,output2])
345 | # X = Dense(64)(X)
346 | # output3 = Dense(19, activation='softmax',name='19class')(X) # fine-grained class (19)
347 | #
348 | # ```
349 | # [0.814, 0.833, 0.85, 0.829, 0.841] 0.8333999999999999
350 | # [0.8404974052677944, 0.859982375403895, 0.8727545888039693, 0.8554118492390083, 0.8649487229734123] 0.8587189883376158
351 | #
352 | # -----------
353 | # ```
354 | # X = Concatenate(axis=-1)([X1,X2])
355 | #
356 | # # X = Dense(256)(X)
357 | #
358 | # output1 = Dense(4, activation='softmax', name='4class')(X) # major class: letter (A-D)
359 | # # output2 = Dense(128)(X)
360 | # # output2 = Dense(64)(X)
361 | # X = Dense(128)(X)
362 | # output2 = Dense(7, activation='softmax', name='7class')(X) # major class: digit (0-6)
363 | # # X = Dense(64)(X)
364 | # # X = Dense(32)(X)
365 | # # X = Concatenate(axis=-1)([X,output1,output2])
366 | # X = Dense(128)(X)
367 | # output3 = Dense(19, activation='softmax',name='19class')(X) # fine-grained class (19)
368 | #
369 | # ```
370 | #
371 | # [0.851, 0.825, 0.855, 0.821, 0.844] 0.8392
372 | # [0.8736903945951223, 0.8528019843989669, 0.8762819256646396, 0.8497615781566383, 0.8669083545626737] 0.8638888474756083
373 | #
374 | # leaderboard (online) 78938
375 | #
376 | #
377 | #
378 | # ---------------------------------------------------
379 | # ```
380 | # X = Concatenate(axis=-1)([X1,X2])
381 | #
382 | # # X = Dense(256)(X)
383 | #
384 | # output1 = Dense(4, activation='softmax', name='4class')(X) # major class: letter (A-D)
385 | # # output2 = Dense(128)(X)
386 | # # output2 = Dense(64)(X)
387 | # X = Dense(64)(X)
388 | # output2 = Dense(7, activation='softmax', name='7class')(X) # major class: digit (0-6)
389 | # # X = Dense(32)(X)
390 | # # X = Concatenate(axis=-1)([X,output1,output2])
391 | # X = Dense(64)(X)
392 | # output3 = Dense(19, activation='softmax',name='19class')(X) # fine-grained class (19)
393 | #
394 | #
395 | # return Model([input1], [output1,output2,output3])
396 | #
397 | # ```
398 | # [0.85, 0.839, 0.845, 0.829, 0.837] 0.8399999999999999
399 | # [0.8721564019713419, 0.8632788276379759, 0.8670063361421364, 0.8551505650271063, 0.8620092755895202] 0.8639202812736162
400 | # leaderboard (online) 789999999
401 | #
402 | #
403 | # second run
404 | #
405 | # [0.845, 0.809, 0.842, 0.81, 0.823] 0.8257999999999999
406 | #
407 | #
408 | # [0.8656940500669066, 0.8366134664969463, 0.865536612450191, 0.8416617675876923, 0.8489124044679585] 0.8516836602139388
409 | #
410 | # third run
411 | # with a fixed random seed
412 | #
413 | # [0.85, 0.826, 0.845, 0.835, 0.845] 0.8402
414 | #
415 | # [0.8700022846698634, 0.8540095956134318, 0.8693578940492505, 0.8598863413678212, 0.867594225618915] 0.8641700682638562
416 | #
417 | #
418 |
419 | # In[16]:
420 |
421 |
422 | # build the targets for the multiple output heads
423 | mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
424 | 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
425 | 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
426 | 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
427 | 16: 'C_2', 17: 'C_5', 18: 'C_6'}
428 | # major-class (letter) target: 4 classes
429 | new_mapping = {'A':0,'B':1,'C':2,'D':3}
430 |
431 | from sklearn.utils.class_weight import compute_class_weight
432 | # y_train_weight = compute_sample_weight("balanced", train['behavior_id'])
433 | classweights1=compute_class_weight("balanced",['A','B','C','D'], pd.read_csv(root_path+'sensor_train.csv')['behavior_id'].apply(lambda x:mapping[x][0]))
434 | classweights1=pd.DataFrame(classweights1)[0].to_dict()
435 |
436 |
437 |
438 | classweights2=compute_class_weight("balanced",list(range(7)), pd.read_csv(root_path+'sensor_train.csv')['behavior_id'].apply(lambda x:int(mapping[x][2])))
439 | classweights2=pd.DataFrame(classweights2)[0].to_dict()
440 |
441 |
442 |
443 | from sklearn.utils.class_weight import compute_class_weight
444 | # y_train_weight = compute_sample_weight("balanced", train['behavior_id'])
445 | classweights3=compute_class_weight("balanced",np.array(range(19)), pd.read_csv(root_path+'sensor_train.csv')['behavior_id'])
446 | classweights3=pd.DataFrame(classweights3)[0].to_dict()
447 | classweights1,classweights2,classweights3
448 |
449 |
450 | # In[17]:
451 |
452 |
453 | # [:,:,:,[1]]
454 | train = x
455 | test = t
456 |
457 | fold_num=5
458 | kfold = StratifiedKFold(fold_num,random_state=42,shuffle=True)
459 | proba_t = np.zeros((7500, 19))
460 | proba_oof = np.zeros((7292,19))
461 |
462 | oof_score = []
463 | oof_comm = []
464 | history = []
465 |
466 | from tensorflow.keras.losses import categorical_crossentropy
467 | def custom_loss(y_true, y_pred):
468 | return categorical_crossentropy(y_true, y_pred, label_smoothing=0.05)
469 |
470 | # build the targets for the multiple output heads
471 | mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
472 | 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
473 | 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
474 | 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
475 | 16: 'C_2', 17: 'C_5', 18: 'C_6'}
476 | # major-class (letter) target: 4 classes
477 | new_mapping = {'A':0,'B':1,'C':2,'D':3}
478 | y_1 = to_categorical([new_mapping[mapping[x][0]] for x in y], num_classes=4)
479 | # major-class (digit) target: 7 classes
480 | new_mapping = {'A':0,'B':1,'C':2,'D':3}
481 | y_2 = to_categorical([mapping[x][2] for x in y], num_classes=7)
482 | # fine-grained target: 19 classes
483 | y_3 = to_categorical(y, num_classes=19)
484 |
485 |
486 | for fold, (xx, yy) in enumerate(kfold.split(train, y)):
487 |
488 | mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
489 | 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
490 | 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
491 | 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
492 | 16: 'C_2', 17: 'C_5', 18: 'C_6'}
493 | new_mapping = {'A':0,'B':1,'C':2,'D':3}
494 |
495 | model = Net(60)
496 | model.summary()
497 | model.compile(loss=[custom_loss,custom_loss,custom_loss],loss_weights=[3,7,21],
498 | optimizer=Adam(),
499 |                   metrics=["acc"])
500 | plateau = ReduceLROnPlateau(monitor="19class_acc",
501 | verbose=1,
502 | mode='max',
503 | factor=0.5,
504 | patience=18)
505 | early_stopping = EarlyStopping(monitor="val_19class_acc",
506 | verbose=1,
507 | mode='max',
508 | patience=60)
509 | # file_path = save_dir + f'/fold{fold}' + '_weights_{epoch:02d}_{val_loss:.2f}_{val_acc:.2f}.h5'
510 | # checkpoint = ModelCheckpoint(file_path,
511 | # monitor='val_acc',
512 | # verbose=1,
513 | # mode='max',
514 | #                              save_best_only=False) # save every epoch's weights (not just the best)
515 | checkpoint = ModelCheckpoint(f'Conv2d_multiloss_fold{fold}.h5',
516 | monitor="val_19class_acc",
517 | verbose=0,
518 | mode='max',
519 | save_best_only=True)
520 |
521 | train_res = model.fit(train[xx], [y_1[xx], y_2[xx], y_3[xx]],
522 | epochs=1000, ################################################
523 | batch_size=32,
524 | verbose=1,
525 | shuffle=True,
526 | validation_data=(train[yy], [y_1[yy], y_2[yy],y_3[yy]]),
527 | callbacks=[plateau, early_stopping, checkpoint],
528 | class_weight=[classweights1,classweights2,classweights3])
529 | history.append(train_res)
530 |
531 |
532 | #     # find the epoch with the highest val_acc and predict with the left+1+right surrounding checkpoints
533 | # left=2
534 | # right=2
535 | # max_acc_index=history[fold].history['val_acc'].index(np.max(history[fold].history['val_acc']))+1
536 |
537 | # save_filelist=os.listdir(save_dir)
538 | # save_filelist.sort()
539 | # select_blending=save_filelist[max_acc_index-left : max_acc_index+right]
540 | # print(select_blending)
541 | # for file in select_blending:
542 | # model.load_weights(save_dir+'/'+file)
543 | # proba_t += model.predict(t, verbose=0, batch_size=1024) / (fold_num*len(select_blending))
544 | # proba_oof[yy] = model.predict(train[yy],verbose=0,batch_size=1024) / len(select_blending)
545 |
546 | model.load_weights(f'Conv2d_multiloss_fold{fold}.h5')
547 | proba_t += model.predict(test, verbose=0, batch_size=1024)[2] / fold_num
548 |     proba_oof[yy] += model.predict(train[yy], verbose=0, batch_size=1024)[2]
549 |
550 | oof_y = np.argmax(proba_oof[yy], axis=1)
551 | acc = round(accuracy_score(y[yy], oof_y),3)
552 | print(acc)
553 | oof_score.append(acc)
554 | scores = sum(acc_combo(y_true, y_pred) for y_true, y_pred in zip(y[yy], oof_y)) / oof_y.shape[0]
555 | oof_comm.append(scores)
556 | print(round(scores, 5))
557 |
558 |
559 | # In[ ]:
560 |
561 |
562 |
563 |
564 |
565 | # In[ ]:
566 |
567 |
568 |
569 |
570 |
571 | # In[21]:
572 |
573 |
574 | for index,i in enumerate(oof_comm):
575 | print(index,i,oof_score[index])
576 |
577 | oof_dict = {
578 | "oof":proba_oof,
579 | "test":proba_t,
580 | "acc":oof_comm,
581 | }
582 | import joblib
583 | joblib.dump(oof_dict,"spetron0728_conv2_2_net_multiloss_%.5f_dict.pkl"% np.mean(oof_comm))
584 |
585 |
586 | # In[26]:
587 |
588 |
589 |
590 | from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score
591 |
592 | def acc_combo(y, y_pred):
593 |     # mapping between numeric IDs and behavior codes
594 | mapping = {0: 'A_0', 1: 'A_1', 2: 'A_2', 3: 'A_3',
595 | 4: 'D_4', 5: 'A_5', 6: 'B_1',7: 'B_5',
596 | 8: 'B_2', 9: 'B_3', 10: 'B_0', 11: 'A_6',
597 | 12: 'C_1', 13: 'C_3', 14: 'C_0', 15: 'B_6',
598 | 16: 'C_2', 17: 'C_5', 18: 'C_6'}
599 |     # convert behavior IDs to their codes
600 |     code_y, code_y_pred = mapping[y], mapping[y_pred]
601 |     if code_y == code_y_pred: # identical codes score 1.0
602 |         return 1.0
603 |     elif code_y.split("_")[0] == code_y_pred.split("_")[0]: # only the letter part matches: score 1.0/7
604 |         return 1.0/7
605 |     elif code_y.split("_")[1] == code_y_pred.split("_")[1]: # only the digit part matches: score 1.0/3
606 | return 1.0/3
607 | else:
608 | return 0.0
609 |
610 | train_y = y
611 | labels = np.argmax(proba_t, axis=1)
612 | oof_y = np.argmax(proba_oof, axis=1)
613 | print(round(accuracy_score(train_y, oof_y), 5))
614 | scores = sum(acc_combo(y_true, y_pred) for y_true, y_pred in zip(train_y, oof_y)) / oof_y.shape[0]
615 | print(round(scores, 5))
616 | data_path = '../../data/'
617 | sub = pd.read_csv(data_path+'提交结果示例.csv')
618 | sub['behavior_id'] = labels
619 |
620 | vc = pd.Series(train_y).value_counts().sort_index()
621 | # sns.barplot(vc.index, vc.values)
622 | # plt.show()
623 |
624 | vc = pd.Series(oof_y).value_counts().sort_index()
625 | # sns.barplot(vc.index, vc.values)
626 | # plt.show()
627 |
628 | vc = sub['behavior_id'].value_counts().sort_index()
629 | # sns.barplot(vc.index, vc.values)
630 | # plt.show()
631 | sub.to_csv('0728_conv2_multoloss_nn%.5f.csv' % scores, index=False)
632 | sub.info()
633 |
634 |
--------------------------------------------------------------------------------