├── competitions
├── aiyanxishe_102_flowers
│ ├── README.md
│ └── fastai_densenet121.ipynb
├── dc_onecity
│ ├── rank.png
│ └── README.md
├── dc_fraud_phonecall
│ ├── 1.png
│ └── README.md
├── tianchi_car_sale
│ ├── data.png
│ ├── README.md
│ └── final
│ │ ├── xgb.ipynb
│ │ └── lgb.ipynb
├── kesci_datajoy_airbnb
│ ├── 2_text.png
│ ├── 1_train_info.png
│ └── README.md
├── property_chat_pair
│ ├── run.sh
│ ├── README.md
│ ├── merge_folds.py
│ └── run_folds.py
├── sodic_enterprise_hidden_dangers
│ ├── pics
│ │ └── data.png
│ └── RAEDME.md
├── 2022ccf_bigdata_platform_log
│ └── README.md
├── 2021ccf_ueba
│ ├── README.md
│ └── baseline.py
├── 2021shandong_dianwang
│ └── README.md
├── wsdm_iqiyi_torch
│ ├── README.md
│ ├── model.py
│ ├── feature.py
│ ├── model_tools.py
│ └── train.py
├── sohu2022_nlp_rec
│ ├── README.md
│ └── Rec_deepfm.ipynb
├── fund_raising_risk_prediction
│ └── README.md
├── 2022ccf_web_attack_detect
│ └── README.md
├── 2021ccf_loan
│ └── README.md
├── 2021shandong_dongying
│ └── README.md
├── tianchi_elm_delivery
│ ├── README.md
│ ├── part2_feature.ipynb
│ └── data.ipynb
├── wsdm_iqiyi
│ └── README.md
├── 2021ccf_aqy
│ └── README.md
├── kesci_public_health_answers_classification
│ └── README.md
├── shenshui_baoqiliang
│ └── README.md
├── 2021ccf_ner
│ └── README.md
├── aiyanxishe_text_similarity
│ ├── main.py
│ └── README.md
├── 2021ccf_SysRisk
│ └── README.md
├── sodic_job_match
│ └── README.md
├── bien_bmes
│ └── README.md
├── dc_molecule
│ └── README.md
├── tianchi_news_classification
│ ├── RAEDME.md
│ └── baseline.ipynb
├── iflytek_agriculture_ner
│ ├── README.md
│ └── baseline.ipynb
├── aiyanxishe_weibo_baseline
│ ├── README.md
│ └── yanxishe_weibo_BERT_baseline.ipynb
├── haihua2021
│ └── README.md
├── aiyanxishe_fraud_job
│ └── README.md
├── dc_yizhifu
│ └── README.md
├── serverless_load_prediction
│ └── README.md
├── tianchi_aiops2022
│ └── README.md
├── xiamen_international_bank_2020
│ ├── baseline.py
│ └── README.md
├── pingan_baoxian
│   └── model.ipynb
└── README.md
/competitions/aiyanxishe_102_flowers/README.md:
--------------------------------------------------------------------------------
1 | FastAI DenseNet121 with TTA, score: ~95.5
2 |
--------------------------------------------------------------------------------
/competitions/dc_onecity/rank.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LogicJake/competition_baselines/HEAD/competitions/dc_onecity/rank.png
--------------------------------------------------------------------------------
/competitions/dc_fraud_phonecall/1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LogicJake/competition_baselines/HEAD/competitions/dc_fraud_phonecall/1.png
--------------------------------------------------------------------------------
/competitions/tianchi_car_sale/data.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LogicJake/competition_baselines/HEAD/competitions/tianchi_car_sale/data.png
--------------------------------------------------------------------------------
/competitions/kesci_datajoy_airbnb/2_text.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LogicJake/competition_baselines/HEAD/competitions/kesci_datajoy_airbnb/2_text.png
--------------------------------------------------------------------------------
/competitions/property_chat_pair/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | for i in `seq 0 9`; do
4 | python3 run_folds.py ${i}
5 | done
6 |
7 | python3 merge_folds.py
8 |
--------------------------------------------------------------------------------
/competitions/kesci_datajoy_airbnb/1_train_info.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LogicJake/competition_baselines/HEAD/competitions/kesci_datajoy_airbnb/1_train_info.png
--------------------------------------------------------------------------------
/competitions/sodic_enterprise_hidden_dangers/pics/data.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LogicJake/competition_baselines/HEAD/competitions/sodic_enterprise_hidden_dangers/pics/data.png
--------------------------------------------------------------------------------
/competitions/tianchi_car_sale/README.md:
--------------------------------------------------------------------------------
1 | # tianchi_car_sale
2 | Data Mining for Beginners - Used Car Price Prediction baseline
3 |
4 | ## Host: Tianchi
5 | ## Track: Data Mining for Beginners - Used Car Price Prediction
6 |
7 | **Track link**: https://tianchi.aliyun.com/competition/entrance/231784/introduction
8 | ## Data description
9 | 
10 |
11 | ## Model score
12 | Offline MAE: 483.599941687254
--------------------------------------------------------------------------------
/competitions/2022ccf_bigdata_platform_log/README.md:
--------------------------------------------------------------------------------
1 | # 2022 CCF Big Data Platform Security Event Detection and Classification, unsupervised track, baseline
2 |
3 | Track link: https://www.datafountain.cn/competitions/595
4 |
5 | Note that this competition provides no labels: it is unsupervised learning. The preliminary round is binary classification, the final round is 7-class classification, and both stages stay unsupervised.
6 |
7 | This is my first unsupervised task and the score is not high: this baseline only reaches about 0.549, which ranked 21st (roughly top 50%) at the time of writing. There is little technical depth here; treat it as a starting point.
8 |
9 | ## baseline approach
10 |
11 | 1. Build count features;
12 | 2. Build simple TF-IDF features from the message field;
13 | 3. Fit an Isolation Forest and predict outliers;
14 | 4. Tune the decision threshold (a minimal sketch follows below).
15 |
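A minimal sketch of the four steps above, assuming a `message` text column and a hypothetical `train.csv` path (the real notebook differs in detail):

```
# sketch only: column names and paths are assumptions, not the repo's code
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.ensemble import IsolationForest

df = pd.read_csv("train.csv")  # hypothetical path

# 1. count feature: how often each message template appears
df["msg_count"] = df.groupby("message")["message"].transform("count")

# 2. simple TF-IDF features on the raw message text
tfidf = TfidfVectorizer(max_features=64)
X_text = tfidf.fit_transform(df["message"].fillna("")).toarray()

X = pd.concat([df[["msg_count"]].reset_index(drop=True),
               pd.DataFrame(X_text)], axis=1)
X.columns = X.columns.astype(str)

# 3. Isolation Forest assigns an anomaly score; lower = more abnormal
clf = IsolationForest(n_estimators=200, random_state=2022)
clf.fit(X)
scores = clf.score_samples(X)

# 4. threshold tuning: flag the most abnormal q% as positives
q = 0.10
threshold = pd.Series(scores).quantile(q)
df["pred"] = (scores <= threshold).astype(int)
```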
--------------------------------------------------------------------------------
/competitions/2021ccf_ueba/README.md:
--------------------------------------------------------------------------------
1 | # Launched, yet somehow not! 2021 CCF UEBA-based Abnormal Internet Usage Behavior Analysis baseline
2 | - Track name: UEBA-based abnormal internet usage behavior analysis
3 | - Track link: https://www.datafountain.cn/competitions/520
4 |
5 | ## Task
6 | Use machine learning, deep learning, UEBA and other AI methods to build, from unlabeled daily internet logs, a user behavior baseline and a behavior scoring model, and measure how far each behavior deviates from the baseline.
7 | (1) Build a behavior baseline from users' daily internet data;
8 | (2) With an unsupervised model built on user behavior features, build a scoring model that evaluates how much a behavior deviates from the baseline.
9 |
10 | ## baseline
11 | The baseline was contributed by 天才儿童; see the full code via the "read more" link. It is for reference only, because of the following issues:
12 | 1. The training set actually has labels and the leaderboard metric is RMSE, yet the organizers require an unsupervised algorithm, so the leaderboard means little. The organizers replied that the practical value of the solution matters most, so it may effectively become a solution-report competition.
13 | 2. The train/test split is problematic and leaks badly.
14 |
15 |
--------------------------------------------------------------------------------
/competitions/2021shandong_dianwang/README.md:
--------------------------------------------------------------------------------
1 | # Shandong Big Data - Power Grid Bus Load Forecasting, Prophet baseline
2 |
3 | Track: http://data.sd.gov.cn/cmpt/cmptDetail.html?id=55
4 |
5 | ## Task
6 |
7 | The data contains load readings for buses and outgoing lines of different types of substations, sampled every 5 minutes. The goal is to process and model these series and forecast the load for several days ahead. Grid topology is complex, so a very large number of series must be forecast.
8 |
9 | ## Difficulties
10 |
11 | - Univariate time-series forecasting; the challenge is the very large number of points to predict;
12 | - Different buses require different forecast horizons;
13 | - Some buses vary a lot from year to year.
14 |
15 | ## baseline
16 |
17 | - This baseline only uses the most recent month of data;
18 | - One Prophet model per bus; the whole run takes about 10 minutes (a minimal sketch follows below);
19 | - Online score: 5.21
20 |
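A minimal per-bus Prophet loop, assuming a long-format table with `bus_id` / `ds` / `y` columns (hypothetical names) and a 7-day horizon:

```
# sketch only: the real data layout and horizons per bus differ
import pandas as pd
from prophet import Prophet

df = pd.read_csv("train.csv", parse_dates=["ds"])  # hypothetical path

forecasts = []
for bus_id, g in df.groupby("bus_id"):
    # only keep the most recent month of 5-minute readings
    g = g[g["ds"] >= g["ds"].max() - pd.Timedelta(days=30)]

    m = Prophet(daily_seasonality=True, weekly_seasonality=True)
    m.fit(g[["ds", "y"]])

    # forecast the required horizon at 5-minute resolution
    future = m.make_future_dataframe(periods=7 * 24 * 12, freq="5min",
                                     include_history=False)
    pred = m.predict(future)[["ds", "yhat"]]
    pred["bus_id"] = bus_id
    forecasts.append(pred)

submission = pd.concat(forecasts, ignore_index=True)
```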
21 | ## TODO
22 |
23 | - 尝试更多的数据;
24 | - 构造特征,跑 LGB 等树模型;
25 | - LSTM 等 NN 模型;
26 | - 后处理;
27 | - 融合。
28 |
--------------------------------------------------------------------------------
/competitions/wsdm_iqiyi_torch/README.md:
--------------------------------------------------------------------------------
1 | # A small and fast PyTorch baseline for the iQIYI user retention prediction challenge, ~84 online
2 |
3 | Label and feature construction follow the official baseline. Only two features are used: offline score 86, online score 84.
4 |
5 | ### Track link
6 |
7 | http://challenge.ai.iqiyi.com/detail?raceId=61600f6cef1b65639cd5eaa6
8 |
9 | ### baseline
10 |
11 | This baseline does the following:
12 |
13 | 1. Only user_id and a 32-day launch type sequence are used. Since only one sample is built per user, user_id contributes little (user_id alone scores ~79 online); most of the gain comes from the 32-day launch type sequence;
14 | 2. Compared with the official baseline, the launch type is fed in as an embedding sequence rather than a plain numeric sequence, which gives a small improvement;
15 | 3. Early stopping was added;
16 | 4. For more features, see the official (Keras-based) baseline and 水哥's [Paddle version](https://aistudio.baidu.com/aistudio/projectdetail/2715522).
17 |
--------------------------------------------------------------------------------
/competitions/sohu2022_nlp_rec/README.md:
--------------------------------------------------------------------------------
1 | # Sohu Campus 2022 Sentiment Analysis x Recommendation Ranking competition baseline
2 |
3 | - Track link: https://www.biendata.xyz/competition/sohu_2022/
4 | - This baseline should score roughly NLP: 0.655, REC: 0.550 on the leaderboard
5 |
6 | ## Environment:
7 |
8 | - transformers
9 | - deepctr (the deepctr-torch version should be used later, so we do not have to juggle torch and TF at the same time :( )
10 |
11 | ## Reproduction steps
12 |
13 | 1. Run NLP_training to train the NLP models (409M x 5 < 2G)
14 | 2. Run NLP_infer to generate the NLP submission file and, at the same time, the sentiment features needed by the rec model
15 | 3. Run Rec_deepfm to generate the REC submission file
16 | 4. Put both files in a submission directory and zip them as submission.zip for submission
17 |
18 | ## Possible improvements
19 |
20 | - Apply other NLP tricks to improve the NLP part
21 | - The entity-sentiment features do not seem to help the rec model much; further exploration is needed
22 |
--------------------------------------------------------------------------------
/competitions/property_chat_pair/README.md:
--------------------------------------------------------------------------------
1 | # CCF 2020 - Real-Estate Industry Chat Q&A Matching
2 |
3 | ## Task
4 |
5 | See the competition page for details: https://www.datafountain.cn/competitions/474
6 |
7 | This is a classic sentence-pair classification task.
8 |
9 | | Customer question | Agent reply | Label |
10 | | ------- | ---------- | ------ |
11 | | 您好,请问这个户型有什么优缺点呢?(What are the pros and cons of this floor plan?) | 你是想看看这套房子是吗 (So you want to view this apartment?) | 0 |
12 | | | 在的 (I'm here) | 0 |
13 | | | 此房房型方正 得房率高 多层不带电梯4/6楼 (Square layout, high usable-area ratio, walk-up building, 4th/6th floor) | 1 |
14 |
15 | ## Baseline
16 |
17 | - 10-fold RoBERTa; a single fold scores 0.749 on the leaderboard
18 | - Just run run.sh; it outputs baseline.tsv ready for submission, which scores 0.764, currently around top 20
19 | - Dependencies: simpletransformers and the 'hfl/chinese-roberta-wwm-ext' pretrained model (downloaded automatically by the script)
20 | - Hyperparameters can be tuned via train_args in run_folds.py
21 |
--------------------------------------------------------------------------------
/competitions/fund_raising_risk_prediction/README.md:
--------------------------------------------------------------------------------
1 | # CCF 2020 - Corporate Illegal Fundraising Risk Prediction
2 |
3 | ## Task
4 |
5 | Competition: Corporate Illegal Fundraising Risk Prediction
6 |
7 | Background: Illegal fundraising seriously disrupts normal economic and financial order, causes financial losses for participants, and can trigger social instability and public-security problems. Building a model that predicts whether a company carries illegal-fundraising risk from large amounts of corporate information is valuable to regulators, business partners, and investors.
8 |
9 | Task: Train a model with machine learning or deep learning that learns from company information to predict whether a company carries illegal-fundraising risk. The difficulty lies in the large amount of heterogeneous corporate data; extracting effective features from it is the key problem.
10 |
11 | Organizer: Institute of Smart City, USTC
12 |
13 | ## Baseline
14 |
15 | The dataset contains several tables.
16 |
17 | This baseline only uses the base table `base_info.csv` for feature engineering and modeling (a minimal sketch follows below):
18 |
19 | 1. Missing values are simply filled with the median
20 | 2. Redundant and useless features are dropped
21 | 3. Categorical variables get label encoding and frequency encoding;
22 | 4. 5-fold LGB with f1 as eval_metric
23 |
24 | Score: ~0.835 offline, ~0.825 online
25 |
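A minimal sketch of steps 1-4, with hypothetical column names (`label` as the target) and simplified handling:

```
# sketch only: the real notebook drops more columns and tunes parameters
import numpy as np
import pandas as pd
import lightgbm as lgb
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import f1_score

df = pd.read_csv("base_info.csv")             # hypothetical path
target = df.pop("label")                      # hypothetical target column
df = df.fillna(df.median(numeric_only=True))  # 1. median fill

cat_cols = df.select_dtypes("object").columns
for c in cat_cols:
    df[c + "_freq"] = df[c].map(df[c].value_counts())   # frequency encoding
    df[c] = df[c].astype("category").cat.codes          # label encoding

oof = np.zeros(len(df))
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=2020)
for tr_idx, va_idx in skf.split(df, target):
    model = lgb.LGBMClassifier(n_estimators=2000, learning_rate=0.05)
    model.fit(df.iloc[tr_idx], target.iloc[tr_idx],
              eval_set=[(df.iloc[va_idx], target.iloc[va_idx])],
              callbacks=[lgb.early_stopping(100, verbose=False)])
    oof[va_idx] = model.predict(df.iloc[va_idx])

print("offline f1:", f1_score(target, oof))
```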
--------------------------------------------------------------------------------
/competitions/2022ccf_web_attack_detect/README.md:
--------------------------------------------------------------------------------
1 | # 2022 CCF Web Attack Detection and Classification track, ~94.7 online baseline
2 |
3 | # Track link:
4 |
5 | https://www.datafountain.cn/competitions/596
6 |
7 | # Background:
8 |
9 | A certain platform captures more than 200 million web attacks per month, covering injection, code execution, and other common types. Traditional detection relies on rule matching against known attack signatures and cannot catch unknown vulnerabilities or techniques. Quickly and accurately recognizing unknown attacks and classifying them correctly is essential for improving web attack detection, and machine learning / deep learning on attack payloads has become a promising direction for research and application.
10 |
11 | # Task:
12 |
13 | Teams analyze the provided training set and build models via feature engineering, machine learning, and deep learning to classify each sample correctly and quickly, maximizing precision and recall. Models are finally evaluated by accuracy on an unlabeled test set.
14 |
15 | # baseline approach
16 |
17 | This is a multi-class problem on tabular + text data. Text can be handled with classic TF-IDF or with BERT-style encoders, combined with feature engineering.
18 |
19 | This baseline uses TF-IDF + simple feature engineering and a 5-fold LGB model: ~0.98 offline, 94.7 online (a minimal sketch follows below).
20 |
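A minimal sketch of the TF-IDF + feature engineering + LGB idea; field names such as `body` are assumptions, and a single fit is shown where the baseline uses five folds:

```
# sketch only: the real feature set and CV scheme differ
import pandas as pd
import lightgbm as lgb
from scipy.sparse import hstack, csr_matrix
from sklearn.feature_extraction.text import TfidfVectorizer

train = pd.read_csv("train.csv")  # hypothetical paths
test = pd.read_csv("test.csv")

# character n-gram TF-IDF on the request payload
tfidf = TfidfVectorizer(analyzer="char_wb", ngram_range=(2, 4), max_features=20000)
X_text = tfidf.fit_transform(train["body"].fillna(""))
X_text_test = tfidf.transform(test["body"].fillna(""))

# a couple of simple handcrafted features
for df in (train, test):
    df["body_len"] = df["body"].fillna("").str.len()
    df["n_special"] = df["body"].fillna("").str.count(r"[<>'\"%;()&]")

num_cols = ["body_len", "n_special"]
X = hstack([X_text, csr_matrix(train[num_cols].values)]).tocsr()
X_test = hstack([X_text_test, csr_matrix(test[num_cols].values)]).tocsr()

model = lgb.LGBMClassifier(objective="multiclass", n_estimators=1000, learning_rate=0.05)
model.fit(X, train["label"])
test["pred"] = model.predict(X_test)
```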
--------------------------------------------------------------------------------
/competitions/2021ccf_loan/README.md:
--------------------------------------------------------------------------------
1 | # 0.868 with raw features! 2021 CCF Personal Loan Default Prediction baseline
2 |
3 | Track link: https://www.datafountain.cn/competitions/530
4 |
5 | ## Random thoughts
6 |
7 | A typical credit-risk problem, no long introduction needed.
8 |
9 | The organizers mention transfer learning and provide two tables: train_public and train_internet_public.
10 |
11 | My feeling is that they can simply be concatenated and used directly; why this counts as transfer learning is beyond me.
12 |
13 | ## Approach
14 |
15 | This baseline mainly checks whether training on both tables beats a single table:
16 |
17 | - only fields shared by both tables are used;
18 | - some light data cleaning;
19 | - no feature engineering, left for you to explore;
20 | - 5-fold LGB
21 |
22 | ## Online
23 |
24 | The offline score is fairly low at 0.806, but online it reaches 0.86832636137, reportedly better than raw features on train_public alone (0.85X).
25 |
26 | ## 2021/09/27
27 |
28 | Try "adversarial validation" to pick the internet-table rows whose distribution best matches the public table (a minimal sketch follows below):
29 |
30 | - set target = 1 for public rows
31 | - set target = 0 for internet rows
32 | - concatenate the two tables
33 | - train a model to tell the two tables apart
34 | - keep the internet rows that the trained model assigns the highest probability of being "public"
35 |
36 | See adv.ipynb for details
37 |
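A minimal adversarial-validation sketch following the five steps above (file paths and the simplification to numeric features are assumptions; adv.ipynb is the authoritative version):

```
# sketch only, not the contents of adv.ipynb
import pandas as pd
import lightgbm as lgb
from sklearn.model_selection import cross_val_predict

public = pd.read_csv("train_public.csv")             # hypothetical paths
internet = pd.read_csv("train_internet_public.csv")

common_cols = [c for c in public.columns if c in internet.columns and c != "isDefault"]
public["adv_target"], internet["adv_target"] = 1, 0

adv = pd.concat([public[common_cols + ["adv_target"]],
                 internet[common_cols + ["adv_target"]]], ignore_index=True)
X = adv[common_cols].select_dtypes("number")   # keep it simple: numeric features only
y = adv["adv_target"]

clf = lgb.LGBMClassifier(n_estimators=500, learning_rate=0.05)
# out-of-fold probability that a row "looks like" the public table
adv["p_public"] = cross_val_predict(clf, X, y, cv=5, method="predict_proba")[:, 1]

# keep the internet rows most similar to the public distribution
internet_scored = adv[adv["adv_target"] == 0]
selected = internet_scored.sort_values("p_public", ascending=False).head(100000)
```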
--------------------------------------------------------------------------------
/competitions/property_chat_pair/merge_folds.py:
--------------------------------------------------------------------------------
1 | import warnings
2 | warnings.simplefilter('ignore')
3 |
4 | import numpy as np
5 | import pandas as pd
6 |
7 | p0 = np.load('prob_0.npy')
8 | p1 = np.load('prob_1.npy')
9 | p2 = np.load('prob_2.npy')
10 | p3 = np.load('prob_3.npy')
11 | p4 = np.load('prob_4.npy')
12 | p5 = np.load('prob_5.npy')
13 | p6 = np.load('prob_6.npy')
14 | p7 = np.load('prob_7.npy')
15 | p8 = np.load('prob_8.npy')
16 | p9 = np.load('prob_9.npy')
17 |
18 | p = (p0 + p1 + p2 + p3 + p4 + p5 + p6 + p7 + p8 + p9) / 10
19 |
20 | submit_sample = pd.read_csv('raw_data/sample_submission.tsv', sep='\t', header=None)
21 | submit_sample.columns =['qid', 'rid', 'label']
22 |
23 | submit_sample['label'] = p.argmax(axis=1)
24 |
25 | submit_sample.to_csv('baseline.tsv', sep='\t', index=False, header=False)
26 |
--------------------------------------------------------------------------------
/competitions/2021shandong_dongying/README.md:
--------------------------------------------------------------------------------
1 | # Shandong competition - Grid Event Classification baseline
2 |
3 | Track link: http://data.sd.gov.cn/cmpt/cmptDetail.html?id=67
4 |
5 | ## Task
6 |
7 | Given community-grid event records, analyze the event description and classify each event into its government-affairs category.
8 |
9 | ## baseline
10 |
11 | This is a classic multi-class text classification problem. I usually ship simpletransformers baselines; this time it is a proper transformers baseline.
12 |
13 | ### Text preprocessing
14 |
15 | Simply concatenate the text fields, separated by [SEP]
16 |
17 | ```
18 | def concat_text(row):
19 | return f'事件简述:{row["name"]}[SEP]'\
20 | f'事件内容:{row["content"]}'
21 |
22 | train['text'] = train.apply(lambda row: concat_text(row), axis=1)
23 | ```
24 |
25 | ### Pretrained model
26 |
27 | hfl/chinese-roberta-wwm-ext
28 |
29 | ### Training
30 |
31 | - single fold
32 | - only 2 epochs; val acc around 0.667
33 |
34 | ### Online score
35 |
36 | 0.68018
37 |
38 | ### TODO
39 |
40 | - more folds
41 | - ensembling
42 | - etc.
--------------------------------------------------------------------------------
/competitions/dc_onecity/README.md:
--------------------------------------------------------------------------------
1 | # China Mobile (Xiong'an Industrial Research Institute) 1st OneCity Programming Contest baseline
2 |
3 | Track link: https://www.dcjingsai.com/v2/cmptDetail.html?id=457
4 |
5 | ## Background
6 |
7 | This OneCity contest is about smart-government data management: government spreadsheet files should be automatically mapped to predefined categories based on their titles and contents, so that data archiving becomes faster and smarter. The task is to extract key information from government tabular files and classify them automatically.
8 |
9 | ## Baseline
10 |
11 | This is a multi-class text classification problem.
12 |
13 | The data is large: 60,000 training files and 8,000 test files, but using the file name alone already reaches above 0.9 accuracy, e.g.
14 |
15 | ```
16 | train/市管宗教活动场所.csv 文化休闲
17 | train/价格监测信息公开事项汇总信息_.xls 经济管理
18 | ...
19 | ```
20 |
21 | The pretrained model is chinese-roberta-wwm-ext, with a 9:1 train/validation split (a minimal sketch follows after the parameters below).
22 |
23 | Parameters:
24 |
25 | ```
26 | model_args.max_seq_length = 128
27 | model_args.train_batch_size = 16
28 | model_args.num_train_epochs = 3
29 | ```
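A minimal simpletransformers sketch using these parameters; the input file and label handling are placeholders, not the original notebook:

```
# sketch only: assumes a dataframe with "text" (file name) and "labels" columns
import pandas as pd
from simpletransformers.classification import ClassificationModel, ClassificationArgs

train_df = pd.read_csv("train_filenames.csv")   # hypothetical file

model_args = ClassificationArgs()
model_args.max_seq_length = 128
model_args.train_batch_size = 16
model_args.num_train_epochs = 3

model = ClassificationModel(
    "bert",
    "hfl/chinese-roberta-wwm-ext",
    num_labels=train_df["labels"].nunique(),
    args=model_args,
)
model.train_model(train_df)

# classify from the file name alone
preds, _ = model.predict(["train/某某文件.csv"])
```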
30 |
31 | On a 1080Ti one epoch takes about 12 minutes.
32 |
33 | Offline 0.977, online 0.97662 (4th place at the time of writing)
34 |
35 | 
36 |
--------------------------------------------------------------------------------
/competitions/tianchi_elm_delivery/README.md:
--------------------------------------------------------------------------------
1 | ## Competition link
2 | https://tianchi.aliyun.com/competition/entrance/231777/introduction
3 |
4 | ## Background
5 | With the growth of e-commerce and local life services, logistics has expanded rapidly: over 100 million parcel orders are handled per day in China, and instant delivery (food delivery being the typical case) exceeds 50 million orders per day, served by more than 5 million couriers. Assigning orders to couriers efficiently is a hard problem, and predicting courier behavior is a key prerequisite for efficient dispatching. The scale of the problem challenges both machine learning and operations research: classic exact methods (branch and bound, dynamic programming), heuristics (genetic, evolutionary, ant colony, simulated annealing, particle swarm) and, more recently, machine learning, deep learning, graph neural networks and reinforcement learning have all been applied, often in combination.
6 |
7 | ## Task
8 | In the Ele.me delivery scenario, the dispatching platform continuously assigns orders to couriers. A courier's actions are either picking up an order at a shop or delivering it to a customer. At any moment a courier holds newly assigned orders plus previously assigned unfinished ones, and decides the next task based on the state of all held orders and the current position. The task is to predict the courier's next action from historical decisions and the current state.
9 |
10 | Pick-up must happen before delivery. Evaluation combines action accuracy and time accuracy as a weighted sum: action accuracy counts for 80% and time accuracy for 20%. In the preliminary stage the ranking mainly follows action accuracy; the final ranking uses the normalized weighted sum.
11 |
12 | ## Model score
13 | Offline score: 0.6243134050569703
--------------------------------------------------------------------------------
/competitions/wsdm_iqiyi/README.md:
--------------------------------------------------------------------------------
1 | # Unofficial baseline for the iQIYI user retention prediction challenge
2 |
3 | The official baseline will be released on 11.22 and reportedly scores around 85.
4 |
5 | If you want to try it yourself first, have a look at this baseline.
6 |
7 | ### Track link
8 |
9 | http://challenge.ai.iqiyi.com/detail?raceId=61600f6cef1b65639cd5eaa6
10 |
11 | ### Task description
12 |
13 | The iQIYI mobile app uses AI to personalize the product experience and uses the "N-day retention score" as a key satisfaction metric. For example, if a user's "7-day retention score" on Oct 1 is 3, the user will open the app on 3 of the next 7 days (Oct 2-8). Retention is hard to predict: users differ widely in preferences and activity, and factors such as available leisure time and trending content also have strong periodic effects.
14 |
15 | Based on desensitized, sampled iQIYI app data, participants must predict each user's 7-day retention score.
16 |
17 | ### Data description
18 |
19 | The data includes video metadata, user profiles, app-launch logs, and viewing/interaction logs. For each test user, predict the 7-day retention score on a given date; the score ranges from 0 to 7 and should be reported to 2 decimal places.
20 |
21 | ### Evaluation metric
22 |
23 | `1 - mae(true, pred) / 7` (a minimal sketch follows below)
24 |
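A minimal sketch of this metric, matching cal_score in the PyTorch version's model_tools.py (scaled to 0-100 as on the leaderboard):

```
import numpy as np

def retention_score(y_true, y_pred):
    """1 - MAE / 7, expressed as a percentage."""
    mae = np.mean(np.abs(np.asarray(y_true) - np.asarray(y_pred)))
    return 100 * (1 - mae / 7)

print(retention_score([3, 0, 7], [2.5, 1.0, 6.0]))
```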
25 | ### baseline
26 |
27 | This baseline does the following:
28 |
29 | 1. The training set has no labels, so labels are built according to the task definition;
30 | 2. A few statistical features from the user tables;
31 | 3. Item-side features are not used yet;
32 | 4. 5-fold catboost;
33 | 5. 74.95 offline, 79.83 online.
34 |
35 | ### TODO
36 |
37 | 1. More user features;
38 | 2. Item-side features;
39 | 3. Deep learning models.
--------------------------------------------------------------------------------
/competitions/wsdm_iqiyi_torch/model.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 |
5 | class AQYModel(nn.Module):
6 | def __init__(self):
7 | super(AQYModel, self).__init__()
8 |
9 | self.user_id_embedding = nn.Embedding(600000 + 1, 16)
10 | self.launch_type_embedding = nn.Embedding(2 + 1, 16)
11 |
12 | self.launch_seq_gru = nn.GRU(input_size=16,
13 | hidden_size=16,
14 | batch_first=True)
15 |
16 | self.fc = nn.Linear(32, 1)
17 |
18 | def forward(self, user_id, launch_seq):
19 | user_id_emb = self.user_id_embedding(user_id)
20 |
21 | # launch_seq = launch_seq.reshape((-1, 32, 1))
22 | launch_seq = self.launch_type_embedding(launch_seq)
23 |
24 | launch_seq, _ = self.launch_seq_gru(launch_seq)
25 | launch_seq = torch.mean(launch_seq, dim=1)
26 |
27 | fc_input = torch.cat([user_id_emb, launch_seq], 1)
28 |
29 | pred = self.fc(fc_input)
30 |
31 | return pred
32 |
--------------------------------------------------------------------------------
/competitions/2021ccf_aqy/README.md:
--------------------------------------------------------------------------------
1 | # First release! 2021 CCF iQIYI Script Role Emotion Recognition baseline
2 |
3 | - Track name: Script Role Emotion Recognition
4 | - Track link: https://www.datafountain.cn/competitions/518
5 |
6 | ## Task
7 |
8 | A set of movie scripts, annotated by humans, is provided as the training set. For every line of dialogue or action description, teams must recognize multi-dimensional emotions for each character involved. The main challenges: 1) script language is far more colloquial than news-style corpora; 2) a character's emotion may depend heavily on earlier context, not just the current sentence.
9 |
10 | ## Data
11 |
12 | ### Fields
13 |
14 | - id
15 | - content: the text, either dialogue or an action description
16 | - character: the character name mentioned in the text
17 | - emotion: the emotion labels (love, joy, surprise, anger, fear, sadness)
18 |
19 | ### Notes
20 |
21 | - There are 6 emotion classes, in fixed order: love, joy, surprise, anger, fear, sadness;
22 | - The emotion output is the 6 values in that fixed order, each in [0, 1, 2, 3] (0 = none, 1 = weak, 2 = medium, 3 = strong), comma-separated;
23 | - Character names do not need to be extracted;
24 |
25 | PS: Some training texts have no labels; they are mostly scene descriptions with no emotion. They can be used as context or simply ignored (per the official answer).
26 |
27 | ## baseline approach
28 |
29 | This is a multi-label, multi-output problem; the baseline uses the MultiLabelClassification module of simpletransformers:
30 |
31 | https://simpletransformers.ai/docs/multi-label-classification/
32 |
33 | - simply concatenate content and character
34 | - hfl/chinese-bert-wwm-ext
35 | - train on the full data for a single epoch; online score around 0.682
36 |
37 | See the code for details
38 |
--------------------------------------------------------------------------------
/competitions/kesci_public_health_answers_classification/README.md:
--------------------------------------------------------------------------------
1 | # Medical Data Mining Evaluation | Task 1: Public Health Question Classification
2 |
3 | PS: the competition only runs for ten days, with two submissions per day
4 |
5 | ## Background
6 |
7 | With the digitization of healthcare and the wide adoption of cloud computing, IoT and mobile technologies, huge datasets are generated in medical services, health care and health administration, forming medical big data. Data-mining algorithms are a key means of realizing its value. To promote algorithms that better support intelligent medical information systems, the medical-informatics branch of the Chinese Medical Association organizes this evaluation, which assesses the accuracy and adaptability of algorithms in specific medical scenarios.
8 |
9 | ## Task
10 |
11 | Classify Chinese health-related questions by topic into 6 categories: A diagnosis, B treatment, C anatomy/physiology, D epidemiology, E healthy lifestyle, F choosing a doctor. A question often belongs to several topics, so this is a multi-label classification problem. Teams are ranked by F1-score on the test set.
12 |
13 | ## Data
14 |
15 | Training set: 5,000 Chinese health-related questions with the fields:
16 |
17 | - ID
18 | - Question Sentence
19 | - category_A (diagnosis)
20 | - category_B (treatment)
21 | - category_C (anatomy/physiology)
22 | - category_D (epidemiology)
23 | - category_E (healthy lifestyle)
24 | - category_F (choosing a doctor)
25 |
26 | Test set: 3,000 Chinese health-related questions with the fields:
27 |
28 | - ID
29 | - Question Sentence
30 |
31 | ## Baseline
32 |
33 | A typical multi-label classification task; simply plug in simpletransformers' MultiLabelClassificationModel (a minimal sketch follows below).
34 |
35 | Pretrained model: hfl/chinese-roberta-wwm-ext; a single fold, 5 epochs, scores about 0.625 online.
36 |
37 | See the ipynb for details.
38 |
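A minimal MultiLabelClassificationModel sketch, assuming a dataframe with a `Question Sentence` column and the six category columns (file paths are placeholders, not the notebook's code):

```
import pandas as pd
from simpletransformers.classification import MultiLabelClassificationModel, MultiLabelClassificationArgs

train = pd.read_csv("train.csv")  # hypothetical path
label_cols = [f"category_{c}" for c in "ABCDEF"]

train_df = pd.DataFrame({
    "text": train["Question Sentence"],
    "labels": train[label_cols].values.tolist(),  # e.g. [1, 0, 0, 0, 1, 0]
})

args = MultiLabelClassificationArgs(num_train_epochs=5)
model = MultiLabelClassificationModel(
    "bert", "hfl/chinese-roberta-wwm-ext", num_labels=6, args=args
)
model.train_model(train_df)

test = pd.read_csv("test.csv")
preds, _ = model.predict(test["Question Sentence"].tolist())
```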
--------------------------------------------------------------------------------
/competitions/shenshui_baoqiliang/README.md:
--------------------------------------------------------------------------------
1 | # "Shenshui Cloud Brain Cup" Water Treatment Plant Aeration Prediction baseline
2 |
3 | Track link: https://www.datafountain.cn/competitions/602
4 |
5 | Competition type: tabular, regression, time series
6 |
7 | ## Task
8 |
9 | Using historical operating data from a treatment plant running the A2O-MBR process, build a data-driven aeration model from which the optimal aeration volume can be computed. Combined with the plant's process flow, the model should optimize control of the biological treatment system: keep effluent quality within standards while cutting energy use through automation, helping the municipal wastewater industry lower its carbon footprint.
10 |
11 | In daily operation the aeration volume must be adjusted in real time according to influent/effluent quality and other parameters. Because many factors interact, precise control is not yet possible; the hope is that a machine-learning model can predict the aeration volume and guide production.
12 |
13 | ## baseline approach
14 |
15 | 1. Only 1/4 of the training data has labels; use just that part (pseudo-labeling the rest is a possible follow-up);
16 | 2. As hinted on the competition page, split the data into a "south" and a "north" line and model them separately;
17 | 3. Build simple time features and numeric ratio features;
18 | 4. Apply a log transform to the target;
19 | 5. Run 5-fold LGBM on each line;
20 | 6. Merge the results and submit.
21 |
22 | ~~0.27 offline~~ 0.43 online; the offline/online gap is fairly large
23 |
24 | EDIT: the offline scoring function in the notebook was wrong; it should be
25 |
26 | ```
27 | def calc_score(df1, df2):
28 | df1['loss1'] = (df1['Label1'] - df1['pred'])**2
29 | df2['loss2'] = (df2['Label2'] - df2['pred'])**2
30 | loss1 = (df1['loss1'].sum()/len(df1))**0.5
31 | loss2 = (df2['loss2'].sum()/len(df2))**0.5
32 | loss = (loss1 + loss2) / 2
33 | score = (1 / (1 + loss)) * 1000
34 | return score
35 | ```
36 |
--------------------------------------------------------------------------------
/competitions/2021ccf_ner/README.md:
--------------------------------------------------------------------------------
1 | # Another CCF baseline! Product Review Opinion Extraction, 0.646 baseline
2 |
3 | Competition: Product Review Opinion Extraction
4 | Track link: https://www.datafountain.cn/competitions/529
5 |
6 | ## Task
7 |
8 | Official description: "Opinion extraction aims to pull standardized, structured information such as product names, review aspects, and opinions out of unstructured review text. Here participants should use sentiment analysis to determine the sentiment of a bank-product review, and further use semantic analysis and entity recognition to mark the product names, evaluation aspects, and evaluation keywords mentioned in the review."
9 |
10 | Entities are annotated in BIO format (Begin, Inside, Outside):
11 |
12 | - B-BANK: beginning of a bank entity
13 | - I-BANK: inside a bank entity
14 | - B-PRODUCT: beginning of a product entity
15 | - I-PRODUCT: inside a product entity
16 | - O: outside any annotated span
17 | - B-COMMENTS_N: beginning of a user-comment noun
18 | - I-COMMENTS_N: inside a user-comment noun
19 | - B-COMMENTS_ADJ: beginning of a user-comment adjective
20 | - I-COMMENTS_ADJ: inside a user-comment adjective
21 |
22 | In addition, participants must also classify the sentiment of each text.
23 |
24 | Summary: this is a NER + classification competition. The online metric is the sum of the two parts: strict F1 for NER and Kappa for classification.
25 |
26 | ## baseline approach
27 |
28 | Once again our old friend simpletransformers, the NLP baseline workhorse:
29 |
30 | - https://simpletransformers.ai/docs/ner-model/
31 | - https://simpletransformers.ai/docs/classification-models/
32 |
33 | ### NER
34 |
35 | - max sequence length 400
36 | - convert the dataset to CoNLL format
37 | - roughly 9:1 train/validation split
38 | - pretrained model hfl/chinese-bert-wwm-ext
39 | - 3 epochs, offline f1 0.875
40 |
41 | ### Classification
42 |
43 | - max sequence length 400
44 | - 3 classes, extremely imbalanced: neutral accounts for 96%
45 | - trained on the full data
46 | - pretrained model hfl/chinese-bert-wwm-ext
47 | - 3 epochs
48 |
49 | ## Submission
50 |
51 | Merging the two parts scores 0.64668491406 online, 17th place at submission time
52 |
53 | See the code for details
54 |
--------------------------------------------------------------------------------
/competitions/dc_fraud_phonecall/README.md:
--------------------------------------------------------------------------------
1 | # Competition
2 |
3 | 2020 1st Digital Sichuan Innovation Contest, Algorithm Track: Fraud Phone-Call Detection
4 |
5 | ```
6 | http://www.scdata.net.cn/common/cmpt/%E8%AF%88%E9%AA%97%E7%94%B5%E8%AF%9D%E8%AF%86%E5%88%AB_%E6%8E%92%E8%A1%8C%E6%A6%9C.html
7 | ```
8 |
9 | # Background
10 |
11 | Fraud via phone calls and bulk SMS has long troubled mobile users, causing annoyance and financial loss. Protecting users' privacy and property from phone fraud is a pressing problem for the police, and a priority for Sichuan Mobile in improving user satisfaction and protecting user rights.
12 |
13 | Identifying fraud numbers from their communication behavior and shutting them down promptly is ongoing joint work between carriers and the police. Progress has been made, but constantly evolving fraud tactics keep posing algorithmic challenges for accurate detection.
14 |
15 | # Task
16 |
17 | Identify fraud phone numbers from user communication behavior: exploit the differences between fraud numbers and normal users in voice, SMS, and data usage to build a detection algorithm that flags fraud numbers quickly and accurately, so that police and carriers can shut them down. Participants must analyze the desensitized sample data, find patterns, and build the detection algorithm.
18 |
19 | # Data
20 |
21 | Four tables are provided:
22 |
23 | 
24 |
25 | The data is split into training and test sets by time:
26 |
27 | - preliminary round: predict April 2020;
28 | - semifinal: predict May 2020;
29 | - final: predict June 2020.
30 |
31 | In short, taking the preliminary round as an example, the training set covers eight months (201908, 201909, ..., 202003) while the test set only covers 202004; the semifinal and final are analogous.
32 |
33 | # Building the baseline
34 |
35 | Since train and test do not overlap in time, the most naive idea is to use only the last month of the training data as the new training set. This is just a baseline; there should be better schemes.
36 |
37 | Feature engineering (a minimal sketch follows after the list):
38 |
39 | 1. user table: only label-encode `city_name` and `county_name`;
40 | 2. voc table: split timestamps and build per-day and per-hour statistics;
41 | 3. sms table: split timestamps and build per-day and per-hour statistics;
42 | 4. app table: traffic statistics.
43 |
44 | These are all standard baseline moves; other score-boosting features are left to explore together.
45 |
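A minimal sketch of the per-day/per-hour statistics for the voc table; the column names (`phone_no_m`, `opposite_no_m`, `start_datetime`, `call_dur`) are assumptions about the schema, not the repo's code:

```
import pandas as pd

voc = pd.read_csv("train_voc.csv", parse_dates=["start_datetime"])  # hypothetical path

voc["day"] = voc["start_datetime"].dt.day
voc["hour"] = voc["start_datetime"].dt.hour

# per-phone aggregate statistics
agg = voc.groupby("phone_no_m").agg(
    call_cnt=("opposite_no_m", "count"),          # number of calls
    callee_nunique=("opposite_no_m", "nunique"),  # distinct opposite numbers
    dur_mean=("call_dur", "mean"),
    dur_sum=("call_dur", "sum"),
    active_days=("day", "nunique"),
    active_hours=("hour", "nunique"),
).reset_index()

# hour-of-day distribution features
hour_cnt = voc.groupby(["phone_no_m", "hour"]).size().unstack(fill_value=0)
hour_cnt.columns = [f"voc_hour_{h}" for h in hour_cnt.columns]
features = agg.merge(hour_cnt, left_on="phone_no_m", right_index=True, how="left")
```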
46 | The model is 5-fold LGB: offline f1 0.87, online f1 around 0.85.
47 |
48 | See the github link for the full code.
49 |
50 | # TODO
51 |
52 | 1. Use the eight months of training data more effectively;
53 | 2. Besides count statistics, voc and sms can yield time-gap features and the like;
54 | 3. For the app table, focus on the traffic of specific apps, or even build w2v features as in this year's Tencent contest;
55 | 4. Try other encodings for city and county;
56 | 5. The usual blending and stacking.
--------------------------------------------------------------------------------
/competitions/aiyanxishe_text_similarity/main.py:
--------------------------------------------------------------------------------
1 | #coding: utf-8
2 |
3 | __author__ = "zhengheng"
4 |
5 | import warnings
6 | warnings.simplefilter('ignore')
7 |
8 | import numpy as np
9 | import pandas as pd
10 |
11 | from tqdm import tqdm
12 |
13 | from simpletransformers.classification import ClassificationModel
14 |
15 | # load data
16 | train = pd.read_csv('raw_data/train.csv')
17 | test = pd.read_csv('raw_data/test.csv')
18 | train.columns = ['text_a', 'text_b', 'labels']
19 |
20 | # training arguments
21 | train_args = {
22 | 'reprocess_input_data': True,
23 | 'overwrite_output_dir': True,
24 | 'num_train_epochs': 3,
25 | 'regression': True,
26 | }
27 |
28 | # build model
29 | model = ClassificationModel('roberta',
30 | 'roberta-base',
31 | num_labels=1,
32 | use_cuda=True,
33 | cuda_device=0,
34 | args=train_args)
35 |
36 | # train model
37 | model.train_model(train, eval_df=test)
38 |
39 | # predict
40 | text_list = list()
41 | for i, row in tqdm(test.iterrows()):
42 | text_list.append([row['text_a'], row['text_b']])
43 |
44 | pred, _ = model.predict(text_list)
45 |
46 | sub = pd.DataFrame()
47 | sub['ID'] = test.index
48 | sub['score'] = pred
49 |
50 | # post-processing: some predictions exceed 5, so clip the scores
51 | sub.loc[sub.score < 0.08, 'score'] = 0
52 | sub.loc[sub.score > 5, 'score'] = 5
53 |
54 | # submit
55 | sub.to_csv('roberta_baseline.csv', index=False, header=False)
56 |
--------------------------------------------------------------------------------
/competitions/2021ccf_SysRisk/README.md:
--------------------------------------------------------------------------------
1 | # Submit and quit! CCF System Authentication Risk Prediction baseline
2 |
3 | Track link: https://www.datafountain.cn/competitions/537
4 |
5 | ## Task
6 |
7 | Based on user authentication behavior data and risk/anomaly labels, build a model of authentication behavior and a risk-scoring model, and use the risk model to judge whether the current authentication attempt is risky.
8 |
9 | - Build a behavior baseline from the authentication data
10 | - With a supervised model built on authentication-behavior features, build a risk-scoring model that judges whether the current authentication attempt is risky
11 |
12 | ## Data
13 |
14 | The data is extracted from the risk-analysis product logs of 竹云 (Bamboocloud), mainly authentication logs and risk logs.
15 |
16 | It has been desensitized and filtered for safe use in the competition.
17 |
18 | The authentication log records user actions when accessing applications: login, single sign-on, logout, and so on.
19 |
20 | ## baseline
21 |
22 | Almost all features are categorical, each with only a few distinct values, and the target rate is remarkably uniform across them:
23 |
24 | ```
25 | user_name test_d 0.190810465858328
26 | user_name test_c 0.2004201680672269
27 | user_name test_a 0.19375305026842363
28 | user_name test_b 0.20043763676148796
29 | user_name test_g 0.195578231292517
30 | user_name test_e 0.1988888888888889
31 | user_name test_f 0.19234116623150566
32 | ==================================================
33 | action login 0.1932896671567972
34 | action sso 0.19827471798274718
35 | ==================================================
36 | auth_type otp 0.19203491543917076
37 | auth_type qr 0.1888772298006296
38 | auth_type nan nan
39 | auth_type sms 0.19239013933547697
40 | auth_type pwd 0.19989339019189764
41 | ==================================================
42 | ip 192.168.100.101 0.19682539682539682
43 | ip 14.196.145.66 0.18600867678958785
44 | ip 27.10.135.254 0.1939799331103679
45 | ip 192.168.100.103 0.20709105560032232
46 | ip 192.168.0.100 0.18235294117647058
47 | ==================================================
48 | ```
49 |
50 | No wonder everyone on the leaderboard is around 0.50X.
51 |
52 | This baseline adds a few features:
53 |
54 | 1. time-delta features per user;
55 | 2. per-user nunique counts of the categorical fields;
56 |
57 | Offline: 0.5070717, online: 0.5036238
58 |
--------------------------------------------------------------------------------
/competitions/sodic_job_match/README.md:
--------------------------------------------------------------------------------
1 | # Job-Candidate Matching baseline: a simple way to use BERT for text feature extraction
2 | Offline 0.8627, online 0.8692
3 |
4 | ## Track link
5 | https://www.sodic.com.cn/competitions/900008
6 |
7 | ## Background
8 | Corporate hiring needs are becoming more diverse and fine-grained, making recruitment services harder to run. The competition asks participants to use NLP, machine learning and related techniques to build profiles of job postings and candidates at scale, and to provide data-driven services for candidate and job recommendation, improving hiring efficiency.
9 |
10 | The data covers candidate basics, desired positions, work experience, certificates, project experience, job postings, and hiring outcomes.
11 |
12 | ## Baseline
13 | The baseline is not fancy: merge the tables into one as far as possible, do some simple cleaning and feature engineering, and use BERT to extract features from the text fields.
14 |
15 | ### Simple data cleaning
16 | Education level is given as strings, but it has a natural ordering, so it is mapped to integers by hand. Years of work experience has the same property and gets a rule-based mapping as well.
17 | ```
18 | # 学历
19 | {
20 | '其它': 0,
21 | '中专': 1,
22 | '高中(职高、技校)': 2,
23 | '大专': 3,
24 | '大学本科': 4,
25 | '硕士研究生': 5,
26 | '博士后': 6
27 | }
28 |
29 | # 工作年限
30 | {
31 | '应届毕业生': 0,
32 | '0至1年': 1,
33 | '1至2年': 2,
34 | '3至5年': 3,
35 | '5年以上': 4,
36 | }
37 | ```
38 | The "required major" field sometimes contains extra characters, e.g. "【园艺学】" and "园艺学" should refer to the same thing, so "【" and "】" are stripped to avoid ambiguity.
39 |
40 |
41 | ### Simple basic features
42 | * number of work-experience records
43 | * count features: number of applications per candidate, counts per job category
44 | * nunique features: number of distinct job categories among a posting's applications, number of distinct candidate majors among a posting's applications
45 | * numeric statistics: mean/max/min/std of applicants' years of experience per posting
46 | * whether the posting's location matches the candidate's desired location
47 | * whether the posting's minimum education requirement exceeds the candidate's highest education
48 |
49 | ### Simple text feature extraction (improvised)
50 | Download the pretrained ernie model from https://huggingface.co/nghuyong/ernie-1.0 (any other pretrained model works too). Put config.json, pytorch_model.bin and vocab.txt in one folder (e.g. the baseline's data/pretrain_models/ernie). Feed each text field to ernie as a sentence; the model outputs an embedding sequence, and the baseline simply takes the embedding of the cls token as the sentence representation. The baseline's sent_to_vec function also keeps other pooling options you can try.
51 |
52 | The baseline only extracts features for three text fields: the posted job title, the candidate's major, and the required major, then computes the cosine similarity between the candidate's major and the required major.
53 |
54 | The raw embedding is high-dimensional (768 for ernie); dumping it straight into the model as features would blow up memory after just a few text fields. Moreover, sentence vectors taken directly from a pretrained BERT behave poorly under cosine similarity in a fully unsupervised setting, so BERT-whitening is used to correct the distribution (see https://kexue.fm/archives/8069 for the theory). BERT-whitening can also reduce dimensionality and ease the memory load (a minimal sketch follows below).
55 |
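A minimal sketch of CLS pooling plus BERT-whitening as described above; the model id comes from the link above, while the helper names and the example texts are placeholders (the repo's sent_to_vec differs in detail):

```
import numpy as np
import torch
from transformers import AutoTokenizer, AutoModel

tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0")
model = AutoModel.from_pretrained("nghuyong/ernie-1.0")
model.eval()

def sent_to_vec(sentences):
    """Encode sentences and take the cls embedding of the last hidden layer."""
    with torch.no_grad():
        enc = tokenizer(sentences, padding=True, truncation=True,
                        max_length=64, return_tensors="pt")
        out = model(**enc).last_hidden_state   # (batch, seq_len, 768)
        return out[:, 0].numpy()               # cls token

def whitening(vecs, n_components=64):
    """BERT-whitening: center, decorrelate, and optionally reduce dimension."""
    mu = vecs.mean(axis=0, keepdims=True)
    cov = np.cov((vecs - mu).T)
    u, s, _ = np.linalg.svd(cov)
    kernel = u @ np.diag(1.0 / np.sqrt(s + 1e-8))
    return ((vecs - mu) @ kernel)[:, :n_components]

# in practice, fit whitening on the sentence vectors of the whole corpus,
# then compare pairs with cosine similarity
texts = ["园艺学", "园林", "计算机科学与技术"]   # placeholder texts
white = whitening(sent_to_vec(texts), n_components=2)
```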
--------------------------------------------------------------------------------
/competitions/bien_bmes/README.md:
--------------------------------------------------------------------------------
1 | # BAAI Water-Conservancy Knowledge Graph Construction Challenge baseline
2 |
3 | ## Track link
4 |
5 | http://competition.baai.ac.cn/c/37/format/introduce
6 |
7 | ## Track introduction
8 |
9 | Knowledge graphs describe real-world concepts, instances and their relations, and can greatly broaden search results. The keyword-based search currently used in the water-conservancy domain cannot exploit relations between objects; a complete domain knowledge graph would make far better use of water-conservancy information resources.
10 |
11 | The domain contains many entities such as rivers, lakes and reservoirs, with heavy name ambiguity (e.g. 红旗水库, 黑河, 清河), which makes it hard to extract and disambiguate entities accurately from unstructured text. The goal of the competition is a method for extracting the different entity types from water-conservancy texts.
12 |
13 | The data is provided jointly by the water-conservancy knowledge service of the China Institute of Water Resources and Hydropower Research and Zhipu AI.
14 |
15 | PS: "it is just a NER competition"
16 |
17 | ## Text cleaning and preprocessing
18 |
19 | A few dozen labels in the training data are incompletely annotated, e.g.:
20 |
21 | ```
22 | "长江#河流*[@]水系-TER"
23 | ```
24 |
25 | which should be split into
26 |
27 | ```
28 | "长江-RIV"
29 | "水系-TER"
30 | ```
31 |
32 | These can be found with
33 |
34 | ```
35 | /bin/grep -Po '^\s+".*\[@\].*-[A-Z]{3}"' bmes_train.json
36 | ```
37 |
38 | There are also a few annotation errors that do not match the pattern above; I tracked them all down, see the notebook code.
39 |
40 | **EDIT: the preprocessing above is no longer needed, since the organizers have updated the data**
41 |
42 | Annotation uses the BIO scheme; see the code as well.
43 |
44 | TEXT: 东西两源汇合后,进入平原区,北流经过石埠嘴、船涨埠,至白洋淀后进入瓦埠湖。
45 | LABEL: [白洋淀-LAK, 瓦埠湖-LAK]
46 |
47 | is converted to: (O = other, B_XXX = start of an entity, I_XXX = middle or end of an entity)
48 |
49 | ```
50 | ['东', 'O']
51 | ['西', 'O']
52 | ['两', 'O']
53 | ['源', 'O']
54 | ['汇', 'O']
55 | ['合', 'O']
56 | ['后', 'O']
57 | [',', 'O']
58 | ['进', 'O']
59 | ['入', 'O']
60 | ['平', 'O']
61 | ['原', 'O']
62 | ['区', 'O']
63 | [',', 'O']
64 | ['北', 'O']
65 | ['流', 'O']
66 | ['经', 'O']
67 | ['过', 'O']
68 | ['石', 'O']
69 | ['埠', 'O']
70 | ['嘴', 'O']
71 | ['、', 'O']
72 | ['船', 'O']
73 | ['涨', 'O']
74 | ['埠', 'O']
75 | [',', 'O']
76 | ['至', 'O']
77 | ['白', 'B_LAK']
78 | ['洋', 'I_LAK']
79 | ['淀', 'I_LAK']
80 | ['后', 'O']
81 | ['进', 'O']
82 | ['入', 'O']
83 | ['瓦', 'B_LAK']
84 | ['埠', 'I_LAK']
85 | ['湖', 'I_LAK']
86 | ['。', 'O']
87 | ```
88 |
89 | ## Training
90 |
91 | chinese-roberta-wwm-ext, 8 epochs, 9:1 train/validation split; training finishes within 25 minutes
92 |
93 | Single-fold offline f1: 0.8071, online 0.7990
94 |
95 | **EDIT: with the officially updated data, members of the chat group report the leaderboard score rises to about 0.82**
96 |
97 | ## TODO
98 |
99 | 1. Other pretrained models
100 | 2. Multi-fold training and ensembling
101 | 3. Add a CRF layer
102 |
--------------------------------------------------------------------------------
/competitions/dc_molecule/README.md:
--------------------------------------------------------------------------------
1 | # dc_molecule_baseline
2 | AI Against COVID: Small-Molecule Druggability Prediction baseline
3 |
4 | ## Host: DC Competition
5 | ## Track: 2020 AI Against COVID - Small-Molecule Druggability Prediction
6 |
7 | **Track link**: https://www.dcjingsai.com/common/cmpt/AI%E6%88%98%E7%96%AB%C2%B7%E5%B0%8F%E5%88%86%E5%AD%90%E6%88%90%E8%8D%AF%E5%B1%9E%E6%80%A7%E9%A2%84%E6%B5%8B%E5%A4%A7%E8%B5%9B_%E7%AB%9E%E8%B5%9B%E4%BF%A1%E6%81%AF.html
8 | **Schedule**: *2020.03.05-2020.04.06*
9 | ## 1. Data description
10 | **train.csv**
11 |
12 | | Field | Type | Description |
13 | | :----------------: | :----: | :-------------------------------------------------: |
14 | | ID | int | sample id |
15 | | Molecule_max_phase | int | the molecule's maximum development phase |
16 | | Molecular weight | float | molecular weight |
17 | | RO5_violations | int | number of violations of the Rule of Five (RO5) |
18 | | AlogP | float | lipophilicity computed by ACD software (from ChEMBL) |
19 | | Features | vector | vectorized representation of the molecule |
20 | | Label | enum/float | clearance: how much body fluid is cleared of the drug per unit time |
21 |
22 | **test.csv**
23 | | Field | Type | Description |
24 | | :----------------: | :---------: | :-------------------------------------------------: |
25 | | ID | int | sample id |
26 | | Molecule_max_phase | int | the molecule's maximum development phase |
27 | | Molecular weight | float | molecular weight |
28 | | RO5_violations | int | number of violations of the Rule of Five (RO5) |
29 | | AlogP | float | lipophilicity computed by ACD software (from ChEMBL) |
30 | | Features | vector | vectorized representation of the molecule |
31 |
32 |
33 |
34 | ## 2. Environment and dependencies
35 | - python3
36 | - scikit-learn
37 | - numpy
38 | - lightgbm
39 | ## 3. How to run
40 | Download the dataset into the raw_data folder and create a sub folder for submission files.
41 |
42 | ## 4. Feature engineering
43 | - log1p of Molecular weight and AlogP
44 | - split the molecule vector into individual features (important)
45 | - statistics over the molecule vector (a minimal sketch follows below)
46 |
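A minimal sketch of this feature engineering; the comma separator inside the Features string is an assumption about the raw data:

```
import numpy as np
import pandas as pd

train = pd.read_csv("raw_data/train.csv")

# log1p of the two skewed numeric columns
for col in ["Molecular weight", "AlogP"]:
    train[col + "_log1p"] = np.log1p(train[col])

# split the vectorized molecule representation into individual columns
feat = train["Features"].str.split(",", expand=True).astype(float)
feat.columns = [f"feat_{i}" for i in range(feat.shape[1])]

# simple statistics over the vector
train["feat_mean"] = feat.mean(axis=1)
train["feat_std"] = feat.std(axis=1)
train["feat_max"] = feat.max(axis=1)

train = pd.concat([train.drop(columns=["Features"]), feat], axis=1)
```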
47 | ## 5. Model score
48 | Online score: 2.096488
49 |
--------------------------------------------------------------------------------
/competitions/wsdm_iqiyi_torch/feature.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import numpy as np
4 | import pandas as pd
5 |
6 | np.random.seed(2021)
7 |
8 | launch = pd.read_csv('raw_data/wsdm_train_data/app_launch_logs.csv')
9 | test = pd.read_csv('raw_data/test-a.csv')
10 |
11 | launch.date.min(), launch.date.max()
12 |
13 | launch_grp = launch.groupby('user_id').agg(launch_date=('date', list),
14 | launch_type=('launch_type',
15 | list)).reset_index()
16 |
17 |
18 | def choose_end_date(launch_date):
19 | n1, n2 = min(launch_date), max(launch_date)
20 | if n1 < n2 - 7:
21 | end_date = np.random.randint(n1, n2 - 7)
22 | else:
23 | end_date = np.random.randint(100, 222 - 7)
24 | return end_date
25 |
26 |
27 | def get_label(row):
28 | launch_list = row.launch_date
29 | end = row.end_date
30 | label = sum([1 for x in set(launch_list) if end < x < end + 8])
31 | return label
32 |
33 |
34 | launch_grp['end_date'] = launch_grp.launch_date.apply(choose_end_date)
35 | launch_grp['label'] = launch_grp.apply(get_label, axis=1)
36 |
37 | train = launch_grp[['user_id', 'end_date', 'label']]
38 | train
39 |
40 | test['label'] = -1
41 | test
42 |
43 | data = pd.concat([train, test], ignore_index=True)
44 | data
45 |
46 | data = data.merge(launch_grp[['user_id', 'launch_type', 'launch_date']],
47 | how='left',
48 | on='user_id')
49 | data
50 |
51 |
52 | # get latest 32 days([end_date-31, end_date]) launch type sequence
53 | # 0 for not launch, 1 for launch_type=0, and 2 for launch_type=1
54 | def gen_launch_seq(row):
55 | seq_sort = sorted(zip(row.launch_type, row.launch_date),
56 | key=lambda x: x[1])
57 | seq_map = {d: t + 1 for t, d in seq_sort}
58 | end = row.end_date
59 | seq = [seq_map.get(x, 0) for x in range(end - 31, end + 1)]
60 | return seq
61 |
62 |
63 | data['launch_seq'] = data.apply(gen_launch_seq, axis=1)
64 | data
65 |
66 | data.head()
67 |
68 | data.drop(columns=['launch_date', 'launch_type'], inplace=True)
69 |
70 | os.makedirs('data', exist_ok=True)
71 | data.to_pickle('data/all_data.pkl')
72 |
--------------------------------------------------------------------------------
/competitions/property_chat_pair/run_folds.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | import warnings
4 | warnings.simplefilter('ignore')
5 |
6 | import numpy as np
7 | import pandas as pd
8 |
9 | from simpletransformers.classification import ClassificationModel
10 |
11 | fold = int(sys.argv[1])
12 |
13 | train_query = pd.read_csv('raw_data/train/train.query.tsv', sep='\t', header=None)
14 | train_query.columns = ['qid', 'text_a']
15 | train_reply = pd.read_csv('raw_data/train/train.reply.tsv', sep='\t', header=None)
16 | train_reply.columns = ['qid', 'rid', 'text_b', 'labels']
17 | train = pd.merge(train_reply, train_query, on='qid', how='left')
18 |
19 | df = train[['text_a', 'text_b', 'labels']]
20 | df = df.sample(frac=1, random_state=1029)
21 | train_df = df[df.index % 10 != fold]
22 | eval_df = df[df.index % 10 == fold]
23 | print(train_df.shape, eval_df.shape)
24 |
25 | train_args = {
26 | 'reprocess_input_data': True,
27 | 'overwrite_output_dir': True,
28 | 'num_train_epochs': 3,
29 | 'fp16': False
30 | }
31 | model = ClassificationModel('bert',
32 | 'hfl/chinese-roberta-wwm-ext',
33 | num_labels=2,
34 | use_cuda=True,
35 | cuda_device=0,
36 | args=train_args)
37 | model.train_model(train_df, eval_df=eval_df)
38 |
39 | test_query = pd.read_csv('raw_data/test/test.query.tsv', sep='\t', header=None, encoding="gbk")
40 | test_query.columns = ['qid', 'text_a']
41 | test_reply = pd.read_csv('raw_data/test/test.reply.tsv', sep='\t', header=None, encoding="gbk")
42 | test_reply.columns = ['qid', 'rid', 'text_b']
43 | test = pd.merge(test_reply, test_query, on='qid', how='left')
44 | df_test = test[['text_a', 'text_b']]
45 |
46 | submit_sample = pd.read_csv('raw_data/sample_submission.tsv', sep='\t', header=None)
47 | submit_sample.columns =['qid', 'rid', 'label']
48 |
49 | data = []
50 | for i, row in df_test.iterrows():
51 | data.append([row['text_a'], row['text_b']])
52 |
53 | predictions, raw_outputs = model.predict(data)
54 | submit_sample['label'] = predictions
55 |
56 | np.save(f'prob_{fold}', raw_outputs)
57 | submit_sample.to_csv(f'sub_{fold}.tsv', sep='\t', index=False, header=False)
58 |
59 |
60 |
--------------------------------------------------------------------------------
/competitions/aiyanxishe_text_similarity/README.md:
--------------------------------------------------------------------------------
1 | AI Yanxishe English Text Semantic Similarity competition baseline
2 |
3 | ## Competition link
4 |
5 | https://god.yanxishe.com/53
6 |
7 | ## Competition introduction
8 |
9 | Semantic similarity is one of the core problems in NLP, important for question answering, translation, retrieval, and more.
10 |
11 | Given two short texts, the task is to score their similarity (0-5), e.g.
12 |
13 | ```
14 | text_a: 'It depends on what you want to do next, and where you want to do it.'
15 | text_b: 'It's up to you what you want to do next.'
16 | score: 4.00
17 | ```
18 |
19 | The dataset is small: 2300+ text pairs for training and 500+ for testing.
20 |
21 | ## A quick and effective baseline
22 |
23 | I use simpletransformers to build the baseline. simpletransformers is a higher-level wrapper around transformers that provides quick recipes for all common NLP tasks (usually just build, train, predict, three lines of code).
24 |
25 | The example followed here:
26 |
27 | https://github.com/ThilinaRajapakse/simpletransformers#regression
28 |
29 | The code:
30 |
31 | ```
32 | import warnings
33 | warnings.simplefilter('ignore')
34 |
35 | import numpy as np
36 | import pandas as pd
37 |
38 | from tqdm import tqdm
39 |
40 | from simpletransformers.classification import ClassificationModel
41 |
42 | # load data
43 | train = pd.read_csv('raw_data/train.csv')
44 | test = pd.read_csv('raw_data/test.csv')
45 | train.columns = ['text_a', 'text_b', 'labels']
46 |
47 | # training arguments
48 | train_args = {
49 | 'reprocess_input_data': True,
50 | 'overwrite_output_dir': True,
51 | 'num_train_epochs': 3,
52 | 'regression': True,
53 | }
54 |
55 | # build model
56 | model = ClassificationModel('roberta',
57 | 'roberta-base',
58 | num_labels=1,
59 | use_cuda=True,
60 | cuda_device=0,
61 | args=train_args)
62 |
63 | # train model
64 | model.train_model(train, eval_df=test)
65 |
66 | # predict
67 | preds = list()
68 | for i, row in tqdm(test.iterrows()):
69 | text_a = row['text_a']
70 | text_b = row['text_b']
71 | pred, _ = model.predict([[text_a, text_b]])
72 | preds.append(pred)
73 |
74 |
75 | sub = pd.DataFrame()
76 | sub['ID'] = test.index
77 | sub['score'] = [i.tolist() for i in preds]
78 |
79 | # post-processing: some predictions exceed 5, so clip the scores
80 | sub.loc[sub.score < 0.08, 'score'] = 0
81 | sub.loc[sub.score > 5, 'score'] = 5
82 |
83 | # submit
84 | sub.to_csv('roberta_baseline.csv', index=False, header=False)
85 | ```
86 |
87 | Online score: 87.8744, currently in the top ten.
88 |
--------------------------------------------------------------------------------
/competitions/tianchi_news_classification/RAEDME.md:
--------------------------------------------------------------------------------
1 | A single fold scores 0.944 online; five folds should reach about 0.95. Full code via the "read more" link at the end.
2 |
3 | # Competition link
4 |
5 | https://tianchi.aliyun.com/competition/entrance/531810/information
6 |
7 | # Background
8 |
9 | The data consists of news text, available for download after registration, anonymized at the character level and regrouped into 14 candidate categories: finance, lottery, real estate, stocks, home, education, technology, society, fashion, politics, sports, horoscope, games, and entertainment.
10 |
11 | The data comprises a 200k-sample training set and two 50k-sample test sets (A and B). To prevent manual labeling of the test set, all text was anonymized at the character level.
12 |
13 | The data appears to come from THUCNews, a Chinese text-classification dataset built by filtering the Sina News RSS archive from 2005-2011: 740k news documents (2.19 GB) in UTF-8 plain text, regrouped into the same 14 categories.
14 |
15 | # Approach
16 |
17 | I am an NLP novice myself, so take this as a shallow view. Although this is an introductory contest, the heavy anonymization defeats most common NLP preprocessing: for example, punctuation cannot be stripped reliably, you can only guess which characters are punctuation from their frequencies. So the contest is mostly about model and hyperparameter choices.
18 |
19 | The recently finished Tencent Advertising Algorithm Competition 2020 is very similar: anonymized id-sequence classification, just with more id sequences. Our team "最后一次打比赛" finished 12th, and most of the ideas below can be found in our open-source code: https://github.com/LogicJake/Tencent_Ads_Algo_2020_TOP12
20 |
21 | ## Word vectors
22 |
23 | The usual recipe for this kind of contest is to map the sparse ids to word vectors and then train a sequence classifier. Word vectors can be used in two ways: pretrain them with Word2Vec and use them to initialize the classifier's Embedding layer, optionally freezing it during training; or randomly initialize the Embedding layer and learn it end to end (a minimal sketch follows below).
24 |
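A minimal sketch of pretraining Word2Vec on the character-id sequences and building an embedding matrix (the file path and `text` column are assumptions):

```
import numpy as np
import pandas as pd
from gensim.models import Word2Vec

train = pd.read_csv("train_set.csv", sep="\t")     # hypothetical path
sentences = train["text"].str.split().tolist()     # each sample is a space-separated id sequence

w2v = Word2Vec(sentences=sentences, vector_size=128, window=5,
               min_count=1, sg=1, workers=4, epochs=10)

# build vocab and embedding matrix; index 0 is reserved for padding
vocab = {tok: i + 1 for i, tok in enumerate(w2v.wv.index_to_key)}
emb = np.zeros((len(vocab) + 1, 128), dtype="float32")
for tok, idx in vocab.items():
    emb[idx] = w2v.wv[tok]

# emb can now initialize (and optionally freeze) the classifier's Embedding layer
```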
25 | ## Choosing the classifier
26 |
27 | There are plenty of options: LSTM, transformer, CNN... Basic LSTM and CNN architectures are already posted on the competition forum, so this release is based on a transformer encoder. With a basic architecture in place, the rest is tuning.
28 |
29 | ## Tuning ideas and tricks
30 |
31 | A few tuning ideas and tricks learned from the Tencent contest.
32 |
33 | General:
34 |
35 | - embedding dimension
36 | - maximum sequence length for truncation
37 | - Word2Vec pretraining parameters: window, min_count...
38 | - whether to drop low-frequency ids during preprocessing
39 | - optimizer choice; newer ones such as ranger are worth trying
40 | - activation function: mish...
41 | - learning-rate decay, e.g. ReduceLROnPlateau
42 | - ...
43 |
44 | LSTM:
45 |
46 | - number of units
47 | - how many LSTM layers to stack
48 | - ...
49 |
50 | CNN:
51 |
52 | - can filter_size go beyond 3, 4, 5?
53 | - num_filters
54 | - ...
55 |
56 | Transformer:
57 |
58 | - use a smaller learning rate
59 | - head_num
60 | - number of stacked layers
61 | - ...
62 |
63 | ## Ensembling
64 |
65 | Ensembling paid off well in the Tencent contest; its benefit here has not been tested yet. Ensembling thrives on diversity, so build models that differ along several axes:
66 |
67 | - framework: PyTorch, TensorFlow, Keras, even Paddle
68 | - classifier: LSTM, CNN, Transformer
69 | - activation function
70 | - and even other hyperparameters
71 |
72 | ## References
73 |
74 | - [A text-CNN baseline](https://tianchi.aliyun.com/notebook-ai/detail?postId=118161)
75 | - [THUCNews Chinese text-classification dataset](http://thuctc.thunlp.org/#%E4%B8%AD%E6%96%87%E6%96%87%E6%9C%AC%E5%88%86%E7%B1%BB%E6%95%B0%E6%8D%AE%E9%9B%86THUCNews)
78 |
--------------------------------------------------------------------------------
/competitions/wsdm_iqiyi_torch/model_tools.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 | from torch.utils.data import Dataset
4 | from tqdm import tqdm
5 |
6 |
7 | def cal_score(pred, label):
8 | pred = np.array(pred)
9 | label = np.array(label)
10 |
11 | diff = (pred - label) / 7
12 | diff = np.abs(diff)
13 |
14 | score = 100 * (1 - np.mean(diff))
15 | return score
16 |
17 |
18 | class AQYDataset(Dataset):
19 | def __init__(self, df, device):
20 | self.user_id_list = df['user_id'].values
21 |
22 | self.launch_seq_list = df['launch_seq'].values
23 |
24 | self.label_list = df['label'].values
25 |
26 | def __getitem__(self, index):
27 | user_id = self.user_id_list[index]
28 |
29 | launch_seq = np.array(self.launch_seq_list[index])
30 |
31 | label = self.label_list[index]
32 |
33 | return user_id, launch_seq, label
34 |
35 | def __len__(self):
36 | return len(self.user_id_list)
37 |
38 |
39 | def fit(model, train_loader, optimizer, criterion, device):
40 | model.train()
41 |
42 | pred_list = []
43 | label_list = []
44 |
45 | for user_id, launch_seq, label in tqdm(train_loader):
46 | user_id = user_id.long().to(device)
47 | launch_seq = launch_seq.long().to(device)
48 | label = torch.tensor(label).float().to(device)
49 |
50 | pred = model(user_id, launch_seq)
51 |
52 | loss = criterion(pred.squeeze(), label)
53 | loss.backward()
54 | optimizer.step()
55 | model.zero_grad()
56 |
57 | pred_list.extend(pred.squeeze().cpu().detach().numpy())
58 | label_list.extend(label.squeeze().cpu().detach().numpy())
59 |
60 | score = cal_score(pred_list, label_list)
61 |
62 | return score
63 |
64 |
65 | def validate(model, val_loader, device):
66 | model.eval()
67 |
68 | pred_list = []
69 | label_list = []
70 |
71 | for user_id, launch_seq, label in tqdm(val_loader):
72 | user_id = user_id.long().to(device)
73 | launch_seq = launch_seq.long().to(device)
74 | label = torch.tensor(label).float().to(device)
75 |
76 | pred = model(user_id, launch_seq)
77 |
78 | pred_list.extend(pred.squeeze().cpu().detach().numpy())
79 | label_list.extend(label.squeeze().cpu().detach().numpy())
80 |
81 | score = cal_score(pred_list, label_list)
82 |
83 | return score
84 |
85 |
86 | def predict(model, test_loader, device):
87 | model.eval()
88 | test_pred = []
89 | for user_id, launch_seq, _ in tqdm(test_loader):
90 | user_id = user_id.long().to(device)
91 | launch_seq = launch_seq.long().to(device)
92 |
93 | pred = model(user_id, launch_seq).squeeze()
94 | test_pred.extend(pred.cpu().detach().numpy())
95 |
96 | return test_pred
97 |
--------------------------------------------------------------------------------
/competitions/iflytek_agriculture_ner/README.md:
--------------------------------------------------------------------------------
1 | # Track
2 |
3 | iFLYTEK Open Platform - Agricultural Q&A Data Processing Challenge
4 |
5 | # Track link
6 |
7 | http://challenge.xfyun.cn/topic/info?type=agriculture
8 |
9 | # Overview
10 |
11 | Because agricultural production deals with living organisms, with all their diversity and variability, agricultural big data is full of domain terms: crops, pests and diseases, soil remediation, fertilization plans, physiological stress, seedlings, difficult disorders, nutrient deficiency, weather, geographic information, and so on, and unstructured data in particular is growing fast. Mining value from this data, improving analysis capability, and reducing redundancy and junk data are key problems for agricultural big data.
12 |
13 | The first step of data processing is to tag named entities. The contest provides Q&A data between experts and farmers from an online platform as training samples; participants must build models that tag and segment the Q&A text.
14 |
15 | # Baseline
16 |
17 | This is also my first NER competition, so there may well be mistakes; corrections from the experts are welcome.
18 |
19 | ### Tagging scheme
20 |
21 | As far as I know there are several common schemes, such as BIO, BIEO, and BIEOS.
22 |
23 | This baseline uses BIEO:
24 |
25 | - B marks the beginning of a term (Begin)
26 | - I marks the inside of a term (Intermediate)
27 | - E marks the end of a term (End)
28 | - O marks everything else (Other)
29 |
30 | In this task there are three entity classes: crop, disease, and medicine.
31 |
32 | So there are 10 possible tags:
33 |
34 | ```
35 | labels = [
36 | 'B_crop',
37 | 'I_crop',
38 | 'E_crop',
39 | 'B_disease',
40 | 'I_disease',
41 | 'E_disease',
42 | 'B_medicine',
43 | 'I_medicine',
44 | 'E_medicine',
45 | 'O'
46 | ]
47 | ```
48 |
49 | For example, the sentence 炭疽病危害使用肟菌戊唑醇或苯甲丙环唑防治 is tagged as:
50 |
51 | ```
52 | 炭 B_disease
53 | 疽 I_disease
54 | 病 E_disease
55 | 危 O
56 | 害 O
57 | 使 O
58 | 用 O
59 | 肟 B_medicine
60 | 菌 I_medicine
61 | 戊 I_medicine
62 | 唑 I_medicine
63 | 醇 E_medicine
64 | 或 O
65 | 苯 B_medicine
66 | 甲 I_medicine
67 | 丙 I_medicine
68 | 环 I_medicine
69 | 唑 E_medicine
70 | 防 O
71 | 治 O
72 | ```
73 |
74 | However, the training set is not given in this form; instead it uses POS-style word segmentation:
75 |
76 | ```
77 | 炭疽病/n_disease 危害/v 使用/v 肟菌戊唑醇/n_medicine 或/c 苯甲丙环唑/n_medicine 防治/vn
78 | ```
79 |
80 | So it takes a fair amount of work to convert this into BIEO format; see the code (a minimal sketch of the conversion follows below).
81 |
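A minimal sketch of the conversion from the POS-style annotation to BIEO tags; the real notebook's parsing differs in detail:

```
# sketch only: tag names follow the label list above
def to_bieo(line):
    """'炭疽病/n_disease 危害/v ...' -> [(char, tag), ...] in BIEO format."""
    pairs = []
    for token in line.split():
        word, pos = token.rsplit("/", 1)
        # only tags like n_disease / n_medicine / n_crop carry an entity type
        ent = pos.split("_", 1)[1] if "_" in pos else None
        if ent is None:
            pairs += [(ch, "O") for ch in word]
        elif len(word) == 1:
            # single-character entity: tag it as the beginning
            pairs.append((word, f"B_{ent}"))
        else:
            pairs.append((word[0], f"B_{ent}"))
            pairs += [(ch, f"I_{ent}") for ch in word[1:-1]]
            pairs.append((word[-1], f"E_{ent}"))
    return pairs

print(to_bieo("炭疽病/n_disease 危害/v 使用/v 肟菌戊唑醇/n_medicine"))
```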
82 | ### Model
83 |
84 | Being a beginner, I took the lazy route and used the simpletransformers package.
85 |
86 | simpletransformers is a high-level wrapper around huggingface transformers.
87 |
88 | It conveniently wraps the downstream NLP tasks: text classification, question answering, NER, and so on.
89 |
90 | A blessing for package-callers; this is all it takes:
91 |
92 | ```
93 | model_args = NERArgs()
94 | model_args.train_batch_size = 8
95 | model_args.num_train_epochs = 5
96 | model_args.fp16 = False
97 | model_args.evaluate_during_training = True
98 |
99 | model = NERModel("bert",
100 | "hfl/chinese-bert-wwm-ext",
101 | labels=labels,
102 | args=model_args)
103 |
104 | model.train_model(train_data, eval_data=eval_data)
105 | result, model_outputs, preds_list = model.eval_model(eval_data)
106 | ```
107 |
108 | PS: the pretrained model here is chinese-bert-wwm from the HFL (HIT & iFLYTEK) joint lab
109 |
110 | https://github.com/ymcui/Chinese-BERT-wwm
111 |
112 | ### Merging the answers
113 |
114 | Since the test set only provides raw text, it also has to be split into individual characters before being fed to the model,
115 |
116 | and the predictions then have to be written back in the required output format, which also takes some time; see the provided code.
117 |
118 |
119 | # Online score
120 |
121 | A single fold scores 0.94, still some way from the top, but it is surprising how far this little code goes.
122 |
123 |
124 | # TODO
125 |
126 | - Improve the preprocessing; the baseline code is not written very sensibly;
127 | - Switch to BIO to reduce the number of output classes
128 | - Ensemble several pretrained models; so far not great, three models together only reach 0.946
129 | - Drop simpletransformers and try BERT + BiLSTM + CRF, etc.
130 |
--------------------------------------------------------------------------------
/competitions/aiyanxishe_weibo_baseline/README.md:
--------------------------------------------------------------------------------
1 | ## Competition link
2 |
3 | https://god.yanxishe.com/44
4 |
5 | ## Competition introduction
6 |
7 | Weibo stance detection asks what stance the author of a post takes toward a topic: FAVOR, AGAINST, or NONE.
8 |
9 | ## EDA
10 |
11 | The dataset has 3,000 records: 2,400 for training and 600 for testing.
12 |
13 | There are three fields: target (the topic), text (the post body), and stance (the label).
14 |
15 | There are five topics, fairly evenly distributed:
16 |
17 | - 深圳禁摩限电
18 | - 开放二胎
19 | - 俄罗斯在叙利亚的反恐行动
20 | - IphoneSE
21 | - 春节放鞭炮
22 |
23 | As for label distribution, FAVOR and AGAINST are roughly balanced and NONE is rarer.
24 |
25 | ## Baseline
26 |
27 | No NLP competition these days goes without BERT, so this baseline also uses the chinese-roberta-wwm-ext pretrained model with simple 5-fold cross-validation.
28 |
29 | The model is built mainly with keras_bert
30 |
31 | ```
32 | def build_bert():
33 | bert_model = load_trained_model_from_checkpoint(config_path, checkpoint_path, seq_len=None)
34 |
35 | for l in bert_model.layers:
36 | l.trainable = True
37 |
38 | x1_in = Input(shape=(None,))
39 | x2_in = Input(shape=(None,))
40 |
41 | x = bert_model([x1_in, x2_in])
42 | x = Lambda(lambda x: x[:, 0])(x)
43 | p = Dense(3, activation='softmax')(x)
44 |
45 | model = Model([x1_in, x2_in], p)
46 | model.compile(loss='categorical_crossentropy',
47 | optimizer=Adam(1e-5),
48 | metrics=['accuracy'])
49 | return model
50 | ```
51 |
52 | Cross-validation:
53 |
54 | ```
55 | def run_cv(nfold, data, data_label, data_test):
56 |
57 | kf = KFold(n_splits=nfold, shuffle=True, random_state=1029).split(data)
58 | train_model_pred = np.zeros((len(data), 3))
59 | test_model_pred = np.zeros((len(data_test), 3))
60 |
61 | for i, (train_fold, test_fold) in enumerate(kf):
62 | X_train, X_valid, = data[train_fold, :], data[test_fold, :]
63 |
64 | model = build_bert()
65 | early_stopping = EarlyStopping(monitor='val_acc', patience=3)
66 | plateau = ReduceLROnPlateau(monitor="val_acc", verbose=1, mode='max', factor=0.5, patience=2)
67 | checkpoint = ModelCheckpoint('./' + str(i) + '.hdf5', monitor='val_acc',
68 | verbose=2, save_best_only=True, mode='max', save_weights_only=True)
69 |
70 | train_D = data_generator(X_train, shuffle=True)
71 | valid_D = data_generator(X_valid, shuffle=True)
72 | test_D = data_generator(data_test, shuffle=False)
73 |
74 | model.fit_generator(
75 | train_D.__iter__(),
76 | steps_per_epoch=len(train_D),
77 | epochs=5,
78 | validation_data=valid_D.__iter__(),
79 | validation_steps=len(valid_D),
80 | callbacks=[early_stopping, plateau, checkpoint],
81 | )
82 |
83 | train_model_pred[test_fold, :] = model.predict_generator(valid_D.__iter__(), steps=len(valid_D), verbose=1)
84 | test_model_pred += model.predict_generator(test_D.__iter__(), steps=len(test_D), verbose=1)
85 |
86 | del model; gc.collect()
87 | K.clear_session()
88 |
89 | return train_model_pred, test_model_pred
90 | ```
91 |
92 | ## Result
93 |
94 | The submission scores 68.6667, currently top 3; the top two are 69.1667 and 68.8333.
95 |
--------------------------------------------------------------------------------
/competitions/haihua2021/README.md:
--------------------------------------------------------------------------------
1 | # 2021 Haihua AI Challenge - Chinese Reading Comprehension (Technical Track) baseline
2 |
3 | ## Track link
4 |
5 | https://www.biendata.xyz/competition/haihua_2021/
6 |
7 | ## Background
8 |
9 | Machine Reading Comprehension is a frontier topic in NLP and AI: a machine reads a passage and answers questions about it, demonstrating the ability to acquire, understand and mine textual information. It is increasingly valuable in dialogue, search, question answering and simultaneous interpretation, with the long-term goal of providing solutions across industries.
10 |
11 | ## Task
12 |
13 | The technical-track data comes from a bank of Chinese reading-comprehension questions for middle-school and college entrance exams. Each record contains a passage, at least one question, and several candidate options; the model must pick the correct option.
14 |
15 | Two examples from the training set:
16 |
17 | ```
18 | {
19 | "ID": 1,
20 | "Content": "奉和袭美抱疾杜门见寄次韵 陆龟蒙虽失春城醉上期,下帷裁遍未裁诗。因吟郢岸百亩蕙,欲采商崖三秀芝。栖野鹤笼宽使织,施山僧饭别教炊。但医沈约重瞳健,不怕江花不满枝。",
21 | "Questions": [
22 | {
23 | "Question": "下列对这首诗的理解和赏析,不正确的一项是",
24 | "Choices": [
25 | "A.作者写作此诗之时,皮日休正患病居家,闭门谢客,与外界不通音讯。",
26 | "B.由于友人患病,原有的约会被暂时搁置,作者游春的诗篇也未能写出。",
27 | "C.作者虽然身在书斋从事教学,但心中盼望能走进自然,领略美好春光。",
28 | "D.尾联使用了关于沈约的典故,可以由此推测皮日休所患的疾病是目疾。"
29 | ],
30 | "Answer": "A",
31 | "Q_id": "000101"
32 | }
33 | ]
34 | },
35 | {
36 | "ID": 2,
37 | "Content": "隆冬之际,西伯利亚的寒流(笼罩/席卷)欧亚大陆,狂风肆虐,草木凋凌,而那些春天的元素——温暖、雨水、绿叶、鲜花,都集结在位于热带的海南岛。海南岛就像是一艘花船,(系/停)在雷州半岛上,满载寒冬大陆的梦幻和想象。每年,从广州向漠河,春天昼夜兼程,都要进行一次生命版图的(扩展/扩充)。他像赤足奔跑的孩子,一路上用稚嫩的声音轻轻呼唤,于是万物苏醒,盛装应和,可谓“东风好作阳和使, 。”迢迢旅途中,气候的巨大差异,导致众多物种中只能有限地参与这一盛会。木棉花花朵硕大,是南国花中豪杰,“一声铜鼓催开,千树珊瑚齐列,”但她终究无法走出岭南。当春天行经长江、黄河流域时,出场的是桃花、杏花等新主角,“桃花嫣然出篱笑, ”,然而她们却无法追随春天深入雪国,陆续抱憾退出,随后登场的便是白杨、连翘等北国耐寒植物。",
38 | "Questions": [
39 | {
40 | "Question": "1. 文中“肆虐”“凋凌”“昼夜兼程”“版图”“稚嫩”“嫣然”“抱憾退出”的词语中,有错别字的一项是",
41 | "Choices": [
42 | "A. 肆虐 凋凌",
43 | "B. 集结 昼夜兼程",
44 | "C. 版图 稚嫩",
45 | "D. 嫣然 抱憾退出"
46 | ],
47 | "Answer": "A",
48 | "Q_id": "000201"
49 | },
50 | {
51 | "Question": "依次选用文中括号里的词语,最恰当的一项是",
52 | "Choices": [
53 | "A. 席卷 系 扩展",
54 | "B. 笼罩 停 扩展",
55 | "C. 席卷 停 扩充",
56 | "D. 笼罩 系 扩充"
57 | ],
58 | "Answer": "A",
59 | "Q_id": "000202"
60 | }
61 | ]
62 | }
63 | ```
64 |
65 | A few preliminary observations:
66 |
67 | 1. The questions are hard even for ordinary humans (they resemble gaokao Chinese questions)
68 | 2. A good fraction of the passages are long (> 512 tokens)
69 | 3. Some passages are classical Chinese, which may be hard for BERT-style pretrained models
70 |
71 | ## BaseLine
72 |
73 | huggingface/transformers ships a multiple-choice example:
74 |
75 | https://github.com/huggingface/transformers/tree/master/examples/multiple-choice
76 |
77 | It can be reused directly, with the following adjustments:
78 |
79 | 1. hard-code the competition data into the SWAG dataset format;
80 | 2. run_swag.py has no do_predict part, which needs to be added;
81 | 3. tune the hyperparameters (especially when GPU memory is tight, remember the gradient_accumulation_steps argument)
82 |
83 | This baseline ships the modified run_swag.py. Put the haihua dataset under the raw_data directory, then run run_swag.ipynb (adjust output_dir yourself) to train and produce the submission file.
84 |
85 | **PS: outside Linux the copy command cp run_swag.py ./transformers/examples/multiple-choice/run_swag.py may fail. If you see an error that the --test_file argument is unavailable, copy run_swag.py into ./transformers/examples/multiple-choice/ by hand, replacing the original run_swag.py.**
86 |
87 | - pretrained model: hfl/chinese-roberta-wwm-ext, lr=2e-5, 5 epochs
88 | - environment: ubuntu-1804, 2080Ti; with a 13000:2425 train/validation split, 5 epochs take about two and a half hours.
89 | - online score: 41.9394435351882
90 |
91 | ## TODO
92 |
93 | ~~The eval results suggest noticeable overfitting~~ (correction: the earlier train/validation split had a bug; after fixing it, 43.4 offline and 41.9 online)
94 |
--------------------------------------------------------------------------------
/competitions/aiyanxishe_fraud_job/README.md:
--------------------------------------------------------------------------------
1 | ## Competition link
2 |
3 | https://god.yanxishe.com/46
4 |
5 | ## Background
6 |
7 | Job hunting on the internet has become a major channel, and automatically telling real postings from fake ones with AI is an interesting application.
8 |
9 | ## Data analysis
10 |
11 | The fields are:
12 |
13 | - benefits: text, what the candidate gets from the position;
14 | - company_profile: text, description of the hiring company;
15 | - department: text, the hiring department;
16 | - description: text, job description;
17 | - employment_type: text, full-time or part-time;
18 | - fraudulent: label, 0 = real, 1 = fake;
19 | - function: text, job function;
20 | - `has_company_logo`: numeric, whether the company has a logo;
21 | - industry: text, the company's industry;
22 | - location: text, the company's location;
23 | - required_education: text, education required for the position;
24 | - required_experience: text, experience required for the position;
25 | - requirements: text, skills required for the position;
26 | - salary_range: text, salary range;
27 | - telecommuting: numeric, whether the company provides a phone number;
28 | - title: text, job title.
29 |
30 | Most features are text. A few can be turned into categorical features, and the long text fields can be reduced and transformed with TF-IDF.
31 |
32 | ## baseline
33 |
34 | A nasty catch: the test set has only 200 rows (vs. about 18,000 training rows), and the class balance differs: positives are under 5% of the training set but, judging from my submissions, at least 40% of the test set. So concatenating train and test for TF-IDF or one-hot encoding performs terribly, only about 50 points online.
35 |
36 | With that pitfall avoided, the rest is routine.
37 |
38 | Word-count features
39 |
40 | ```
41 | def process(x):
42 | if x == 'nan':
43 | return 0
44 | else:
45 | return len(x.split())
46 |
47 |
48 | for col in ['benefits', 'title', 'company_profile', 'description', 'requirements']:
49 | train[f'{col}_wordsLen'] = train[col].astype('str').apply(lambda x: process(x))
50 | test[f'{col}_wordsLen'] = test[col].astype('str').apply(lambda x: process(x))
51 | ```
52 |
53 | Label Encoding
54 |
55 | ```
56 | df = pd.concat([train, test])
57 | del train, test
58 |
59 | for f in tqdm(['department', 'employment_type', 'function', 'industry',
60 | 'location', 'required_education', 'required_experience', 'title']):
61 | lbl = LabelEncoder()
62 | df[f] = lbl.fit_transform(df[f].astype(str))
63 |
64 | train = df[df['fraudulent'].notnull()].copy()
65 | test = df[df['fraudulent'].isnull()].copy()
66 |
67 | del df
68 | gc.collect()
69 | ```
70 |
71 | TF-IDF; different max_features values are worth experimenting with
72 |
73 | ```
74 | def get_tfidf(train, test, colname, max_features):
75 |
76 | text = list(train[colname].fillna('nan').values)
77 | tf = TfidfVectorizer(min_df=0,
78 | ngram_range=(1,2),
79 | stop_words='english',
80 | max_features=max_features)
81 | tf.fit(text)
82 | X = tf.transform(text)
83 | X_test = tf.transform(list(test[colname].fillna('nan').values))
84 |
85 | df_tfidf = pd.DataFrame(X.todense())
86 | df_tfidf_test = pd.DataFrame(X_test.todense())
87 | df_tfidf.columns = [f'{colname}_tfidf{i}' for i in range(max_features)]
88 | df_tfidf_test.columns = [f'{colname}_tfidf{i}' for i in range(max_features)]
89 | for col in df_tfidf.columns:
90 | train[col] = df_tfidf[col]
91 | test[col] = df_tfidf_test[col]
92 |
93 | return train, test
94 |
95 |
96 | train, test = get_tfidf(train, test, 'benefits', 12)
97 | train, test = get_tfidf(train, test, 'company_profile', 24)
98 | train, test = get_tfidf(train, test, 'description', 48)
99 | train, test = get_tfidf(train, test, 'requirements', 20)
100 | ```
101 |
102 | Training LightGBM with 5-fold CV gives an offline AUC of 0.9766968325791855 and about 86 points online, currently good enough for the top five.
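A minimal sketch of that 5-fold LightGBM training (the feature list, the `id` column name, and the parameters are assumptions; only the general scheme follows the text):

```
import numpy as np
from lightgbm import LGBMClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import roc_auc_score

# Drop the raw text columns and the (hypothetical) id column; keep engineered features.
features = [c for c in train.columns
            if c not in ['id', 'fraudulent', 'benefits', 'company_profile',
                         'description', 'requirements', 'salary_range']]
X, y = train[features], train['fraudulent'].astype(int)

oof = np.zeros(len(train))
test_pred = np.zeros(len(test))
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=2020)

for trn_idx, val_idx in skf.split(X, y):
    clf = LGBMClassifier(n_estimators=1000, learning_rate=0.05, random_state=2020)
    clf.fit(X.iloc[trn_idx], y.iloc[trn_idx],
            eval_set=[(X.iloc[val_idx], y.iloc[val_idx])])
    oof[val_idx] = clf.predict_proba(X.iloc[val_idx])[:, 1]
    test_pred += clf.predict_proba(test[features])[:, 1] / skf.n_splits

print('offline auc:', roc_auc_score(y, oof))
```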
103 |
--------------------------------------------------------------------------------
/competitions/2021ccf_ueba/baseline.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | import numpy as np
3 | from tqdm import tqdm
4 | from sklearn.model_selection import KFold
5 | from sklearn.metrics import mean_squared_error
6 | from lightgbm.sklearn import LGBMRegressor
7 | import matplotlib.pyplot as plt
8 | import seaborn as sns
9 | import gc
10 | import time
11 |
12 |
13 | train = pd.read_csv('data/train_data.csv', encoding='gbk')
14 | test = pd.read_csv('data/A_test_data.csv', encoding='gbk')
15 |
16 | df = pd.concat([train, test], axis=0, ignore_index=True)
17 |
18 | df['timestamp'] = df['time'].apply(lambda x: time.mktime(time.strptime(x, '%Y-%m-%d %H:%M:%S')))
19 | df['time'] = pd.to_datetime(df['time'])
20 | df = df.sort_values('timestamp').reset_index(drop=True)
21 |
22 | df['day'] = df['time'].dt.day
23 | df['dayofweek'] = df['time'].dt.dayofweek
24 | df['hour'] = df['time'].dt.hour
25 |
26 | df['IP_PORT'] = df['IP'] + ':' + df['port'].astype('str')
27 | for f in tqdm(['account', 'group', 'IP', 'url', 'switchIP', 'IP_PORT']):
28 | df[f] = df[f].map(dict(zip(df[f].unique(), range(df[f].nunique()))))
29 | df[f + '_count'] = df[f].map(df[f].value_counts())
30 |
31 | for f1 in tqdm(['account', 'group']):
32 | for f2 in ['IP', 'url', 'IP_PORT']:
33 | df['{}_{}_count'.format(f1, f2)] = df.groupby([f1, f2])['id'].transform('count')
34 | df['{}_in_{}_count_prop'.format(f2, f1)] = df['{}_{}_count'.format(f1, f2)] / df[f1 + '_count']
35 | df['{}_in_{}_nunique'.format(f2, f1)] = df.groupby(f1)[f2].transform('nunique')
36 | df['{}_in_{}_nunique'.format(f1, f2)] = df.groupby(f2)[f1].transform('nunique')
37 |
38 | for f in tqdm(['account', 'group']):
39 | df[f + '_next10_time_gap'] = df.groupby(f)['timestamp'].shift(-10) - df['timestamp']
40 |
41 |
42 | X = df[~df['ret'].isna()].sort_values('id').reset_index(drop=True)
43 | X_test = df[df['ret'].isna()].sort_values('id').reset_index(drop=True)
44 | cols = [f for f in X.columns if f not in ['id', 'time', 'timestamp', 'ret']]
45 |
46 |
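# Custom eval metric used below: 1 / (sin(arctan(RMSE)) + 1); higher is better,
# hence the trailing True (LightGBM's is_higher_better flag).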
47 | def eval_score(y_true, y_pred):
48 | return 'eval_score', 1 / (np.sin(np.arctan(np.sqrt(mean_squared_error(y_true, y_pred)))) + 1), True
49 |
50 |
51 | X['score'] = 0
52 | X_test['ret'] = 0
53 | feat_imp_df = pd.DataFrame({'feats': cols, 'imp': 0})
54 | skf = KFold(n_splits=5, shuffle=True, random_state=2021)
55 | clf = LGBMRegressor(
56 | learning_rate=0.1,
57 | n_estimators=30000,
58 | num_leaves=63,
59 | subsample=0.8,
60 | colsample_bytree=0.8,
61 | random_state=2021
62 | )
63 | for i, (trn_idx, val_idx) in enumerate(skf.split(X)):
64 | print('--------------------- {} fold ---------------------'.format(i))
65 | t = time.time()
66 | trn_x, trn_y = X[cols].iloc[trn_idx].reset_index(drop=True), X['ret'].values[trn_idx]
67 | val_x, val_y = X[cols].iloc[val_idx].reset_index(drop=True), X['ret'].values[val_idx]
68 | clf.fit(
69 | trn_x, trn_y,
70 | eval_set=[(val_x, val_y)],
71 | eval_metric=eval_score,
72 | early_stopping_rounds=200,
73 | verbose=200
74 | )
75 | X.loc[val_idx, 'score'] = clf.predict(val_x)
76 | X_test['ret'] += clf.predict(X_test[cols]) / skf.n_splits
77 | feat_imp_df['imp'] += clf.feature_importances_
78 | print('runtime: {}\n'.format(time.time() - t))
79 |
80 | cv_score = eval_score(X['ret'], X['score'])[1]
81 | X_test[['id', 'ret']].to_csv('sub_{}.csv'.format(cv_score), index=False)
82 |
83 |
84 |
--------------------------------------------------------------------------------
/competitions/sodic_enterprise_hidden_dangers/RAEDME.md:
--------------------------------------------------------------------------------
1 | # sodic: baseline for a text-mining model that analyses the quality of enterprise hidden-danger self-inspection reports
2 |
3 | ## Track link
4 |
5 | https://www.sodic.com.cn/competitions/900010
6 |
7 | ## Background
8 |
9 | Having enterprises self-report workplace-safety hidden dangers matters for eliminating risks while they are still in the bud. In practice, enterprises often fill these reports in carelessly, under- or mis-reporting the actual issues, which makes supervision harder. Analysing the reported content with big-data methods, identifying enterprises that do not genuinely fulfil their safety responsibilities, and pushing them to the regulator enables precise enforcement, improves the effectiveness of supervision, and strengthens enterprises' sense of responsibility.
10 |
11 | ## Task
12 |
13 | The competition provides hidden-danger reports filled in by enterprises; participants must use automated methods to detect whether a report is falsified or fabricated.
14 |
15 | ## Data description
16 |
17 | 
18 |
19 | - "id" is the primary key and has no business meaning;
20 | - The level-1 to level-4 standards come from the Shenzhen 2016 basic guideline for self-inspection and patrol inspection of safety hidden dangers: level 1 corresponds to the danger type, levels 2-4 refine it, and enterprises carry out their self-inspection against the level-4 standard of the relevant type;
21 | - "隐患内容" (danger content) is the concrete danger reported by the enterprise;
22 | - "标签" (label) marks the quality of the report: 1 means the report is unqualified, 0 means it is qualified.
23 |
24 | ## Modeling approach
25 |
26 | A typical binary text-classification problem.
27 |
28 | This took about an hour and two submissions, so the statements below may not be fully accurate and are for reference only. See the notebooks in the github repo for the code.
29 |
30 | ### Basic features
31 |
32 | From a quick head(10) it is already obvious that the positive samples are mostly the very short texts, which can be confirmed with the string length of the content:
33 |
34 | ```
35 | train['content_strlen'] = train['content'].astype(str).apply(len)
36 | train[train['content_strlen']<=3]['label'].mean()
37 | ```
38 |
39 | Among those very short samples the positive rate is as high as 87.7%.
40 |
41 | ### Approach 1: TF-IDF
42 |
43 | Tokenize with jieba, then apply TF-IDF followed by SVD dimensionality reduction:
44 |
45 | ```
46 | df['content'].fillna('', inplace=True)
47 | df['content_seg'] = df['content'].apply(lambda x: " ".join(jieba.cut(x)))
48 |
49 | n_components = 16
50 |
51 | X = list(df['content_seg'].values)
52 | tfv = TfidfVectorizer(ngram_range=(1,1),
53 | token_pattern=r"(?u)\b[^ ]+\b",
54 | max_features=10000)
55 | tfv.fit(X)
56 | X_tfidf = tfv.transform(X)
57 | svd = TruncatedSVD(n_components=n_components)
58 | svd.fit(X_tfidf)
59 | X_svd = svd.transform(X_tfidf)
60 |
61 | for i in range(n_components):
62 | df[f'content_tfidf_{i}'] = X_svd[:, i]
63 | ```
64 |
65 | With TF-IDF + strlen features, a 5-fold LGBM gives an offline F1 of 0.8347 and 0.9009 online.
66 |
67 | ### Approach 2: BERT
68 |
69 | Go straight to BERT: train roberta-wwm-ext with 5-fold CV to produce OOF predictions and test probabilities:
70 |
71 | ```
72 | oof = []
73 | prediction = test[['id']]
74 | prediction['bert_pred'] = 0
75 |
76 | kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=2021)
77 | for fold_id, (trn_idx, val_idx) in enumerate(kfold.split(train, train['label'])):
78 | train_df = train.iloc[trn_idx][['content', 'label']]
79 | valid_df = train.iloc[val_idx][['content', 'label']]
80 | train_df.columns = ['text', 'labels']
81 | valid_df.columns = ['text', 'labels']
82 |
83 | model_args = get_model_args()
84 | model = ClassificationModel('bert',
85 | 'hfl/chinese-roberta-wwm-ext',
86 | args=model_args)
87 | model.train_model(train_df, eval_df=valid_df)
88 | _, vaild_outputs, _ = model.eval_model(valid_df)
89 |
90 | df_oof = train.iloc[val_idx][['id', 'label']].copy()
91 | df_oof['bert_pred'] = vaild_outputs[:,1]
92 | oof.append(df_oof)
93 |
94 | _, test_outputs = model.predict([text for text in test['content']])
95 | prediction['bert_pred'] += test_outputs[:, 1] / kfold.n_splits
96 |
97 | del model, train_df, valid_df, vaild_outputs, test_outputs
98 | gc.collect()
99 | ```
100 |
101 | The BERT probability is then fed into the LGBM above as an extra feature and trained again with 5-fold CV, roughly as sketched below.
102 |
103 | Offline score: 0.8955, online score: 0.9506.
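A minimal sketch of that stacking step (the exact column handling in baseline.ipynb may differ; `features` here is a hypothetical list of the TF-IDF/strlen columns):

```
# Attach the out-of-fold BERT probability to train and the averaged test probability to test.
df_oof_all = pd.concat(oof)                  # columns: id, label, bert_pred
train = train.merge(df_oof_all[['id', 'bert_pred']], on='id', how='left')
test = test.merge(prediction[['id', 'bert_pred']], on='id', how='left')

# The LGBM then simply sees one more numeric feature.
features = ['content_strlen'] + [f'content_tfidf_{i}' for i in range(16)] + ['bert_pred']
```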
104 |
105 | PS: run simpletransformers.ipynb first to generate the probabilities, then run baseline.ipynb.
106 |
107 | ### TODO:
108 |
109 | 1. BERT was trained for only one epoch; training a few more epochs is worth trying;
110 | 2. Try other pretrained models;
111 | 3. Submitting the BERT predictions directly might also work well; I have not compared;
112 | 4. Not sure whether adding the other level texts as input helps; worth a try;
113 | 5. Other tricks I am not aware of.
114 |
115 |
--------------------------------------------------------------------------------
/competitions/wsdm_iqiyi_torch/train.py:
--------------------------------------------------------------------------------
1 | import copy
2 | import os
3 | import warnings
4 |
5 | import numpy as np
6 | import pandas as pd
7 | import torch
8 | import torch.nn as nn
9 | from sklearn.preprocessing import LabelEncoder
10 | from torch.utils.data import DataLoader
11 |
12 | from model import AQYModel
13 | from model_tools import AQYDataset, fit, predict, validate
14 |
15 | warnings.filterwarnings('ignore')
16 |
17 |
18 | def random_seed(seed):
19 | np.random.seed(seed)
20 | torch.manual_seed(seed)
21 | torch.cuda.manual_seed_all(seed)
22 |
23 | torch.backends.cudnn.deterministic = True
24 | torch.backends.cudnn.benchmark = False
25 |
26 |
27 | random_seed(2021)
28 |
29 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
30 | device
31 |
32 | data = pd.read_pickle('data/all_data.pkl')
33 | data.head()
34 |
35 | user_lbe = LabelEncoder()
36 | data['user_id'] = user_lbe.fit_transform(data['user_id'])
37 | data['user_id'] = data['user_id'] + 1
38 |
39 | train = data[data['label'] != -1]
40 | test = data[data['label'] == -1]
41 |
42 | train = train.sample(frac=1, random_state=2021).reset_index(drop=True)
43 |
44 | train_shape = int(train.shape[0] * 0.9)
45 |
46 | valid = train.iloc[train_shape:]
47 | train = train.iloc[:train_shape]
48 |
49 | print(train.shape, valid.shape, test.shape)
50 |
51 | train_dataset = AQYDataset(train, device)
52 | valid_dataset = AQYDataset(valid, device)
53 | test_dataset = AQYDataset(test, device)
54 |
55 | train_loader = DataLoader(train_dataset,
56 | batch_size=128,
57 | shuffle=True,
58 | num_workers=4)
59 | valid_loader = DataLoader(valid_dataset,
60 | batch_size=128,
61 | shuffle=False,
62 | num_workers=4)
63 | test_loader = DataLoader(test_dataset,
64 | batch_size=128,
65 | shuffle=False,
66 | num_workers=4)
67 |
68 | model = AQYModel().to(device)
69 |
70 | optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
71 | criterion = nn.MSELoss()
72 |
73 | best_val_score = float('-inf')
74 | last_improve = 0
75 | best_model = None
76 |
77 | for epoch in range(10):
78 | train_score = fit(model, train_loader, optimizer, criterion, device)
79 | val_score = validate(model, valid_loader, device)
80 |
81 | if val_score > best_val_score:
82 | best_val_score = val_score
83 | best_model = copy.deepcopy(model)
84 | last_improve = epoch
85 | improve = '*'
86 | else:
87 | improve = ''
88 |
89 | if epoch - last_improve > 3:
90 | break
91 |
92 | print(
93 | f'Epoch: {epoch} Train Score: {train_score}, Valid Score: {val_score} {improve}'
94 | )
95 |
96 | model = best_model
97 |
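# Hold-out evaluation below: score = 100 * (1 - mean(|label - pred|) / 7).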
98 | valid['pred'] = predict(model, valid_loader, device)
99 | valid['diff'] = valid['label'] - valid['pred']
100 | valid['diff'] = abs(valid['diff']) / 7
101 | score = 100 * (1 - valid['diff'].mean())
102 | print(f'Valid Score: {score}')
103 |
104 | os.makedirs('sub', exist_ok=True)
105 |
106 | test['pred'] = predict(model, test_loader, device)
107 | test = test[['user_id', 'pred']]
108 | test['user_id'] = test['user_id'] - 1
109 | test['user_id'] = user_lbe.inverse_transform(test['user_id'])
110 |
111 | test.to_csv(f'sub/{score}.csv', index=False, header=False, float_format="%.2f")
112 |
--------------------------------------------------------------------------------
/competitions/dc_yizhifu/README.md:
--------------------------------------------------------------------------------
1 | Offline 0.7154, around 0.675 online; the full code is available via the link at the end of the post.
2 |
3 | ## Competition link
4 |
5 | https://www.dcjingsai.com/v2/cmptDetail.html?id=410
6 |
7 | ## Background
8 | The central bank's FinTech report stresses that financial technology has become a new engine for upgrading the finance industry and a new opportunity for inclusive finance. Building risk-control models with big data and AI to spot high-risk transactions, sense abnormal transactions, and achieve early identification, early warning and early handling of risk is a prominent outcome of "finance + technology".
9 |
10 | BestPay (翼支付) actively explores FinTech techniques and applies them to real business, looking for more ways to use them in inclusive finance. This competition gives students a stage to show their talent, practice data-mining skills, and ease the transition from campus to industry.
11 |
12 | ## Data analysis
13 | The organiser provides three tables: basic information, transaction information and operation information. There are many fields and the data is heavily anonymised; many continuous features have been bucketed into discrete values.
14 |
15 | ## baseline
16 | This baseline mainly sets up a complete pipeline of feature engineering, model training and submission. The feature engineering is built on the basic-information and transaction tables. The basic table contains many categorical features, some of which preserve an ordering, so the number can be extracted directly from the value:
17 |
18 | ```
19 | for f in [
20 | 'balance', 'balance_avg', 'balance1', 'balance1_avg', 'balance2',
21 | 'balance2_avg', 'product1_amount', 'product2_amount',
22 | 'product3_amount', 'product4_amount', 'product5_amount', 'product6_amount'
23 | ]:
24 | df_feature[f] = df_feature[f].apply(lambda x: int(x.split(' ')[1]) if type(x) != float else np.NaN)
25 | ```
26 |
27 | Analogous to click-through-rate features, we can build fraud-rate features, i.e. how likely fraud is within a given category. Because these features involve the label, we must be careful about label leakage, so they are built in a 5-fold manner: for the training set, each fold's feature is computed from the statistics of the other four folds.
28 |
29 | ```
30 | # fraud-rate (category-level label mean) features
31 | def stat(df, df_merge, group_by, agg):
32 | group = df.groupby(group_by).agg(agg)
33 |
34 | columns = []
35 | for on, methods in agg.items():
36 | for method in methods:
37 | columns.append('{}_{}_{}'.format('_'.join(group_by), on, method))
38 | group.columns = columns
39 | group.reset_index(inplace=True)
40 | df_merge = df_merge.merge(group, on=group_by, how='left')
41 |
42 | del (group)
43 | gc.collect()
44 | return df_merge
45 |
46 |
47 | def statis_feat(df_know, df_unknow):
48 | df_unknow = stat(df_know, df_unknow, ['province'], {'label': ['mean']})
49 | df_unknow = stat(df_know, df_unknow, ['city'], {'label': ['mean']})
50 |
51 | return df_unknow
52 |
53 |
54 | df_train = df_feature[~df_feature['label'].isnull()]
55 | df_train = df_train.reset_index(drop=True)
56 | df_test = df_feature[df_feature['label'].isnull()]
57 |
58 | df_stas_feat = None
59 | kf = StratifiedKFold(n_splits=5, random_state=seed, shuffle=True)
60 | for train_index, val_index in kf.split(df_train, df_train['label']):
61 | df_fold_train = df_train.iloc[train_index]
62 | df_fold_val = df_train.iloc[val_index]
63 |
64 | df_fold_val = statis_feat(df_fold_train, df_fold_val)
65 | df_stas_feat = pd.concat([df_stas_feat, df_fold_val], axis=0)
66 |
67 | del (df_fold_train)
68 | del (df_fold_val)
69 | gc.collect()
70 |
71 | df_test = statis_feat(df_train, df_test)
72 | df_feature = pd.concat([df_stas_feat, df_test], axis=0)
73 | df_feature = df_feature.reset_index(drop=True)
74 |
75 | del (df_stas_feat)
76 | del (df_train)
77 | del (df_test)
78 | gc.collect()
79 | ```
80 |
81 | Fraud is easier to identify from behaviour, and the transaction table holds many useful features, so here we compute statistics of the transaction amount.
82 |
83 | ```
84 | df_temp = df_trans.groupby(['user'
85 | ])['amount'].agg(amount_mean='mean',
86 | amount_std='std',
87 | amount_sum='sum',
88 | amount_max='max',
89 | amount_min='min').reset_index()
90 | df_feature = df_feature.merge(df_temp, how='left')
91 | ```
92 |
93 | The transaction time is given as an offset from some unknown start time, e.g. 9 days 09:02:45.000000000 means 9 days, 9 hours, 2 minutes and 45 seconds after that start time. To make later time features easier to extract, we can pick an arbitrary start date and convert the offsets into normal datetimes.
94 |
95 | ```
96 | def parse_time(tm):
97 | days, _, time = tm.split(' ')
98 | time = time.split('.')[0]
99 |
100 | time = '2020-1-1 ' + time
101 | time = datetime.strptime(time, '%Y-%m-%d %H:%M:%S')
102 | time = (time + timedelta(days=int(days)))
103 |
104 | return time
105 |
106 |
107 | df_trans['date'] = df_trans['tm_diff'].apply(parse_time)
108 | df_trans['day'] = df_trans['date'].dt.day
109 | df_trans['hour'] = df_trans['date'].dt.hour
110 | ```
111 |
112 | ## Ideas for improvement
113 | * Learn embeddings for the categorical features
114 | * Mine the transaction and operation tables more deeply
115 | * Make better use of the time information
116 | * Decide how to handle the large number of categorical features in the tables (e.g. the count-encoding sketch below)
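One simple option for the last point is frequency (count) encoding, sketched below; it is not part of the original baseline, and the column list is an assumption:

```
# Count (frequency) encoding: augment each high-cardinality categorical column
# with how often its value occurs across train + test.
cat_cols = ['province', 'city']  # hypothetical column list
for col in cat_cols:
    df_feature[f'{col}_count'] = df_feature[col].map(df_feature[col].value_counts())
```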
117 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Baseline collection
2 | ## dc_molecule_baseline
3 | 赛道:AI战疫·小分子成药属性预测大赛
4 | 赛道链接:https://www.dcjingsai.com/common/cmpt/AI%E6%88%98%E7%96%AB%C2%B7%E5%B0%8F%E5%88%86%E5%AD%90%E6%88%90%E8%8D%AF%E5%B1%9E%E6%80%A7%E9%A2%84%E6%B5%8B%E5%A4%A7%E8%B5%9B_%E7%AB%9E%E8%B5%9B%E4%BF%A1%E6%81%AF.html
5 |
6 | ## aiyanxishe_weibo_baseline
7 | 赛道:微博立场检测
8 | 赛道链接:https://god.yanxishe.com/44
9 |
10 | ## tianchi_car_sale
11 | 赛道:零基础入门数据挖掘 - 二手车交易价格预测
12 | 赛道链接:https://tianchi.aliyun.com/competition/entrance/231784/introduction
13 |
14 | ## tianchi_elm_delivery
15 | 赛道:智慧物流:新冠期间饿了么骑士行为预估
16 | 赛道链接:https://tianchi.aliyun.com/competition/entrance/231777/introduction
17 |
18 | ## aiyanxishe_fraud_job
19 | 赛道:职荐专场--真假职位信息检测
20 | 赛道链接:https://god.yanxishe.com/46
21 |
22 | ## aiyanxishe_text_similarity
23 | 赛道:英文文本语义相似度
24 | 赛道链接: https://god.yanxishe.com/53
25 |
26 | ## aiyanxishe_102_flowers
27 | 赛道:102种鲜花分类
28 | 赛道链接:https://god.yanxishe.com/54
29 |
30 | ## dc_fraud_phonecall
31 | 赛道:2020首届数字四川创新大赛算法对抗赛:诈骗电话识别大赛
32 | 赛道链接:http://www.scdata.net.cn/common/cmpt/%E8%AF%88%E9%AA%97%E7%94%B5%E8%AF%9D%E8%AF%86%E5%88%AB_%E6%8E%92%E8%A1%8C%E6%A6%9C.html
33 |
34 | ## dc_yizhifu
35 | 赛道:第二届翼支付杯大数据建模大赛-信用风险用户识别
36 | 赛道链接:https://www.dcjingsai.com/v2/cmptDetail.html?id=410
37 |
38 | ## tianchi_news_classification
39 | 赛道:零基础入门NLP - 新闻文本分类
40 | 赛道链接:https://tianchi.aliyun.com/competition/entrance/531810/introduction
41 |
42 | ## iflytek_agriculture_ner
43 | 赛道:讯飞开放平台 - 农业问答数据处理挑战赛
44 | 赛道链接:http://challenge.xfyun.cn/topic/info?type=agriculture
45 |
46 | ## kesci_public_health_answers_classification
47 | 赛道:和鲸社区 - 医学数据挖掘算法评测大赛 | 任务1:公众健康问句分类
48 | 赛道链接:https://www.kesci.com/home/competition/5f2d0ea1b4ac2e002c164d82
49 |
50 | ## serverless_load_prediction
51 | 赛道:CCF 2020 - 大数据时代的Serverless工作负载预测
52 | 赛道链接:https://www.datafountain.cn/competitions/468
53 |
54 | ## property_chat_pair
55 | 赛道:CCF 2020 - 房产行业聊天问答匹配
56 | 赛道链接:https://www.datafountain.cn/competitions/474
57 |
58 | ## fund_raising_risk_prediction
59 | 赛道:CCF 2020 - 企业非法集资风险预测
60 | 赛道链接:https://www.datafountain.cn/competitions/469
61 |
62 | ## xiamen_international_bank_2020
63 | 赛道:DC 2020厦门国际银行数创金融杯建模大赛
64 | 赛道链接:https://www.dcjingsai.com/v2/cmptDetail.html?id=439
65 |
66 | ## dc_onecity
67 | 赛道:DC 中移集成(雄安产业研究院)首届OneCity编程大赛
68 | 赛道链接:https://www.dcjingsai.com/v2/cmptDetail.html?id=457
69 |
70 | ## biendata_bmes
71 | 赛道:智源-水利知识图谱构建挑战赛
72 | 赛道链接:http://competition.baai.ac.cn/c/37/format/introduce
73 |
74 | ## haihua2021
75 | 赛道:2021海华AI挑战赛·中文阅读理解·技术组
76 | 赛道链接:https://www.biendata.xyz/competition/haihua_2021/
77 |
78 | ## kesci_datajoy_airbnb
79 | 赛道:kesci datajoy 预测分析·民宿价格预测
80 | 赛道链接:https://www.heywhale.com/home/competition/605c426d21e3f6003b56a920/content
81 |
82 | ## sodic_job_match
83 | 赛道:人岗精准匹配模型
84 | 赛道链接:https://www.sodic.com.cn/competitions/900008
85 |
86 | ## sodic_enterprise_hidden_dangers
87 | 赛道:基于文本挖掘的企业隐患排查质量分析模型
88 | 赛道链接:https://www.sodic.com.cn/competitions/900010
89 |
90 | ## 2021CCF-aqy
91 | 赛道:剧本角色情感识别
92 | 赛道链接:https://www.datafountain.cn/competitions/518
93 |
94 | ## 2021CCF-loan
95 | 赛道:个贷违约预测
96 | 赛道链接:https://www.datafountain.cn/competitions/530
97 |
98 | ## 2021CCF-ueba
99 | 赛道:基于UEBA的用户上网异常行为分析
100 | 赛道链接:https://www.datafountain.cn/competitions/520
101 |
102 | ## 2021CCF-ner
103 | 赛道:产品评论观点提取
104 | 赛道链接:https://www.datafountain.cn/competitions/529
105 |
106 | ## 2021CCF-SysRisk
107 | 赛道:系统认证风险预测
108 | 赛道链接:https://www.datafountain.cn/competitions/537
109 |
110 | ## wsdm_iqiyi
111 | 赛道:爱奇艺用户留存预测挑战赛
112 | 赛道链接:http://challenge.ai.iqiyi.com/detail?raceId=61600f6cef1b65639cd5eaa6
113 |
114 | ## 2021shandong-dianwang
115 | 赛道:山东大数据-电网母线负荷预测
116 | 赛道链接:http://data.sd.gov.cn/cmpt/cmptDetail.html?id=55
117 |
118 | ## 2021shandong-dongying
119 | 赛道:山东大数据-网格事件智能分类
120 | 赛道链接:http://data.sd.gov.cn/cmpt/cmptDetail.html?id=67
121 |
122 | ## tianchi_aiops2022
123 | 赛道:第三届阿里云磐久智维算法大赛
124 | 赛道链接:https://tianchi.aliyun.com/competition/entrance/531947/introduction
125 |
126 |
127 | ## sohu2022_nlp_rec
128 | 赛道:2022搜狐校园 情感分析 × 推荐排序 算法大赛
129 | 赛道链接:https://www.biendata.xyz/competition/sohu_2022/
130 |
131 | ## 2022ccf_web_attack_detect
132 | 赛道:2022 CCF Web攻击检测与分类识别
133 | 赛道链接:https://www.datafountain.cn/competitions/596
134 |
135 |
--------------------------------------------------------------------------------
/competitions/serverless_load_prediction/README.md:
--------------------------------------------------------------------------------
1 | # CCF 2020 - Serverless workload prediction in the big-data era: baseline write-up
2 |
3 | This competition will be shared as a series; this is the first submittable version, scoring 0.08459508000 on the A leaderboard.
4 |
5 | EDIT1: 2020/10/21 second open-source version, A-board score 0.20860830.
6 |
7 | EDIT2: 2020/11/10 third open-source version, A-board score 0.26515555.
8 |
9 | ### Background:
10 |
11 | In the cloud-computing era, a Serverless architecture can elastically adjust resources according to the business workload, which reduces wasted resources in idle periods and avoids overload in busy periods, giving users highly cost-effective service. Behind elastic scheduling, workload prediction is a key step: quickly sensing the peaks and troughs of the business is something a practical Serverless service has to consider.
12 |
13 | ### Task:
14 |
15 | Traditional resource-control systems make decisions from thresholds: they only look at the current monitoring value, ignore history and workload trends, cannot adjust resources ahead of time, and therefore lag badly. With more and more enterprises moving to the cloud, workload prediction in cloud environments has become a classic and very challenging problem.
16 |
17 | In this task we are given performance-monitoring data from real scenarios; participants do feature engineering and modeling on the training data and predict the workload for a future period. To simplify the task, two production-critical metrics are used for evaluation: CPU utilisation and the number of jobs in the queue.
18 |
19 | ### BaseLine V1
20 |
21 | This version of the baseline uses the raw data directly, with no feature engineering; it only establishes the modeling and submission pipeline required by the competition.
22 |
23 | This is a structured time-series problem.
24 |
25 | First load the data and sort by queue id and time:
26 |
27 | ```
28 | train = pd.read_csv('raw_data/train.csv')
29 | train = train.sort_values(by=['QUEUE_ID', 'DOTTING_TIME']).reset_index(drop=True)
30 |
31 | test = pd.read_csv('raw_data/evaluation_public.csv')
32 | test = test.sort_values(by=['ID', 'DOTTING_TIME']).reset_index(drop=True)
33 | ```
34 |
35 | These columns take a single value in the test set, so they can simply be dropped:
36 |
37 | ```
38 | del train['STATUS']
39 | del train['PLATFORM']
40 | del train['RESOURCE_TYPE']
41 |
42 | del test['STATUS']
43 | del test['PLATFORM']
44 | del test['RESOURCE_TYPE']
45 | ```
46 |
47 | After sorting, the timestamp itself is probably no longer useful, so drop it too:
48 |
49 | ```
50 | del train['DOTTING_TIME']
51 | del test['DOTTING_TIME']
52 | ```
53 |
54 | Label Encoding
55 |
56 | ```
57 | le = LabelEncoder()
58 | train['QUEUE_TYPE'] = le.fit_transform(train['QUEUE_TYPE'].astype(str))
59 | test['QUEUE_TYPE'] = le.transform(test['QUEUE_TYPE'].astype(str))
60 | ```
61 |
62 | Construct the target columns.
63 |
64 | According to the competition description:
65 |
66 | For each test row, the competition gives the queue's monitoring data for some period (e.g. 9:35 – 10:00) and asks participants to predict the metrics for the next five points (10:00 – 10:25).
67 |
68 | So we can simply take shift(-5) as the label:
69 |
70 | ```
71 | df_train = pd.DataFrame()
72 |
73 | for id_ in tqdm(train.QUEUE_ID.unique()):
74 | tmp = train[train.QUEUE_ID == id_]
75 | tmp['CPU_USAGE_next25mins'] = tmp['CPU_USAGE'].shift(-5)
76 | tmp['LAUNCHING_JOB_NUMS_next25mins'] = tmp['LAUNCHING_JOB_NUMS'].shift(-5)
77 | df_train = df_train.append(tmp)
78 |
79 | df_train = df_train[df_train.CPU_USAGE_next25mins.notna()]
80 | ```
81 |
82 | Next comes the modeling.
83 |
84 | Since there are two targets, we train two separate 5-fold LGBM regressions with GroupKFold.
85 |
86 | The code is fairly long, so please refer to the source directly; a rough sketch of the scheme follows.
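A minimal sketch of that scheme, shown for the CPU target only (the feature list and parameters are assumptions, not the exact code from the notebook):

```
import numpy as np
from lightgbm import LGBMRegressor
from sklearn.model_selection import GroupKFold

target = 'CPU_USAGE_next25mins'
features = [c for c in df_train.columns
            if c not in ['CPU_USAGE_next25mins', 'LAUNCHING_JOB_NUMS_next25mins']]

oof = np.zeros(len(df_train))
test_pred = np.zeros(len(test))
gkf = GroupKFold(n_splits=5)

# Group by QUEUE_ID so rows of one queue never appear in both train and validation folds.
for trn_idx, val_idx in gkf.split(df_train, groups=df_train['QUEUE_ID']):
    reg = LGBMRegressor(n_estimators=2000, learning_rate=0.05, random_state=2020)
    reg.fit(df_train.iloc[trn_idx][features], df_train.iloc[trn_idx][target],
            eval_set=[(df_train.iloc[val_idx][features], df_train.iloc[val_idx][target])])
    oof[val_idx] = reg.predict(df_train.iloc[val_idx][features])
    test_pred += reg.predict(test[features]) / gkf.n_splits

# Repeat the same loop for LAUNCHING_JOB_NUMS_next25mins to get the second prediction.
```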
87 |
88 | Once the two models are trained, merge their predictions, post-process, and generate the submission:
89 |
90 | ```
91 | prediction = prediction1.copy()
92 | prediction = pd.merge(prediction, prediction2[['myid', 'LAUNCHING_JOB_NUMS_next25mins']], on='myid')
93 |
94 | # note: the submission requires non-negative integer predictions
95 |
96 | prediction['CPU_USAGE_next25mins'] = prediction['CPU_USAGE_next25mins'].apply(np.floor)
97 | prediction['CPU_USAGE_next25mins'] = prediction['CPU_USAGE_next25mins'].apply(lambda x: 0 if x<0 else x)
98 | prediction['CPU_USAGE_next25mins'] = prediction['CPU_USAGE_next25mins'].astype(int)
99 | prediction['LAUNCHING_JOB_NUMS_next25mins'] = prediction['LAUNCHING_JOB_NUMS_next25mins'].apply(np.floor)
100 | prediction['LAUNCHING_JOB_NUMS_next25mins'] = prediction['LAUNCHING_JOB_NUMS_next25mins'].apply(lambda x: 0 if x<0 else x)
101 | prediction['LAUNCHING_JOB_NUMS_next25mins'] = prediction['LAUNCHING_JOB_NUMS_next25mins'].astype(int)
102 |
103 | preds = []
104 |
105 | for id_ in tqdm(prediction.ID.unique()):
106 | items = [id_]
107 | tmp = prediction[prediction.ID == id_].sort_values(by='myid').reset_index(drop=True)
108 | for i, row in tmp.iterrows():
109 | items.append(row['CPU_USAGE_next25mins'])
110 | items.append(row['LAUNCHING_JOB_NUMS_next25mins'])
111 | preds.append(items)
112 |
113 | sub = pd.DataFrame(preds)
114 | sub.columns = sub_sample.columns
115 |
116 | sub.to_csv('baseline.csv', index=False)
117 | ```
118 |
119 | ### BaseLine V2
120 |
121 | PS: my way of constructing the label is actually flawed, because DOTTING_TIME is not continuous; I have not had time to clean the data yet.
122 |
123 | PS: the improvement in V2 is to train one model per qid, i.e. a separate model for each QUEUE_ID. This works because every qid in the test set appears in the training set; if the leaderboard switch brought new qids we would be stuck :( (the organisers confirmed in the QQ group that there should be no new qids after the switch)
124 |
125 | PS: a few diff features were also added casually; the raw features alone give about 0.17.
126 |
127 | See baseline_v2.ipynb.
128 |
129 | ### BaseLine V3
130 |
131 | The main difference from the previous version:
132 |
133 | The labels are rebuilt following the official statement "for each test row, the competition gives the queue's monitoring data for some period (e.g. 9:35 – 10:00) and asks participants to predict the metrics for the next five points (10:00 – 10:25)":
134 |
135 | A sliding window is applied within each qid group, taking 10 consecutive time points at a time: the first 5 serve as features and the last 5 as labels, e.g.
136 |
137 | ```
138 | t0 t1 t2 t3 t4 --> t5 t6 t7 t8 t9
139 | t1 t2 t3 t4 t5 --> t6 t7 t8 t9 t10
140 | ```
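A minimal sketch of that sliding-window construction for one metric (the function and column names are illustrative; the notebook's implementation may differ):

```
import pandas as pd

def make_windows(g, col='CPU_USAGE', n_hist=5, n_future=5):
    # For one QUEUE_ID group (already sorted by time), emit rows with 5 history
    # values as features and the following 5 values as labels.
    rows = []
    values = g[col].values
    for start in range(len(values) - n_hist - n_future + 1):
        hist = values[start:start + n_hist]
        future = values[start + n_hist:start + n_hist + n_future]
        row = {f'{col}_t{i}': v for i, v in enumerate(hist)}
        row.update({f'{col}_y{i}': v for i, v in enumerate(future)})
        rows.append(row)
    return pd.DataFrame(rows)

df_windows = pd.concat(
    [make_windows(g) for _, g in train.groupby('QUEUE_ID')], ignore_index=True)
```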
141 |
142 | In addition, the job target is no longer modeled at all (its predictions are simply set to 0), and a few row-wise statistical features were added.
143 |
--------------------------------------------------------------------------------
/competitions/aiyanxishe_102_flowers/fastai_densenet121.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {
7 | "collapsed": true
8 | },
9 | "outputs": [],
10 | "source": [
11 | "from pathlib import Path\n",
12 | "from fastai import *\n",
13 | "from fastai.vision import *\n",
14 | "import torch"
15 | ]
16 | },
17 | {
18 | "cell_type": "code",
19 | "execution_count": null,
20 | "metadata": {
21 | "collapsed": true
22 | },
23 | "outputs": [],
24 | "source": [
25 | "data_folder = Path(\"./raw_data\")"
26 | ]
27 | },
28 | {
29 | "cell_type": "code",
30 | "execution_count": null,
31 | "metadata": {
32 | "collapsed": true
33 | },
34 | "outputs": [],
35 | "source": [
36 | "train_df = pd.read_csv(\"./raw_data/train.csv\")\n",
37 | "test_df = pd.DataFrame({\"filename\": [f'{i}.jpg' for i in range(1434)]})"
38 | ]
39 | },
40 | {
41 | "cell_type": "code",
42 | "execution_count": null,
43 | "metadata": {
44 | "collapsed": true
45 | },
46 | "outputs": [],
47 | "source": [
48 | "test_img = ImageList.from_df(test_df, path=data_folder, folder='test')"
49 | ]
50 | },
51 | {
52 | "cell_type": "code",
53 | "execution_count": null,
54 | "metadata": {
55 | "collapsed": true
56 | },
57 | "outputs": [],
58 | "source": [
59 | "trfm = get_transforms(do_flip=True,\n",
60 | " max_rotate=10.0, \n",
61 | " max_zoom=1.25, \n",
62 | " max_lighting=0.2, \n",
63 | " max_warp=0.2, \n",
64 | " p_affine=0.65, \n",
65 | " p_lighting=0.55)\n",
66 | "\n",
67 | "train_img = (ImageList.from_df(train_df, path=data_folder, folder='train')\n",
68 | " .split_by_rand_pct(0.01)\n",
69 | " .label_from_df()\n",
70 | " .add_test(test_img)\n",
71 | " .transform(trfm, size=128)\n",
72 | " .databunch(path='.', bs=128, device= torch.device('cuda:0'))\n",
73 | " .normalize(imagenet_stats)\n",
74 | ")"
75 | ]
76 | },
77 | {
78 | "cell_type": "code",
79 | "execution_count": null,
80 | "metadata": {},
81 | "outputs": [],
82 | "source": [
83 | "train_img.show_batch(rows=3, figsize=(7,6))"
84 | ]
85 | },
86 | {
87 | "cell_type": "code",
88 | "execution_count": null,
89 | "metadata": {
90 | "collapsed": true
91 | },
92 | "outputs": [],
93 | "source": [
94 | "learn = cnn_learner(train_img, models.densenet121, metrics=[error_rate, accuracy]) "
95 | ]
96 | },
97 | {
98 | "cell_type": "code",
99 | "execution_count": null,
100 | "metadata": {},
101 | "outputs": [],
102 | "source": [
103 | "learn.lr_find()\n",
104 | "learn.recorder.plot()"
105 | ]
106 | },
107 | {
108 | "cell_type": "code",
109 | "execution_count": null,
110 | "metadata": {
111 | "collapsed": true
112 | },
113 | "outputs": [],
114 | "source": [
115 | "callbacks = [\n",
116 | " callbacks.EarlyStoppingCallback(learn, min_delta=1e-5, patience=3),\n",
117 | " callbacks.SaveModelCallback(learn)\n",
118 | "]\n",
119 | "\n",
120 | "learn.callbacks = callbacks"
121 | ]
122 | },
123 | {
124 | "cell_type": "code",
125 | "execution_count": null,
126 | "metadata": {},
127 | "outputs": [],
128 | "source": [
129 | "lr = 1e-02\n",
130 | "learn.fit_one_cycle(10, slice(lr))"
131 | ]
132 | },
133 | {
134 | "cell_type": "code",
135 | "execution_count": null,
136 | "metadata": {},
137 | "outputs": [],
138 | "source": [
139 | "interp = ClassificationInterpretation.from_learner(learn)"
140 | ]
141 | },
142 | {
143 | "cell_type": "code",
144 | "execution_count": null,
145 | "metadata": {},
146 | "outputs": [],
147 | "source": [
148 | "interp.plot_top_losses(9, figsize=(7,6))"
149 | ]
150 | },
151 | {
152 | "cell_type": "code",
153 | "execution_count": null,
154 | "metadata": {},
155 | "outputs": [],
156 | "source": [
157 | "learn.load('bestmodel')"
158 | ]
159 | },
160 | {
161 | "cell_type": "code",
162 | "execution_count": null,
163 | "metadata": {},
164 | "outputs": [],
165 | "source": [
166 | "preds, _ = learn.TTA(ds_type=DatasetType.Test)"
167 | ]
168 | },
169 | {
170 | "cell_type": "code",
171 | "execution_count": null,
172 | "metadata": {
173 | "collapsed": true
174 | },
175 | "outputs": [],
176 | "source": [
177 | "test_df['labels'] = np.argmax(preds.numpy(), axis=1)"
178 | ]
179 | },
180 | {
181 | "cell_type": "code",
182 | "execution_count": null,
183 | "metadata": {
184 | "collapsed": true
185 | },
186 | "outputs": [],
187 | "source": [
188 | "sub = test_df.copy()\n",
189 | "sub['ID'] = test_df.index\n",
190 | "sub[['ID', 'labels']].to_csv('submission.csv', index=False, header=False)"
191 | ]
192 | },
193 | {
194 | "cell_type": "code",
195 | "execution_count": null,
196 | "metadata": {},
197 | "outputs": [],
198 | "source": [
199 | "!head submission.csv"
200 | ]
201 | },
202 | {
203 | "cell_type": "code",
204 | "execution_count": null,
205 | "metadata": {
206 | "collapsed": true
207 | },
208 | "outputs": [],
209 | "source": []
210 | }
211 | ],
212 | "metadata": {
213 | "kernelspec": {
214 | "display_name": "Python 3",
215 | "language": "python",
216 | "name": "python3"
217 | },
218 | "language_info": {
219 | "codemirror_mode": {
220 | "name": "ipython",
221 | "version": 3
222 | },
223 | "file_extension": ".py",
224 | "mimetype": "text/x-python",
225 | "name": "python",
226 | "nbconvert_exporter": "python",
227 | "pygments_lexer": "ipython3",
228 | "version": "3.6.3"
229 | }
230 | },
231 | "nbformat": 4,
232 | "nbformat_minor": 2
233 | }
234 |
--------------------------------------------------------------------------------
/competitions/tianchi_aiops2022/README.md:
--------------------------------------------------------------------------------
1 | # Baseline for the preliminary round of the 3rd Alibaba Cloud PanJiu intelligent-maintenance algorithm competition
2 |
3 | - Track: 第三届阿里云磐久智维算法大赛 (3rd Alibaba Cloud PanJiu intelligent-maintenance algorithm competition, Tianchi platform)
4 | - Track link: https://tianchi.aliyun.com/competition/entrance/531947/introduction
5 |
6 |
7 |
8 |
9 | ## Problem description
10 |
11 | Given a period of system log data, participants should propose a solution that diagnoses which kind of fault a server has developed. Concretely, participants need to mine fault-related features from the provided data and train a suitable machine-learning model that distinguishes the fault types. Data-processing methods and algorithms are unrestricted, but both effectiveness and complexity should be considered so that the solution stays reasonably efficient.
12 |
13 | ## Data description
14 |
15 | - SEL log data: the relevant log lines for each sn (serial number)
16 | - Training labels: the fault (label) that an sn develops at fault_time; four classes, where classes 0 and 1 are CPU-related faults, class 2 is memory-related, and class 3 covers other fault types
17 | - Submission sample: given the sn and fault_time fields, participants must produce the final label from the SEL logs.
18 |
19 | ## Evaluation function
20 |
21 | Weighted multi-class macro F1-score (class 0 has the largest weight):
22 |
23 | ```
24 | def macro_f1(target_df: pd.DataFrame, submit_df: pd.DataFrame) -> float:
25 |
26 | """
27 |     Compute the weighted macro F1 score
28 | :param target_df: [sn,fault_time,label]
29 | :param submit_df: [sn,fault_time,label]
30 | :return:
31 | """
32 |
33 | weights = [3/7, 2/7, 1/7, 1/7]
34 |
35 | overall_df = target_df.merge(submit_df, how='left', on=['sn', 'fault_time'], suffixes=['_gt', '_pr'])
36 |     overall_df = overall_df.fillna(-1)
37 |
38 | macro_F1 = 0.
39 | for i in range(len(weights)):
40 | TP = len(overall_df[(overall_df['label_gt'] == i) & (overall_df['label_pr'] == i)])
41 | FP = len(overall_df[(overall_df['label_gt'] != i) & (overall_df['label_pr'] == i)])
42 | FN = len(overall_df[(overall_df['label_gt'] == i) & (overall_df['label_pr'] != i)])
43 | precision = TP / (TP + FP) if (TP + FP) > 0 else 0
44 | recall = TP / (TP + FN) if (TP + FN) > 0 else 0
45 | F1 = 2 * precision * recall / (precision + recall) if (precision + recall) > 0 else 0
46 | macro_F1 += weights[i] * F1
47 | return macro_F1
48 | ```
49 |
50 | ## baseline (offline 0.59, online 0.65)
51 |
52 | The task has obvious text characteristics as well as a time-series flavour, so features can combine both; alternatively, the logs could simply be concatenated and treated as plain multi-class text classification.
53 |
54 | This baseline builds simple tfidf, w2v, nunique and time features; for the full code click "read the original" (or visit our github repo). The helpers it relies on are sketched after the code block.
55 |
56 | ```
57 | def make_dataset(dataset, data_type='train'):
58 | ret = list()
59 |
60 | for idx, row in tqdm(dataset.iterrows()):
61 | sn = row['sn']
62 | fault_time = row['fault_time']
63 | ts = row['fault_time_ts']
64 |
65 | if data_type == 'train':
66 | label = row['label']
67 |
68 | df = sel_data[sel_data['sn'] == sn].copy()
69 |
70 | df = df[df['time_ts'] <= ts].copy()
71 | df = df.sort_values(by='time_ts').reset_index(drop=True)
72 | df = df.tail(40).copy() # TODO: could change last 40 logs here
73 |
74 | # make some features
75 |
76 | logs_count = len(df)
77 |
78 | if logs_count > 0:
79 | msg_nunique = df['msg'].nunique()
80 | msg_category_nunique = df['category'].nunique()
81 | msg_split_0_nunique = df['msg_split_0'].nunique()
82 | msg_split_1_nunique = df['msg_split_1'].nunique()
83 | msg_split_2_nunique = df['msg_split_2'].nunique()
84 | last_category = df['category'].value_counts().index[0]
85 | last_category = cate_map[last_category] if last_category in cate_map else len(cate_map)
86 |
87 | s = df['time_ts'].values
88 | if len(s) > 0:
89 | seconds_span = s[-1] - s[0]
90 | else:
91 | seconds_span = 0
92 |
93 | df['time_ts_shift_1'] = df['time_ts'].shift(1)
94 | df['time_ts_diffs_1'] = df['time_ts'] - df['time_ts_shift_1']
95 | s = df['time_ts_diffs_1'].values
96 | if len(s) > 1:
97 | log_time_diffs_avg = np.mean(s[1:])
98 | log_time_diffs_max = np.max(s[1:])
99 | log_time_diffs_min = np.min(s[1:])
100 | log_time_diffs_std = np.std(s[1:])
101 | else:
102 | try:
103 | log_time_diffs_avg = log_time_diffs_max = log_time_diffs_min = s[0]
104 | log_time_diffs_std = 0
105 | except:
106 | log_time_diffs_avg = log_time_diffs_max = log_time_diffs_min = log_time_diffs_std = 0
107 |
108 | all_msg = "\n".join(df['msg'].values.tolist()).lower()
109 | w2v_emb = get_w2v_mean(all_msg)[0]
110 | tfv_emb = get_tfidf_svd([s.lower() for s in df['msg'].values.tolist()])
111 |
112 | else:
113 | logs_count = 0
114 | msg_nunique = 0
115 | msg_category_nunique = 0
116 | msg_split_0_nunique = 0
117 | msg_split_1_nunique = 0
118 | msg_split_2_nunique = 0
119 | last_category = 0
120 | seconds_span = 0
121 | log_time_diffs_avg = 0
122 | log_time_diffs_max = 0
123 | log_time_diffs_min = 0
124 | log_time_diffs_std = 0
125 | w2v_emb = [0] * 32
126 | tfv_emb = [0] * 16
127 |
128 |
129 | # format dataset
130 | data = {
131 | 'sn': sn,
132 | 'fault_time': fault_time,
133 | 'logs_count': logs_count,
134 | 'msg_nunique': msg_nunique,
135 | 'msg_category_nunique': msg_category_nunique,
136 | 'msg_split_0_nunique': msg_split_0_nunique,
137 | 'msg_split_1_nunique': msg_split_1_nunique,
138 | 'msg_split_2_nunique': msg_split_2_nunique,
139 | 'last_category': last_category,
140 | 'seconds_span': seconds_span,
141 | 'log_time_diffs_avg': log_time_diffs_avg,
142 | 'log_time_diffs_max': log_time_diffs_max,
143 | 'log_time_diffs_min': log_time_diffs_min,
144 | 'log_time_diffs_std': log_time_diffs_std,
145 | }
146 |
147 | for i in range(32):
148 | data[f'msg_w2v_{i}'] = w2v_emb[i]
149 | for i in range(16):
150 | data[f'msg_tfv_{i}'] = tfv_emb[i]
151 |
152 | if data_type == 'train':
153 | data['label'] = label
154 |
155 | ret.append(data)
156 |
157 | return ret
158 | ```
159 |
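get_w2v_mean and get_tfidf_svd are referenced above but not shown; a minimal sketch of what such helpers could look like (the dimensions 32 and 16 match the feature loop above, everything else, including the pre-trained `w2v_model`, is an assumption):

```
import numpy as np
from gensim.models import Word2Vec
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD

# Assumption: w2v_model is a gensim Word2Vec trained on tokenized log messages,
# e.g. Word2Vec(sentences=tokenized_msgs, vector_size=32, min_count=1).

def get_w2v_mean(text):
    # Mean of the word vectors of the whitespace tokens found in the vocabulary; shape (1, 32).
    tokens = [t for t in text.split() if t in w2v_model.wv]
    if not tokens:
        return np.zeros((1, 32))
    return np.mean([w2v_model.wv[t] for t in tokens], axis=0, keepdims=True)

def get_tfidf_svd(msgs, n_components=16):
    # TF-IDF over one sn's messages, SVD-reduced, then averaged into a single vector.
    tfv = TfidfVectorizer(max_features=2000)
    X = tfv.fit_transform(msgs)
    k = min(n_components, X.shape[1] - 1, X.shape[0])
    if k < 1:
        return np.zeros(n_components)
    emb = TruncatedSVD(n_components=k, random_state=2022).fit_transform(X)
    return np.pad(emb.mean(axis=0), (0, n_components - k))
```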
160 | ## TODO:
161 |
162 | 1. Build more text features
163 | 2. Build more statistical features
164 | 3. Use stronger models such as BERT to extract text features
165 | 4. Other ways of constructing the dataset that I am not aware of
166 | 5. Text cleaning, etc.
167 | 6. Blending or stacking
168 |
--------------------------------------------------------------------------------
/competitions/tianchi_car_sale/final/xgb.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {
7 | "ExecuteTime": {
8 | "end_time": "2020-04-01T10:11:53.573638Z",
9 | "start_time": "2020-04-01T10:11:52.954068Z"
10 | }
11 | },
12 | "outputs": [],
13 | "source": [
14 | "import os\n",
15 | "from sklearn.preprocessing import StandardScaler\n",
16 | "from sklearn.metrics import cohen_kappa_score, accuracy_score, mean_absolute_error, f1_score\n",
17 | "from sklearn.model_selection import GroupKFold, KFold, StratifiedKFold\n",
18 | "from tqdm import tqdm\n",
19 | "import xgboost as xgb\n",
20 | "import numpy as np\n",
21 | "import pandas as pd\n",
22 | "import math\n",
23 | "import matplotlib.pyplot as plt\n",
24 | "import seaborn as sns\n",
25 | "import warnings\n",
26 | "import gc\n",
27 | "from datetime import datetime\n",
28 | "\n",
29 | "warnings.filterwarnings('ignore')\n",
30 | "pd.set_option('display.max_columns', None)\n",
31 | "pd.set_option('display.max_rows', None)"
32 | ]
33 | },
34 | {
35 | "cell_type": "code",
36 | "execution_count": null,
37 | "metadata": {
38 | "ExecuteTime": {
39 | "end_time": "2020-04-01T10:11:53.577557Z",
40 | "start_time": "2020-04-01T10:11:53.575435Z"
41 | }
42 | },
43 | "outputs": [],
44 | "source": [
45 | "seed = 2020"
46 | ]
47 | },
48 | {
49 | "cell_type": "code",
50 | "execution_count": null,
51 | "metadata": {
52 | "ExecuteTime": {
53 | "end_time": "2020-04-01T10:11:54.013477Z",
54 | "start_time": "2020-04-01T10:11:53.578780Z"
55 | }
56 | },
57 | "outputs": [],
58 | "source": [
59 | "df_feature = pd.read_pickle('feature.pickle')"
60 | ]
61 | },
62 | {
63 | "cell_type": "code",
64 | "execution_count": null,
65 | "metadata": {
66 | "ExecuteTime": {
67 | "end_time": "2020-04-01T10:11:54.555479Z",
68 | "start_time": "2020-04-01T10:11:54.014809Z"
69 | }
70 | },
71 | "outputs": [],
72 | "source": [
73 | "from sklearn.preprocessing import LabelEncoder\n",
74 | "for f in tqdm(df_feature.select_dtypes('object')):\n",
75 | " lbl = LabelEncoder()\n",
76 | " df_feature[f] = lbl.fit_transform(df_feature[f].astype(str))"
77 | ]
78 | },
79 | {
80 | "cell_type": "code",
81 | "execution_count": null,
82 | "metadata": {
83 | "ExecuteTime": {
84 | "end_time": "2020-04-01T10:11:55.031052Z",
85 | "start_time": "2020-04-01T10:11:54.556693Z"
86 | }
87 | },
88 | "outputs": [],
89 | "source": [
90 | "df_test = df_feature[df_feature['price'].isnull()].copy()\n",
91 | "df_train = df_feature[df_feature['price'].notnull()].copy()"
92 | ]
93 | },
94 | {
95 | "cell_type": "code",
96 | "execution_count": null,
97 | "metadata": {
98 | "ExecuteTime": {
99 | "end_time": "2020-04-01T12:19:53.834236Z",
100 | "start_time": "2020-04-01T10:11:55.032861Z"
101 | },
102 | "scrolled": true
103 | },
104 | "outputs": [],
105 | "source": [
106 | "ycol = 'price'\n",
107 | "feature_names = list(\n",
108 | " filter(lambda x: x not in [ycol, 'SaleID', 'regDate', 'creatDate', 'creatDate_year', 'creatDate_month'], df_train.columns))\n",
109 | "\n",
110 | "model = xgb.XGBRegressor(num_leaves=64,\n",
111 | " max_depth=8,\n",
112 | " learning_rate=0.08,\n",
113 | " n_estimators=10000000,\n",
114 | " subsample=0.75,\n",
115 | " feature_fraction=0.75,\n",
116 | " reg_alpha=0.7,\n",
117 | " reg_lambda=1.2,\n",
118 | " random_state=seed,\n",
119 | " metric=None,\n",
120 | " tree_method='gpu_hist'\n",
121 | " )\n",
122 | "\n",
123 | "oof = []\n",
124 | "prediction = df_test[['SaleID']]\n",
125 | "prediction['price'] = 0\n",
126 | "df_importance_list = []\n",
127 | "\n",
128 | "kfold = KFold(n_splits=5, shuffle=False, random_state=seed)\n",
129 | "for fold_id, (trn_idx, val_idx) in enumerate(kfold.split(df_train[feature_names])):\n",
130 | " X_train = df_train.iloc[trn_idx][feature_names]\n",
131 | " Y_train = df_train.iloc[trn_idx][ycol]\n",
132 | "\n",
133 | " X_val = df_train.iloc[val_idx][feature_names]\n",
134 | " Y_val = df_train.iloc[val_idx][ycol]\n",
135 | "\n",
136 | " print('\\nFold_{} Training ================================\\n'.format(fold_id+1))\n",
137 | "\n",
138 | " lgb_model = model.fit(X_train,\n",
139 | " Y_train,\n",
140 | " eval_set=[(X_train, Y_train), (X_val, Y_val)],\n",
141 | " verbose=1000,\n",
142 | " eval_metric='mae',\n",
143 | " early_stopping_rounds=500)\n",
144 | "\n",
145 | " pred_val = lgb_model.predict(\n",
146 | " X_val)\n",
147 | " df_oof = df_train.iloc[val_idx][['SaleID', ycol]].copy()\n",
148 | " df_oof['pred'] = pred_val\n",
149 | " oof.append(df_oof)\n",
150 | "\n",
151 | " pred_test = lgb_model.predict(\n",
152 | " df_test[feature_names])\n",
153 | " prediction['price'] += pred_test / 5\n",
154 | "\n",
155 | " df_importance = pd.DataFrame({\n",
156 | " 'column': feature_names,\n",
157 | " 'importance': lgb_model.feature_importances_,\n",
158 | " })\n",
159 | " df_importance_list.append(df_importance)\n",
160 | "\n",
161 | " del lgb_model, pred_val, pred_test, X_train, Y_train, X_val, Y_val\n",
162 | " gc.collect()"
163 | ]
164 | },
165 | {
166 | "cell_type": "code",
167 | "execution_count": null,
168 | "metadata": {
169 | "ExecuteTime": {
170 | "end_time": "2020-04-01T12:19:53.866498Z",
171 | "start_time": "2020-04-01T12:19:53.835713Z"
172 | },
173 | "scrolled": true
174 | },
175 | "outputs": [],
176 | "source": [
177 | "df_importance = pd.concat(df_importance_list)\n",
178 | "df_importance = df_importance.groupby(['column'])['importance'].agg(\n",
179 | " 'mean').sort_values(ascending=False).reset_index()\n",
180 | "df_importance"
181 | ]
182 | },
183 | {
184 | "cell_type": "code",
185 | "execution_count": null,
186 | "metadata": {
187 | "ExecuteTime": {
188 | "end_time": "2020-04-01T12:19:54.344228Z",
189 | "start_time": "2020-04-01T12:19:53.867796Z"
190 | }
191 | },
192 | "outputs": [],
193 | "source": [
194 | "df_oof = pd.concat(oof)\n",
195 | "df_oof[ycol] = np.expm1(df_oof[ycol])\n",
196 | "df_oof['pred'] = np.expm1(df_oof['pred'])\n",
197 | "mae = mean_absolute_error(df_oof[ycol], df_oof['pred'])\n",
198 | "print('mae:', mae)\n",
199 | "df_oof.to_csv('xgb_oof.csv'.format(mae), index=False, encoding='utf-8')"
200 | ]
201 | },
202 | {
203 | "cell_type": "code",
204 | "execution_count": null,
205 | "metadata": {
206 | "ExecuteTime": {
207 | "end_time": "2020-04-01T12:19:54.618364Z",
208 | "start_time": "2020-04-01T12:19:54.345642Z"
209 | }
210 | },
211 | "outputs": [],
212 | "source": [
213 | "prediction['price'] = np.expm1(prediction['price'])\n",
214 | "sub = prediction.copy(deep=True)\n",
215 | "sub.to_csv('sub/xgb_{}.csv'.format(mae), index=False, encoding='utf-8')\n",
216 | "sub.to_csv('xgb.csv'.format(mae), index=False, encoding='utf-8')"
217 | ]
218 | },
219 | {
220 | "cell_type": "code",
221 | "execution_count": null,
222 | "metadata": {
223 | "ExecuteTime": {
224 | "end_time": "2020-04-01T12:19:54.625279Z",
225 | "start_time": "2020-04-01T12:19:54.619753Z"
226 | }
227 | },
228 | "outputs": [],
229 | "source": [
230 | "sub.head()"
231 | ]
232 | },
233 | {
234 | "cell_type": "code",
235 | "execution_count": null,
236 | "metadata": {},
237 | "outputs": [],
238 | "source": []
239 | }
240 | ],
241 | "metadata": {
242 | "kernelspec": {
243 | "display_name": "Python [conda env:dm] *",
244 | "language": "python",
245 | "name": "conda-env-dm-py"
246 | },
247 | "language_info": {
248 | "codemirror_mode": {
249 | "name": "ipython",
250 | "version": 3
251 | },
252 | "file_extension": ".py",
253 | "mimetype": "text/x-python",
254 | "name": "python",
255 | "nbconvert_exporter": "python",
256 | "pygments_lexer": "ipython3",
257 | "version": "3.6.9"
258 | }
259 | },
260 | "nbformat": 4,
261 | "nbformat_minor": 2
262 | }
263 |
--------------------------------------------------------------------------------
/competitions/xiamen_international_bank_2020/baseline.py:
--------------------------------------------------------------------------------
1 | import warnings
2 | warnings.simplefilter('ignore')
3 |
4 | import gc
5 |
6 | import numpy as np
7 | import pandas as pd
8 | pd.set_option('max_columns', 100)
9 | pd.set_option('max_rows', 100)
10 | from tqdm.notebook import tqdm
11 |
12 | from sklearn.preprocessing import LabelEncoder
13 | from sklearn.model_selection import GroupKFold, KFold
14 | from sklearn.metrics import mean_squared_error
15 |
16 | import lightgbm as lgb
17 |
18 |
19 |
20 | def run_lgb_id(df_train, df_test, target, eve_id):
21 | feature_names = list(
22 | filter(lambda x: x not in ['label','cust_no'], df_train.columns))
23 |
24 |
25 |     # select the rows belonging to this I3 category (eve_id)
26 | df_train = df_train[df_train.I3 == eve_id]
27 | df_test = df_test[df_test.I3 == eve_id]
28 |
29 |
30 |
31 | model = lgb.LGBMRegressor(num_leaves=32,
32 | max_depth=6,
33 | learning_rate=0.08,
34 | n_estimators=10000,
35 | subsample=0.9,
36 | feature_fraction=0.8,
37 | reg_alpha=0.5,
38 | reg_lambda=0.8,
39 | random_state=2020)
40 | oof = []
41 | prediction = df_test[['cust_no']]
42 | prediction[target] = 0
43 |
44 | kfold = KFold(n_splits=5, random_state=2020)
45 | for fold_id, (trn_idx, val_idx) in enumerate(kfold.split(df_train, df_train[target])):
46 | X_train = df_train.iloc[trn_idx][feature_names]
47 | Y_train = df_train.iloc[trn_idx][target]
48 | X_val = df_train.iloc[val_idx][feature_names]
49 | Y_val = df_train.iloc[val_idx][target]
50 |
51 | lgb_model = model.fit(X_train,
52 | Y_train,
53 | eval_names=['train', 'valid'],
54 | eval_set=[(X_train, Y_train), (X_val, Y_val)],
55 | verbose=0,
56 | eval_metric='mse',
57 | early_stopping_rounds=20,
58 | )
59 |
60 | pred_val = lgb_model.predict(X_val, num_iteration=lgb_model.best_iteration_)
61 | df_oof = df_train.iloc[val_idx][[target, 'cust_no']].copy()
62 | df_oof['pred'] = pred_val
63 | oof.append(df_oof)
64 |
65 | pred_test = lgb_model.predict(df_test[feature_names], num_iteration=lgb_model.best_iteration_)
66 |
67 | prediction[target] += pred_test / kfold.n_splits
68 |
69 |
70 | del lgb_model, pred_val, pred_test, X_train, Y_train, X_val, Y_val
71 | gc.collect()
72 |
73 | df_oof = pd.concat(oof)
74 | score = mean_squared_error(df_oof[target], df_oof['pred'])
75 | print('MSE:', score)
76 |
77 | return prediction,score
78 |
79 |
80 | if __name__ == "__main__":
81 |     # 1. read the input files:
82 | train_label_3=pd.read_csv(r'E:\For_test2-10\data\厦门_data\train_label\y_Q3_3.csv')
83 | train_label_4 = pd.read_csv(r'E:\For_test2-10\data\厦门_data\train_label\y_Q4_3.csv')
84 |
85 | train_3 = pd.DataFrame()
86 |
87 |
88 | train_4 = pd.DataFrame()
89 |
90 |
91 |
92 | id3_data = pd.read_csv(r'E:\For_test2-10\data\厦门_data\train_feature\cust_avli_Q3.csv')
93 | id4_data = pd.read_csv(r'E:\For_test2-10\data\厦门_data\train_feature\cust_avli_Q4.csv')
94 |
95 |     # merge the labels of valid customers
96 | train_label_3 = pd.merge(left=id3_data, right=train_label_3, how='inner', on='cust_no')
97 | train_label_4 = pd.merge(left=id4_data, right=train_label_4, how='inner', on='cust_no')
98 |     # merge personal information
99 | inf3_data = pd.read_csv(r'E:\For_test2-10\data\厦门_data\train_feature\cust_info_q3.csv')
100 | inf4_data = pd.read_csv(r'E:\For_test2-10\data\厦门_data\train_feature\cust_info_q4.csv')
101 | train_label_3 = pd.merge(left=inf3_data, right=train_label_3, how='inner', on='cust_no')
102 | train_label_4 = pd.merge(left=inf4_data, right=train_label_4, how='inner', on='cust_no')
103 |
104 |
105 |
106 |     # extract Q3 (month 9) features
107 | for i in range(9,10):
108 | aum_3=pd.read_csv(r'E:\For_test2-10\data\厦门_data\train_feature\aum_m'+str(i)+'.csv')
109 |
110 |
111 | be_3 = pd.read_csv(r'E:\For_test2-10\data\厦门_data\train_feature\behavior_m' + str(i) + '.csv')
112 |
113 |
114 | cun_3 = pd.read_csv(r'E:\For_test2-10\data\厦门_data\train_feature\cunkuan_m' + str(i) + '.csv')
115 |
116 | fre_3=pd.merge(left=aum_3,right=be_3,how='inner', on='cust_no')
117 | fre_3=pd.merge(left=fre_3,right=cun_3,how='inner', on='cust_no')
118 | train_3=train_3.append(fre_3)
119 |
120 | train_fe3=pd.merge(left=fre_3,right=train_label_3,how='inner', on='cust_no')
121 |
122 | train_fe3.to_csv(r'E:\For_test2-10\data\厦门_data\train_feature\train3_fe_B7.csv',index=None)
123 |
124 |     # extract Q4 (month 12) features
125 | for i in range(12,13):
126 | aum_4=pd.read_csv(r'E:\For_test2-10\data\厦门_data\train_feature\aum_m'+str(i)+'.csv')
127 |
128 |
129 | be_4 = pd.read_csv(r'E:\For_test2-10\data\厦门_data\train_feature\behavior_m' + str(i) + '.csv')
130 |
131 |
132 | cun_4 = pd.read_csv(r'E:\For_test2-10\data\厦门_data\train_feature\cunkuan_m' + str(i) + '.csv')
133 |
134 | fre_4=pd.merge(left=aum_4,right=be_4,how='inner', on='cust_no')
135 | fre_4=pd.merge(left=fre_4,right=cun_4,how='inner', on='cust_no')
136 | train_3=train_3.append(fre_4)
137 |
138 | train_fe4=pd.merge(left=fre_4,right=train_label_4,how='inner', on='cust_no')
139 |
140 | train_fe4.to_csv(r'E:\For_test2-10\data\厦门_data\train_feature\train4_fe_B7.csv',index=None)
141 |
142 | train_B7=[train_fe3,train_fe4]
143 | train_B7=pd.concat(train_B7)
144 |
145 | test = pd.DataFrame()
146 | idtest_data = pd.read_csv(r'E:\For_test2-10\data\厦门_data\test_feature\cust_avli_Q1.csv')
147 | inftest_data = pd.read_csv(r'E:\For_test2-10\data\厦门_data\test_feature\cust_info_q1.csv')
148 | test_inf = pd.merge(left=inftest_data, right=idtest_data, how='inner', on='cust_no')
149 |     # extract the test-quarter (month 3) features
150 | for i in range(3, 4):
151 | aum = pd.read_csv(r'E:\For_test2-10\data\厦门_data\test_feature\aum_m' + str(i) + '.csv')
152 |
153 | be = pd.read_csv(r'E:\For_test2-10\data\厦门_data\test_feature\behavior_m' + str(i) + '.csv')
154 |
155 | cun = pd.read_csv(r'E:\For_test2-10\data\厦门_data\test_feature\cunkuan_m' + str(i) + '.csv')
156 |
157 | fre = pd.merge(left=aum, right=be, how='inner', on='cust_no')
158 | fre = pd.merge(left=fre, right=cun, how='inner', on='cust_no')
159 | test = test.append(fre)
160 |
161 | test_fe = pd.merge(left=test, right=test_inf, how='inner', on='cust_no')
162 | test_fe.to_csv(r'E:\For_test2-10\data\厦门_data\train_feature\test_fe_B7.csv', index=None)
163 |
164 | test_B7=test_fe.dropna(axis=1, how='any')
165 | train_B7=train_B7.dropna(axis=1, how='any')
166 | print(test_B7)
167 | print(train_B7)
168 |
169 | # Label Encoding
170 | le = LabelEncoder()
171 | train_B7['I3'] = le.fit_transform(train_B7['I3'].astype(str))
172 | test_B7['I3'] = le.transform(test_B7['I3'].astype(str))
173 | le = LabelEncoder()
174 | train_B7['I8'] = le.fit_transform(train_B7['I8'].astype(str))
175 | test_B7['I8'] = le.transform(test_B7['I8'].astype(str))
176 | le = LabelEncoder()
177 | train_B7['I12'] = le.fit_transform(train_B7['I12'].astype(str))
178 | test_B7['I12'] = le.transform(test_B7['I12'].astype(str))
179 |
180 | predictionsB4 = pd.DataFrame()
181 |
182 |
183 | predictionsB7 = pd.DataFrame()
184 | scoresB7 = list()
185 |
186 | for eve_id in tqdm(test_B7.I3.unique()):
187 | prediction,score= run_lgb_id(train_B7, test_B7, target='label', eve_id=eve_id)
188 | predictionsB7=predictionsB7.append(prediction)
189 | scoresB7.append(score)
190 | print(np.mean(scoresB7))
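    # Post-process: round the regression outputs and clip them to the label range [-1, 1].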
191 | predictionsB7['label'] = predictionsB7['label'].apply(np.round)
192 | predictionsB7['label'] = predictionsB7['label'].apply(lambda x: -1 if x<-1 else x)
193 | predictionsB7['label'] = predictionsB7['label'].apply(lambda x: 1 if x>1 else x)
194 | predictionsB7['label'] = predictionsB7['label'].astype(int)
195 |
196 |
197 |     # these three cust_no were not found in the features; fill them with 0 as a guess
198 | low=pd.DataFrame()
199 | low['cust_no']=['0xb2d0afb2', '0xb2d2ed87', '0xb2d2d9d2']
200 | low['label']=[0,0,0]
201 |
202 | print(low)
203 | predictionsB7=predictionsB7.append(low)
204 |
205 | prediction=[predictionsB4,predictionsB7]
206 | prediction=pd.concat(prediction)
207 | prediction.to_csv('sub_10_30.csv',index=None)
208 |
209 |
--------------------------------------------------------------------------------
/competitions/tianchi_car_sale/final/lgb.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {
7 | "ExecuteTime": {
8 | "end_time": "2020-04-05T10:44:26.631356Z",
9 | "start_time": "2020-04-05T10:44:26.046611Z"
10 | }
11 | },
12 | "outputs": [],
13 | "source": [
14 | "import os\n",
15 | "from sklearn.preprocessing import StandardScaler\n",
16 | "from sklearn.metrics import cohen_kappa_score, accuracy_score, mean_absolute_error, f1_score\n",
17 | "from sklearn.model_selection import GroupKFold, KFold, StratifiedKFold\n",
18 | "from tqdm import tqdm\n",
19 | "import lightgbm as lgb\n",
20 | "import numpy as np\n",
21 | "import pandas as pd\n",
22 | "import math\n",
23 | "import matplotlib.pyplot as plt\n",
24 | "import seaborn as sns\n",
25 | "import warnings\n",
26 | "import gc\n",
27 | "from datetime import datetime\n",
28 | "import itertools\n",
29 | "\n",
30 | "warnings.filterwarnings('ignore')\n",
31 | "pd.set_option('display.max_columns', None)\n",
32 | "pd.set_option('display.max_rows', None)"
33 | ]
34 | },
35 | {
36 | "cell_type": "code",
37 | "execution_count": null,
38 | "metadata": {
39 | "ExecuteTime": {
40 | "end_time": "2020-04-05T10:44:26.635381Z",
41 | "start_time": "2020-04-05T10:44:26.633087Z"
42 | }
43 | },
44 | "outputs": [],
45 | "source": [
46 | "seed = 2020"
47 | ]
48 | },
49 | {
50 | "cell_type": "code",
51 | "execution_count": null,
52 | "metadata": {
53 | "ExecuteTime": {
54 | "end_time": "2020-04-05T10:44:27.092446Z",
55 | "start_time": "2020-04-05T10:44:26.636754Z"
56 | }
57 | },
58 | "outputs": [],
59 | "source": [
60 | "df_feature = pd.read_pickle('feature.pickle')"
61 | ]
62 | },
63 | {
64 | "cell_type": "code",
65 | "execution_count": null,
66 | "metadata": {
67 | "ExecuteTime": {
68 | "end_time": "2020-04-05T10:44:27.652525Z",
69 | "start_time": "2020-04-05T10:44:27.093840Z"
70 | }
71 | },
72 | "outputs": [],
73 | "source": [
74 | "from sklearn.preprocessing import LabelEncoder\n",
75 | "for f in tqdm(df_feature.select_dtypes('object')):\n",
76 | " lbl = LabelEncoder()\n",
77 | " df_feature[f] = lbl.fit_transform(df_feature[f].astype(str))"
78 | ]
79 | },
80 | {
81 | "cell_type": "code",
82 | "execution_count": null,
83 | "metadata": {
84 | "ExecuteTime": {
85 | "end_time": "2020-04-05T10:44:28.142152Z",
86 | "start_time": "2020-04-05T10:44:27.653809Z"
87 | }
88 | },
89 | "outputs": [],
90 | "source": [
91 | "df_test = df_feature[df_feature['price'].isnull()].copy()\n",
92 | "df_train = df_feature[df_feature['price'].notnull()].copy()"
93 | ]
94 | },
95 | {
96 | "cell_type": "code",
97 | "execution_count": null,
98 | "metadata": {
99 | "ExecuteTime": {
100 | "end_time": "2020-04-05T10:54:33.965679Z",
101 | "start_time": "2020-04-05T10:44:29.669809Z"
102 | },
103 | "scrolled": true
104 | },
105 | "outputs": [],
106 | "source": [
107 | "ycol = 'price'\n",
108 | "feature_names = list(\n",
109 | " filter(lambda x: x not in [ycol, 'SaleID', 'regDate', 'creatDate', 'creatDate_year', 'creatDate_month'], df_train.columns))\n",
110 | "\n",
111 | "\n",
112 | "# 0.08\n",
113 | "model = lgb.LGBMRegressor(num_leaves=64,\n",
114 | " max_depth=8,\n",
115 | " learning_rate=0.08,\n",
116 | " n_estimators=10000000,\n",
117 | " subsample=0.75,\n",
118 | " feature_fraction=0.75,\n",
119 | " reg_alpha=0.7,\n",
120 | " reg_lambda=1.2,\n",
121 | " random_state=seed,\n",
122 | " metric=None\n",
123 | " )\n",
124 | "\n",
125 | "oof = []\n",
126 | "prediction = df_test[['SaleID']]\n",
127 | "prediction['price'] = 0\n",
128 | "df_importance_list = []\n",
129 | "\n",
130 | "kfold = KFold(n_splits=5, shuffle=False, random_state=seed)\n",
131 | "for fold_id, (trn_idx, val_idx) in enumerate(kfold.split(df_train[feature_names])):\n",
132 | " X_train = df_train.iloc[trn_idx][feature_names]\n",
133 | " Y_train = df_train.iloc[trn_idx][ycol]\n",
134 | "\n",
135 | " X_val = df_train.iloc[val_idx][feature_names]\n",
136 | " Y_val = df_train.iloc[val_idx][ycol]\n",
137 | "\n",
138 | " print('\\nFold_{} Training ================================\\n'.format(fold_id+1))\n",
139 | "\n",
140 | " lgb_model = model.fit(X_train,\n",
141 | " Y_train,\n",
142 | " eval_names=['train', 'valid'],\n",
143 | " eval_set=[(X_train, Y_train), (X_val, Y_val)],\n",
144 | " verbose=500,\n",
145 | " eval_metric='mae',\n",
146 | " early_stopping_rounds=500)\n",
147 | "\n",
148 | " pred_val = lgb_model.predict(\n",
149 | " X_val, num_iteration=lgb_model.best_iteration_)\n",
150 | " df_oof = df_train.iloc[val_idx][['SaleID', ycol]].copy()\n",
151 | " df_oof['pred'] = pred_val\n",
152 | " oof.append(df_oof)\n",
153 | "\n",
154 | " pred_test = lgb_model.predict(\n",
155 | " df_test[feature_names], num_iteration=lgb_model.best_iteration_)\n",
156 | " prediction['price'] += pred_test / 5\n",
157 | "\n",
158 | " df_importance = pd.DataFrame({\n",
159 | " 'column': feature_names,\n",
160 | " 'importance': lgb_model.feature_importances_,\n",
161 | " })\n",
162 | " df_importance_list.append(df_importance)\n",
163 | "\n",
164 | " del lgb_model, pred_val, pred_test, X_train, Y_train, X_val, Y_val\n",
165 | " gc.collect()"
166 | ]
167 | },
168 | {
169 | "cell_type": "code",
170 | "execution_count": null,
171 | "metadata": {
172 | "ExecuteTime": {
173 | "end_time": "2020-04-05T10:54:33.969199Z",
174 | "start_time": "2020-04-05T10:54:33.966991Z"
175 | }
176 | },
177 | "outputs": [],
178 | "source": [
179 | "# 0.107853\n",
180 | "# 0.106296\n",
181 | "# 0.107481\n",
182 | "# 0.106911\n",
183 | "# 0.106629"
184 | ]
185 | },
186 | {
187 | "cell_type": "code",
188 | "execution_count": null,
189 | "metadata": {
190 | "ExecuteTime": {
191 | "end_time": "2020-04-05T10:54:34.001089Z",
192 | "start_time": "2020-04-05T10:54:33.970253Z"
193 | },
194 | "scrolled": true
195 | },
196 | "outputs": [],
197 | "source": [
198 | "df_importance = pd.concat(df_importance_list)\n",
199 | "df_importance = df_importance.groupby(['column'])['importance'].agg(\n",
200 | " 'mean').sort_values(ascending=False).reset_index()\n",
201 | "df_importance"
202 | ]
203 | },
204 | {
205 | "cell_type": "code",
206 | "execution_count": null,
207 | "metadata": {
208 | "ExecuteTime": {
209 | "end_time": "2020-04-05T10:54:34.561521Z",
210 | "start_time": "2020-04-05T10:54:34.002228Z"
211 | },
212 | "scrolled": true
213 | },
214 | "outputs": [],
215 | "source": [
216 | "df_oof = pd.concat(oof)\n",
217 | "df_oof[ycol] = np.expm1(df_oof[ycol])\n",
218 | "df_oof['pred'] = np.expm1(df_oof['pred'])\n",
219 | "mae = mean_absolute_error(df_oof[ycol], df_oof['pred'])\n",
220 | "print('mae:', mae)\n",
221 | "df_oof.to_csv('lgb_oof.csv'.format(mae), index=False, encoding='utf-8')"
222 | ]
223 | },
224 | {
225 | "cell_type": "code",
226 | "execution_count": null,
227 | "metadata": {
228 | "ExecuteTime": {
229 | "end_time": "2020-04-05T10:54:34.804380Z",
230 | "start_time": "2020-04-05T10:54:34.562779Z"
231 | }
232 | },
233 | "outputs": [],
234 | "source": [
235 | "prediction['price'] = np.expm1(prediction['price'])\n",
236 | "sub = prediction.copy(deep=True)\n",
237 | "sub.to_csv('sub/lgb_{}.csv'.format(mae), index=False, encoding='utf-8')\n",
238 | "sub.to_csv('lgb.csv'.format(mae), index=False, encoding='utf-8')"
239 | ]
240 | },
241 | {
242 | "cell_type": "code",
243 | "execution_count": null,
244 | "metadata": {
245 | "ExecuteTime": {
246 | "end_time": "2020-04-05T10:54:34.811447Z",
247 | "start_time": "2020-04-05T10:54:34.805703Z"
248 | }
249 | },
250 | "outputs": [],
251 | "source": [
252 | "sub.head()"
253 | ]
254 | },
255 | {
256 | "cell_type": "code",
257 | "execution_count": null,
258 | "metadata": {
259 | "ExecuteTime": {
260 | "end_time": "2020-04-05T10:54:34.915432Z",
261 | "start_time": "2020-04-05T10:54:34.812561Z"
262 | }
263 | },
264 | "outputs": [],
265 | "source": [
266 | "# 5930.6270\n",
267 | "sub['price'].mean()"
268 | ]
269 | }
270 | ],
271 | "metadata": {
272 | "kernelspec": {
273 | "display_name": "Python [conda env:dm] *",
274 | "language": "python",
275 | "name": "conda-env-dm-py"
276 | },
277 | "language_info": {
278 | "codemirror_mode": {
279 | "name": "ipython",
280 | "version": 3
281 | },
282 | "file_extension": ".py",
283 | "mimetype": "text/x-python",
284 | "name": "python",
285 | "nbconvert_exporter": "python",
286 | "pygments_lexer": "ipython3",
287 | "version": "3.6.9"
288 | }
289 | },
290 | "nbformat": 4,
291 | "nbformat_minor": 2
292 | }
293 |
--------------------------------------------------------------------------------
/competitions/kesci_datajoy_airbnb/README.md:
--------------------------------------------------------------------------------
1 | # Structured-data competitions: how to build a robust baseline early on
2 | 
3 | In a typical structured-data competition, 90%+ of the time goes into feature engineering.
4 | 
5 | Using a real competition as a running example, this post experiments with and summarizes some common feature-engineering tricks, so that you can build a fairly robust baseline model early in a competition.
6 | 
7 | ### The data
8 | 
9 | The data comes from the DataJoy beginner competition jointly hosted by Heywhale (和鲸社区) and ChallengeHub: Predictive Analytics · Homestay Price Prediction.
10 | 
11 | Link: https://www.heywhale.com/home/competition/605c426d21e3f6003b56a920/content
12 | 
13 | The data appears to have been collected from Airbnb and is organized into a single table. Judging from submissions so far, online and offline scores are fairly consistent.
14 | 
15 | The training set looks like this:
16 | 
17 | ![](1_train_info.png)
18 | 
19 | The goal is to predict homestay prices, i.e. a regression competition (optimizing RMSE).
20 | 
21 | With the big picture in mind, we can start building a baseline model.
22 | 
23 | ### Raw-data baseline
24 | 
25 | First, do no feature engineering at all: apply some simple preprocessing and feed the data straight into a multi-fold LGBM model (fixed folds and parameters) to see how it does. A sketch of that training loop follows the preprocessing code below.
26 | 
27 | The preprocessing here only cleans part of the data, fills missing values (leaving them unfilled also works), and label-encodes the Object-type features:
28 |
29 | ```
30 | df_features['洗手间数量'].fillna(-1, inplace=True)
31 | df_features['床的数量'].fillna(-1, inplace=True)
32 | df_features['卧室数量'].fillna(-1, inplace=True)
33 | df_features['房主是否有个人资料图片'].fillna('na', inplace=True)
34 | df_features['房主身份是否验证'].fillna('na', inplace=True)
35 | df_features['房主回复率'].fillna('-1', inplace=True)
36 | df_features['房主回复率'] = df_features['房主回复率'].astype(str).apply(lambda x: x.replace('%', ''))
37 | df_features['房主回复率'] = df_features['房主回复率'].astype(int)
38 | df_features['民宿周边'].fillna('na', inplace=True)
39 | mean_score = df_features['民宿评分'].mean()
40 | df_features['民宿评分'].fillna(mean_score, inplace=True)
41 | df_features['邮编'].fillna('na', inplace=True)
42 |
43 | for feat in ['房主是否有个人资料图片', '房主身份是否验证', '民宿周边', '邮编']:
44 | lbl = LabelEncoder()
45 | lbl.fit(df_features[feat])
46 | df_features[feat] = lbl.transform(df_features[feat])
47 | ```
48 |
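The multi-fold LGBM model itself is not shown in this README. As a rough sketch only (the hyperparameters and the '数据ID' id column name are assumptions, not the original code), the fixed-fold training loop looks roughly like this:

```
# Minimal sketch of the fixed multi-fold LGBM baseline (illustrative only;
# parameters and the '数据ID' id column are assumptions, not the original code)
import numpy as np
import lightgbm as lgb
from sklearn.model_selection import KFold
from sklearn.metrics import mean_squared_error

df_train = df_features[df_features['价格'].notnull()].reset_index(drop=True)
df_test = df_features[df_features['价格'].isnull()].reset_index(drop=True)
feature_names = [c for c in df_train.columns if c not in ['价格', '数据ID']]

oof = np.zeros(len(df_train))
pred = np.zeros(len(df_test))
kfold = KFold(n_splits=5, shuffle=True, random_state=2021)
for trn_idx, val_idx in kfold.split(df_train):
    model = lgb.LGBMRegressor(num_leaves=64, learning_rate=0.05,
                              n_estimators=5000, random_state=2021)
    model.fit(df_train.iloc[trn_idx][feature_names], df_train.iloc[trn_idx]['价格'],
              eval_set=[(df_train.iloc[val_idx][feature_names], df_train.iloc[val_idx]['价格'])],
              eval_metric='rmse', early_stopping_rounds=100, verbose=0)
    oof[val_idx] = model.predict(df_train.iloc[val_idx][feature_names])
    pred += model.predict(df_test[feature_names]) / kfold.n_splits

print('offline RMSE:', mean_squared_error(df_train['价格'], oof) ** 0.5)
```
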
49 | Offline RMSE: 5.5084
50 |
51 | ### Frequency Encoding
52 |
53 | Next, let's consider how to encode the categorical features.
54 | 
55 | Frequency encoding, also called count encoding, represents a feature by how often each of its values occurs; it is one of the most commonly used encodings in competitions.
56 | 
57 | Some features are numeric, but if they have only a few distinct values they can also be treated as categorical and frequency-encoded.
58 |
59 | ```
60 | def freq_enc(df, col):
61 | vc = df[col].value_counts(dropna=True, normalize=True).to_dict()
62 | df[f'{col}_freq'] = df[col].map(vc)
63 | return df
64 |
65 | for feat in ['容纳人数', '洗手间数量', '床的数量', '床的类型',
66 | '卧室数量', '取消条款', '所在城市', '清洁费',
67 | '房主是否有个人资料图片', '房主回复率', '是否支持随即预订',
68 | '民宿周边', '房产类型', '房型', '邮编']:
69 | df_features = freq_enc(df_features, feat)
70 | ```
71 |
72 | After adding the frequency-encoding features, offline RMSE: 5.5114
73 | 
74 | The score got slightly worse, but not by much, so we keep these features for now.
75 | 
76 | ### Target Encoding
77 | 
78 | I won't go into the theory of target encoding here (honestly I can't explain it very well either). To avoid overfitting, the usual approach is K-Fold target encoding of the categorical features.
79 |
80 | ```
81 | # Target Encoding
82 |
83 | def stat(df, df_merge, group_by, agg):
84 | group = df.groupby(group_by).agg(agg)
85 |
86 | columns = []
87 | for on, methods in agg.items():
88 | for method in methods:
89 | columns.append('{}_{}_{}'.format('_'.join(group_by), on, method))
90 | group.columns = columns
91 | group.reset_index(inplace=True)
92 | df_merge = df_merge.merge(group, on=group_by, how='left')
93 |
94 | del (group)
95 | gc.collect()
96 | return df_merge
97 |
98 |
99 | def statis_feat(df_know, df_unknow):
100 | df_unknow = stat(df_know, df_unknow, ['民宿评分'], {'价格': ['mean', 'std', 'max']})
101 | df_unknow = stat(df_know, df_unknow, ['邮编'], {'价格': ['mean', 'std', 'max']})
102 |
103 | return df_unknow
104 |
105 |
106 | # 5-fold out-of-fold target encoding
107 | df_train = df_features[~df_features['价格'].isnull()]
108 | df_train = df_train.reset_index(drop=True)
109 | df_test = df_features[df_features['价格'].isnull()]
110 |
111 | df_stas_feat = None
112 | kf = KFold(n_splits=5, random_state=2021, shuffle=True)
113 | for train_index, val_index in kf.split(df_train):
114 | df_fold_train = df_train.iloc[train_index]
115 | df_fold_val = df_train.iloc[val_index]
116 |
117 | df_fold_val = statis_feat(df_fold_train, df_fold_val)
118 | df_stas_feat = pd.concat([df_stas_feat, df_fold_val], axis=0)
119 |
120 | del(df_fold_train)
121 | del(df_fold_val)
122 | gc.collect()
123 |
124 | df_test = statis_feat(df_train, df_test)
125 | df_features = pd.concat([df_stas_feat, df_test], axis=0)
126 |
127 | del(df_stas_feat)
128 | del(df_train)
129 | del(df_test)
130 | gc.collect()
131 | ```
132 |
133 | After simply adding two groups of target-encoding features, offline RMSE: 5.5472
134 | 
135 | The score got noticeably worse, so we drop target encoding for now.
136 | 
137 | ### Aggregation features
138 | 
139 | For the numeric features, we compute groupby aggregations over the categorical features.
140 | 
141 | This makes it trivially easy to generate thousands of features (the legendary "all-in brute force"), but it also makes feature selection harder.
142 |
143 | ```
144 | def brute_force(df, features, groups):
145 | for method in tqdm(['max', 'min', 'mean', 'median', 'std']):
146 | for feature in features:
147 | for group in groups:
148 | df[f'{group}_{feature}_{method}'] = df.groupby(group)[feature].transform(method)
149 |
150 | return df
151 |
152 | dense_feats = ['容纳人数', '洗手间数量', '床的数量', '卧室数量',
153 | '房主回复率', '评论个数', '民宿评分']
154 | cate_feats = ['取消条款', '床的类型', '房产类型', '房型', '邮编']
155 |
156 | df_features = brute_force(df_features, dense_feats, cate_feats)
157 | ```
158 |
159 | After a simple round of feature selection (iteratively dropping features whose gain importance is 0), offline RMSE: 5.5264. Not great either, so we don't adopt these for now.
160 | 
161 | ### Interaction features
162 | 
163 | Interaction features are derived by combining two (or more) features, which usually requires some domain knowledge (brute-forcing them also works).
164 |
165 | ```
166 | df_features['人均床数量'] = df_features['容纳人数'] / (df_features['床的数量'] + 1e-3) # 1e-3 avoids division by zero
167 | df_features['人均卧室量'] = df_features['容纳人数'] / (df_features['卧室数量'] + 1e-3)
168 | df_features['卧室床均量'] = df_features['床的数量'] / (df_features['卧室数量'] + 1e-3)
169 | df_features['经纬度平方根'] = (df_features['维度']*df_features['维度'] + df_features['经度']*df_features['经度'])**.5
170 | ```
171 |
172 | After adding these four interaction features, offline RMSE: 5.5054.
173 | 
174 | Pairwise combinations of categorical features produce finer-grained categorical features, and pairwise arithmetic (add/subtract/multiply/divide) on numeric features produces more numeric features; these derived features can in turn be fed into the encoding and aggregation methods above to generate even more features. A small sketch of such pairwise combinations is shown below.
175 |
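For illustration only (not part of the original baseline; the columns picked here are arbitrary examples), a minimal sketch of such pairwise combinations:

```
# Minimal sketch of pairwise feature combinations (illustrative only; the
# chosen columns are arbitrary examples, not the original feature set)
from itertools import combinations
from sklearn.preprocessing import LabelEncoder

cate_feats = ['房型', '床的类型', '取消条款']
dense_feats = ['容纳人数', '床的数量', '卧室数量']

# categorical x categorical: concatenate the values, then label-encode
for c1, c2 in combinations(cate_feats, 2):
    combo = df_features[c1].astype(str) + '_' + df_features[c2].astype(str)
    df_features[f'{c1}_{c2}_combo'] = LabelEncoder().fit_transform(combo)

# numeric x numeric: simple arithmetic interactions
for c1, c2 in combinations(dense_feats, 2):
    df_features[f'{c1}_add_{c2}'] = df_features[c1] + df_features[c2]
    df_features[f'{c1}_sub_{c2}'] = df_features[c1] - df_features[c2]
    df_features[f'{c1}_mul_{c2}'] = df_features[c1] * df_features[c2]
    df_features[f'{c1}_div_{c2}'] = df_features[c1] / (df_features[c2] + 1e-3)
```
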
176 | ### Time features
177 | 
178 | These are usually derived features such as time differences between date columns.
179 |
180 | ```
181 | # Time feature processing
182 |
183 | df_features['首次评论日期'] = pd.to_datetime(df_features['首次评论日期']).values.astype(np.int64) // 10 ** 9
184 | df_features['何时成为房主'] = pd.to_datetime(df_features['何时成为房主']).values.astype(np.int64) // 10 ** 9
185 | df_features['最近评论日期'] = pd.to_datetime(df_features['最近评论日期']).values.astype(np.int64) // 10 ** 9
186 |
187 | df_features['timestamp_diff1'] = df_features['首次评论日期'] - df_features['何时成为房主']
188 | df_features['timestamp_diff2'] = df_features['最近评论日期'] - df_features['首次评论日期']
189 | df_features['timestamp_diff3'] = df_features['最近评论日期'] - df_features['何时成为房主']
190 | ```
191 |
192 | After adding these three derived time features, offline RMSE: 5.4703.
193 | 
194 | We then build aggregation features on top of these derived time features:
195 |
196 | ```
197 | dense_feats = ['timestamp_diff1', 'timestamp_diff2', 'timestamp_diff3']
198 | cate_feats = ['房型']
199 |
200 | df_features = brute_force(df_features, dense_feats, cate_feats)
201 | ```
202 |
203 | Offline RMSE improves further to 5.4680.
204 | 
205 | ### Text encoding
206 | 
207 | Note that structured data can still contain unstructured fields; the「便利设施」(amenities) field in this dataset is a typical example:
208 | 
209 | ![](2_text.png)
210 | 
211 | A sample value looks like this:
212 |
213 | ```
214 | '{TV,"Cable TV",Internet,"Wireless Internet","Air conditioning",Kitchen,"Pets live on this property",Dog(s),"Hot tub",Heating,Washer,Dryer,"Smoke detector","Fire extinguisher",Essentials,"translation missing: en.hosting_amenity_49","translation missing: en.hosting_amenity_50"}'
215 | ```
216 |
217 | We first clean the text into a word/sentence form so that TF-IDF encoding is easy to apply:
218 |
219 | ```
220 | df_features['便利设施'] = df_features['便利设施'].apply(
221 | lambda x: x.replace('{', '').replace('}', '').replace('"', '').replace(':', '').replace(',', ' '))
222 | # df_features['便利设施'] = df_features['便利设施'].str.lower()
223 |
224 | n_components = 12
225 |
226 | X = list(df_features['便利设施'].values)
227 | tfv = TfidfVectorizer(ngram_range=(1,1), max_features=10000)
228 | tfv.fit(X)
229 | X_tfidf = tfv.transform(X)
230 | svd = TruncatedSVD(n_components= n_components)
231 | svd.fit(X_tfidf)
232 | X_svd = svd.transform(X_tfidf)
233 |
234 | for i in range(n_components):
235 | df_features[f'便利设施_tfidf_{i}'] = X_svd[:, i]
236 | ```
237 |
238 | After adding these 12 TF-IDF + SVD features, offline RMSE: 5.4343.
239 | 
240 | Besides that, we can also build w2v features. Since the w2v embeddings are word-level, we need to aggregate them (e.g. mean/max) to get a feature vector for the whole sentence. The code is as follows:
241 |
242 | ```
243 | emb_size = 4
244 | sentences = df_features['便利设施'].values.tolist()
245 |
246 | words = []
247 | for i in range(len(sentences)):
248 | sentences[i] = sentences[i].split()
249 | words += sentences[i]
250 |
251 | words = list(set(words))
252 |
253 | model = Word2Vec(sentences, size=emb_size, window=3,
254 | min_count=1, sg=0, hs=1, seed=2021)
255 |
256 | emb_matrix_mean = []
257 | emb_matrix_max = []
258 |
259 | for seq in sentences:
260 | vec = []
261 | for w in seq:
262 | if w in model:
263 | vec.append(model[w])
264 | if len(vec) > 0:
265 | emb_matrix_mean.append(np.mean(vec, axis=0))
266 | emb_matrix_max.append(np.max(vec, axis=0))
267 | else:
268 | emb_matrix_mean.append([0] * emb_size)
269 | emb_matrix_max.append([0] * emb_size)
270 |
271 | df_emb_mean = pd.DataFrame(emb_matrix_mean)
272 | df_emb_mean.columns = ['便利设施_w2v_mean_{}'.format(
273 | i) for i in range(emb_size)]
274 |
275 | df_emb_max = pd.DataFrame(emb_matrix_max)
276 | df_emb_max.columns = ['便利设施_w2v_max_{}'.format(
277 | i) for i in range(emb_size)]
278 |
279 | for i in range(emb_size):
280 | df_features[f'便利设施_w2v_mean_{i}'] = df_emb_mean[f'便利设施_w2v_mean_{i}']
281 | df_features[f'便利设施_w2v_max_{i}'] = df_emb_max[f'便利设施_w2v_max_{i}']
282 |
283 | df_features.head()
284 | ```
285 |
286 | After also adding these 8 w2v features, the offline RMSE gets worse: 5.4436, so we don't adopt them for now.
287 | 
288 | ### Summary
289 | 
290 | At this point we have a fairly robust baseline with an offline RMSE of 5.4343, which scores 5.3394 online; as of this writing that is still good enough for the top ten.
291 | 
292 | This is my basic routine for structured-data competitions: first build an initial baseline on raw features; then gradually add frequency encoding and target encoding for the categorical features; try aggregation encodings of the numeric features; think about domain-driven interaction features; if there are time fields, build time-difference style derivations; and for unstructured text inside the structured data, consider TF-IDF / w2v style encodings.
293 | 
294 | I hope this helps you in the early stage of your competitions.
295 | 
296 | 
297 | PS: the notebook version in this folder scores 5.4313 offline and 5.3304 online.
298 |
299 |
--------------------------------------------------------------------------------
/competitions/xiamen_international_bank_2020/README.md:
--------------------------------------------------------------------------------
1 | # Baseline for the 2020 Xiamen International Bank "数创金融杯" modeling competition
2 | 
3 | This baseline was shared by 「酒心巧克力」.
4 | 
5 | Score: 0.34
6 |
7 | Competition link: https://www.dcjingsai.com/v2/cmptDetail.html?id=439&=76f6724e6fa9455a9b5ef44402c08653&ssoLoginpToken=&sso_global_session=e44c4d57-cd19-4ada-a1d3-a5250252bf86&sso_session=irjO90jPA0%205ytlVRkI1fA%3D%3D
8 |
9 | ## Background
10 | 
11 | In the era of digital finance, big data and AI are developing rapidly in the banking industry, and institutions are accelerating their digital transformation. As a technology-oriented small and medium-sized bank, Xiamen International Bank has long pursued its "digital empowerment" philosophy, continuously advancing smart risk control, smart marketing, smart operations and smart management, and applying AI and big-data analytics to build intelligent customer-service and marketing systems that make marketing more precise and financial services more accessible.
12 | 
13 | Together with the Data Mining Research Center of Xiamen University and DataCastle, Xiamen International Bank is hosting the second "数创金融杯" modeling competition to provide an industry exchange platform for exploring machine learning and AI with the wider community. This edition follows a "finance + technology" theme, focuses on real marketing scenarios, and offers a total prize pool of 310,000 RMB.
14 | 
15 | ## Task
16 | 
17 | As technology evolves, banks have built a rich set of online and offline touchpoints to serve customers' daily business and channel transactions. Facing a large customer base, banks need a more complete and accurate view of customer needs: in practice this means detecting churn, anticipating changes in customer funds, and marketing to customers early (or on time) to reduce fund outflows. This competition provides customer behavior and asset data from real business scenarios; participants demonstrate their data-mining skills in the first round and, in the second round, propose marketing solutions based on their models, showing the value of data analysis.
18 | 
19 | ## Label definition
20 | 
21 | label -1: decrease
22 | 
23 | label 0: stable
24 | 
25 | label 1: increase
26 | 
27 | ## Official note
28 | 
29 | Customer contribution is mainly determined by the customer's AUM.
30 |
31 | ## Evaluation metric: Kappa
32 | 
33 | The Kappa coefficient is a measure of agreement that can also be used to evaluate classification: for classification, "agreement" means whether the model's predictions agree with the actual labels. Kappa is computed from the confusion matrix, takes values in [-1, 1], and is usually greater than 0.
34 | 
35 | Based on the confusion matrix, Kappa is computed as:
36 |
37 | ![[公式]](https://www.zhihu.com/equation?tex=kappa+%3D+%5Cfrac%7Bp_o-p_e%7D%7B1-p_e%7D+)
38 |
39 | where:
40 |
41 | ![[公式]](https://www.zhihu.com/equation?tex=p_o+%3D+%5Cfrac+%7B%E5%AF%B9%E8%A7%92%E7%BA%BF%E5%85%83%E7%B4%A0%E4%B9%8B%E5%92%8C%7D%7B%E6%95%B4%E4%B8%AA%E7%9F%A9%E9%98%B5%E5%85%83%E7%B4%A0%E4%B9%8B%E5%92%8C%7D), **which is simply the accuracy**.
42 |
43 | ![[公式]](https://www.zhihu.com/equation?tex=p_e+%3D+%5Cfrac%7B%5Csum_%7Bi%7D%7B%E7%AC%ACi%E8%A1%8C%E5%85%83%E7%B4%A0%E4%B9%8B%E5%92%8C+%2A+%E7%AC%ACi%E5%88%97%E5%85%83%E7%B4%A0%E4%B9%8B%E5%92%8C%7D%7D%7B%28%5Csum%7B%E7%9F%A9%E9%98%B5%E6%89%80%E6%9C%89%E5%85%83%E7%B4%A0%7D%29%5E2%7D+), i.e. the sum over classes of (row-i total × column-i total), divided by the square of the total number of samples.
44 |
45 | For details, see this article (a runnable sketch follows the link):
46 |
47 | https://zhuanlan.zhihu.com/p/67844308
48 |
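For a quick offline check (not part of the original baseline; the labels below are made-up examples), Kappa can be computed with scikit-learn, either directly or from the confusion matrix following the formula above:

```python
# Sketch: computing the Kappa metric offline (illustrative example labels)
import numpy as np
from sklearn.metrics import cohen_kappa_score, confusion_matrix

y_true = np.array([-1, 0, 1, 1, 0, -1, 0, 1])   # made-up labels for illustration
y_pred = np.array([-1, 0, 1, 0, 0, -1, 1, 1])

# directly with scikit-learn
print(cohen_kappa_score(y_true, y_pred))

# or from the confusion matrix, following the formula above
cm = confusion_matrix(y_true, y_pred)
po = np.trace(cm) / cm.sum()                                   # accuracy
pe = (cm.sum(axis=0) * cm.sum(axis=1)).sum() / cm.sum() ** 2   # chance agreement
print((po - pe) / (1 - pe))
```
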
49 | ## Data
50 | 
51 | See "任务与数据" on the competition page for the data description.
52 | 
53 | ## Approach
54 | 
55 | After inspecting the data, I noticed that the last month of each quarter has B6, B7 and B8. For the test set, the cust_no of its last month covers the vast majority of cust_no, and the task is to predict how these customers will evolve, so the prediction uses only the last quarter's data. The following three cust_no were lost after merging the features (they are also lost when merging the first two months): ['0xb2d0afb2', '0xb2d2ed87', '0xb2d2d9d2']; their labels are simply set to 0. Columns containing NaN are dropped.
56 |
57 | ```python
58 | # These three cust_no were not found after merging; just guess label 0 for them.
59 | low=pd.DataFrame()
60 | low['cust_no']=['0xb2d0afb2', '0xb2d2ed87', '0xb2d2d9d2']
61 | low['label']=[0,0,0]
62 |
63 | ```
64 |
65 | ### Basic features
66 | 
67 | No real feature engineering yet: I simply label-encode I3, I8 and I12, then train separate models grouped by I3 (customer level) and throw in all the remaining columns as-is.
68 |
69 | ```python
70 | le = LabelEncoder()
71 | train_B7['I3'] = le.fit_transform(train_B7['I3'].astype(str))
72 | test_B7['I3'] = le.transform(test_B7['I3'].astype(str))
73 | le = LabelEncoder()
74 | train_B7['I8'] = le.fit_transform(train_B7['I8'].astype(str))
75 | test_B7['I8'] = le.transform(test_B7['I8'].astype(str))
76 | le = LabelEncoder()
77 | train_B7['I12'] = le.fit_transform(train_B7['I12'].astype(str))
78 | test_B7['I12'] = le.transform(test_B7['I12'].astype(str))
79 |
80 | predictionsB4 = pd.DataFrame()
81 |
82 |
83 | predictionsB7 = pd.DataFrame()
84 | scoresB7 = list()
85 |
86 | for eve_id in tqdm(test_B7.I3.unique()):
87 | prediction,score= run_lgb_id(train_B7, test_B7, target='label', eve_id=eve_id)
88 | predictionsB7=predictionsB7.append(prediction)
89 | scoresB7.append(score)
90 | ```
91 |
92 |
93 |
94 | ### Data used
95 | 
96 | For the training set, only the September and December data are used; for the test set, only the March data is used.
97 |
98 | ```python
99 | # 1. Read files:
100 | train_label_3=pd.read_csv(r'E:\For_test2-10\data\厦门_data\train_label\y_Q3_3.csv')
101 | train_label_4 = pd.read_csv(r'E:\For_test2-10\data\厦门_data\train_label\y_Q4_3.csv')
102 |
103 | train_3 = pd.DataFrame()
104 | train_4 = pd.DataFrame()
105 |
106 | id3_data = pd.read_csv(r'E:\For_test2-10\data\厦门_data\train_feature\cust_avli_Q3.csv')
107 | id4_data = pd.read_csv(r'E:\For_test2-10\data\厦门_data\train_feature\cust_avli_Q4.csv')
108 |
109 | # Merge labels of valid (active) customers
110 | train_label_3 = pd.merge(left=id3_data, right=train_label_3, how='inner', on='cust_no')
111 | train_label_4 = pd.merge(left=id4_data, right=train_label_4, how='inner', on='cust_no')
112 | # Merge customer profile information
113 | inf3_data = pd.read_csv(r'E:\For_test2-10\data\厦门_data\train_feature\cust_info_q3.csv')
114 | inf4_data = pd.read_csv(r'E:\For_test2-10\data\厦门_data\train_feature\cust_info_q4.csv')
115 | train_label_3 = pd.merge(left=inf3_data, right=train_label_3, how='inner', on='cust_no')
116 | train_label_4 = pd.merge(left=inf4_data, right=train_label_4, how='inner', on='cust_no')
117 |
118 | # Extract Q3 (month 9) information
119 | for i in range(9,10):
120 | aum_3=pd.read_csv(r'E:\For_test2-10\data\厦门_data\train_feature\aum_m'+str(i)+'.csv')
121 | be_3 = pd.read_csv(r'E:\For_test2-10\data\厦门_data\train_feature\behavior_m' + str(i) + '.csv')
122 | cun_3 = pd.read_csv(r'E:\For_test2-10\data\厦门_data\train_feature\cunkuan_m' + str(i) + '.csv')
123 | fre_3=pd.merge(left=aum_3,right=be_3,how='inner', on='cust_no')
124 | fre_3=pd.merge(left=fre_3,right=cun_3,how='inner', on='cust_no')
125 | train_3=train_3.append(fre_3)
126 |
127 | train_fe3=pd.merge(left=fre_3,right=train_label_3,how='inner', on='cust_no')
128 | train_fe3.to_csv(r'E:\For_test2-10\data\厦门_data\train_feature\train3_fe_B7.csv',index=None)
129 |
130 | # Extract Q4 (month 12) information
131 | for i in range(12,13):
132 | aum_4=pd.read_csv(r'E:\For_test2-10\data\厦门_data\train_feature\aum_m'+str(i)+'.csv')
133 | be_4 = pd.read_csv(r'E:\For_test2-10\data\厦门_data\train_feature\behavior_m' + str(i) + '.csv')
134 | cun_4 = pd.read_csv(r'E:\For_test2-10\data\厦门_data\train_feature\cunkuan_m' + str(i) + '.csv')
135 | fre_4=pd.merge(left=aum_4,right=be_4,how='inner', on='cust_no')
136 | fre_4=pd.merge(left=fre_4,right=cun_4,how='inner', on='cust_no')
137 | train_3=train_3.append(fre_4)
138 |
139 | train_fe4=pd.merge(left=fre_4,right=train_label_4,how='inner', on='cust_no')
140 | train_fe4.to_csv(r'E:\For_test2-10\data\厦门_data\train_feature\train4_fe_B7.csv',index=None)
141 |
142 | train_B7=[train_fe3,train_fe4]
143 | train_B7=pd.concat(train_B7)
144 |
145 | test = pd.DataFrame()
146 | idtest_data = pd.read_csv(r'E:\For_test2-10\data\厦门_data\test_feature\cust_avli_Q1.csv')
147 | inftest_data = pd.read_csv(r'E:\For_test2-10\data\厦门_data\test_feature\cust_info_q1.csv')
148 | test_inf = pd.merge(left=inftest_data, right=idtest_data, how='inner', on='cust_no')
149 | # Extract test-set (month 3) information
150 | for i in range(3, 4):
151 | aum = pd.read_csv(r'E:\For_test2-10\data\厦门_data\test_feature\aum_m' + str(i) + '.csv')
152 | be = pd.read_csv(r'E:\For_test2-10\data\厦门_data\test_feature\behavior_m' + str(i) + '.csv')
153 | cun = pd.read_csv(r'E:\For_test2-10\data\厦门_data\test_feature\cunkuan_m' + str(i) + '.csv')
154 | fre = pd.merge(left=aum, right=be, how='inner', on='cust_no')
155 | fre = pd.merge(left=fre, right=cun, how='inner', on='cust_no')
156 | test = test.append(fre)
157 |
158 | test_fe = pd.merge(left=test, right=test_inf, how='inner', on='cust_no')
159 | test_fe.to_csv(r'E:\For_test2-10\data\厦门_data\train_feature\test_fe_B7.csv', index=None)
160 |
161 | test_B7=test_fe.dropna(axis=1, how='any')
162 | train_B7=train_B7.dropna(axis=1, how='any')
163 | ```
164 |
165 |
166 |
167 | ## Model
168 | 
169 | An LGBM model with 5-fold cross-validation.
170 |
171 | ```python
172 | def run_lgb_id(df_train, df_test, target, eve_id):
173 | feature_names = list(
174 | filter(lambda x: x not in ['label','cust_no'], df_train.columns))
175 |
176 |
177 |     # Select the subset corresponding to eve_id
178 | df_train = df_train[df_train.I3 == eve_id]
179 | df_test = df_test[df_test.I3 == eve_id]
180 |
181 | model = lgb.LGBMRegressor(num_leaves=32,
182 | max_depth=6,
183 | learning_rate=0.08,
184 | n_estimators=10000,
185 | subsample=0.9,
186 | feature_fraction=0.8,
187 | reg_alpha=0.5,
188 | reg_lambda=0.8,
189 | random_state=2020)
190 | oof = []
191 | prediction = df_test[['cust_no']]
192 | prediction[target] = 0
193 |
194 |     kfold = KFold(n_splits=5)  # random_state has no effect unless shuffle=True
195 | for fold_id, (trn_idx, val_idx) in enumerate(kfold.split(df_train, df_train[target])):
196 | X_train = df_train.iloc[trn_idx][feature_names]
197 | Y_train = df_train.iloc[trn_idx][target]
198 | X_val = df_train.iloc[val_idx][feature_names]
199 | Y_val = df_train.iloc[val_idx][target]
200 |
201 | lgb_model = model.fit(X_train,
202 | Y_train,
203 | eval_names=['train', 'valid'],
204 | eval_set=[(X_train, Y_train), (X_val, Y_val)],
205 | verbose=0,
206 | eval_metric='mse',
207 | early_stopping_rounds=20,
208 | )
209 |
210 | pred_val = lgb_model.predict(X_val, num_iteration=lgb_model.best_iteration_)
211 | df_oof = df_train.iloc[val_idx][[target, 'cust_no']].copy()
212 | df_oof['pred'] = pred_val
213 | oof.append(df_oof)
214 |
215 | pred_test = lgb_model.predict(df_test[feature_names], num_iteration=lgb_model.best_iteration_)
216 |
217 | prediction[target] += pred_test / kfold.n_splits
218 |
219 |
220 | del lgb_model, pred_val, pred_test, X_train, Y_train, X_val, Y_val
221 | gc.collect()
222 |
223 | df_oof = pd.concat(oof)
224 | score = mean_squared_error(df_oof[target], df_oof['pred'])
225 | print('MSE:', score)
226 |
227 | return prediction,score
228 |
229 | ```
230 |
231 | MSE is used as the offline evaluation metric.
232 | 
233 | Feel free to improve on this; a sketch of scoring the out-of-fold predictions with Kappa instead is shown below.
234 |
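For example, a minimal sketch (assuming run_lgb_id is modified to also return its concatenated df_oof, which the code above does not do) of rounding the out-of-fold regression outputs back to {-1, 0, 1} and scoring them with Kappa:

```python
# Sketch: score OOF regression outputs with Kappa instead of MSE.
# Assumes run_lgb_id also returns its concatenated df_oof (not the case in the
# original code above), with columns ['label', 'cust_no', 'pred'].
import numpy as np
from sklearn.metrics import cohen_kappa_score

y_true = df_oof['label'].astype(int).values
y_pred = np.clip(np.rint(df_oof['pred'].values), -1, 1).astype(int)  # round to -1/0/1
print('offline kappa:', cohen_kappa_score(y_true, y_pred))
```
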
235 | The three missing cust_no were probably dropped during the feature merge, haha.
236 | 
237 | Online score: around 0.34
238 | 
239 | The code borrows heavily from the baseline shared by 恒佬.
240 | 
241 | This is my first time sharing a baseline, so please be kind.
242 | 
243 | Thanks, everyone!
244 |
--------------------------------------------------------------------------------
/competitions/sohu2022_nlp_rec/Rec_deepfm.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {
7 | "collapsed": true
8 | },
9 | "outputs": [],
10 | "source": [
11 | "import warnings\n",
12 | "warnings.simplefilter('ignore')\n",
13 | "\n",
14 | "import os\n",
15 | "import re\n",
16 | "import gc\n",
17 | "\n",
18 | "import numpy as np\n",
19 | "import pandas as pd\n",
20 | "pd.set_option('max_columns', None)\n",
21 | "pd.set_option('max_rows', 200)\n",
22 | "\n",
23 | "from tqdm.notebook import tqdm\n",
24 | "\n",
25 | "from sklearn.preprocessing import LabelEncoder, MinMaxScaler, minmax_scale\n",
26 | "from sklearn.feature_extraction.text import TfidfVectorizer\n",
27 | "from sklearn.decomposition import TruncatedSVD\n",
28 | "from sklearn.metrics import auc, roc_auc_score\n",
29 | "\n",
30 | "from deepctr.models import DeepFM\n",
31 | "from deepctr.feature_column import SparseFeat, DenseFeat, get_feature_names"
32 | ]
33 | },
34 | {
35 | "cell_type": "code",
36 | "execution_count": null,
37 | "metadata": {},
38 | "outputs": [],
39 | "source": [
40 | "train_data = pd.read_csv('Sohu2022_data/rec_data/train-dataset.csv')\n",
41 | "test_data = pd.read_csv('Sohu2022_data/rec_data/test-dataset.csv')\n",
42 | "\n",
43 | "print(train_data.shape, test_data.shape)\n",
44 | "display(train_data.head())\n",
45 | "display(test_data.head())"
46 | ]
47 | },
48 | {
49 | "cell_type": "code",
50 | "execution_count": null,
51 | "metadata": {},
52 | "outputs": [],
53 | "source": [
54 | "train_data.label.value_counts()"
55 | ]
56 | },
57 | {
58 | "cell_type": "code",
59 | "execution_count": null,
60 | "metadata": {},
61 | "outputs": [],
62 | "source": [
63 | "data = pd.concat([train_data, test_data])\n",
64 | "print(data.shape)"
65 | ]
66 | },
67 | {
68 | "cell_type": "code",
69 | "execution_count": null,
70 | "metadata": {},
71 | "outputs": [],
72 | "source": [
73 | "senti_feats = pd.read_csv('senti_feats.csv')\n",
74 | "senti_feats"
75 | ]
76 | },
77 | {
78 | "cell_type": "code",
79 | "execution_count": null,
80 | "metadata": {},
81 | "outputs": [],
82 | "source": [
83 | "data = data.merge(senti_feats, left_on='itemId', right_on='id', how='left')\n",
84 | "data"
85 | ]
86 | },
87 | {
88 | "cell_type": "code",
89 | "execution_count": null,
90 | "metadata": {},
91 | "outputs": [],
92 | "source": [
93 | "data['entity_count'].isna().sum()"
94 | ]
95 | },
96 | {
97 | "cell_type": "code",
98 | "execution_count": null,
99 | "metadata": {
100 | "collapsed": true
101 | },
102 | "outputs": [],
103 | "source": [
104 | "for feat in ['pvId', 'suv', 'itemId', 'operator', 'browserType', \n",
105 | " 'deviceType', 'osType', 'province', 'city']:\n",
106 | " lbe = LabelEncoder()\n",
107 | " data[feat] = lbe.fit_transform(data[feat])"
108 | ]
109 | },
110 | {
111 | "cell_type": "code",
112 | "execution_count": null,
113 | "metadata": {
114 | "collapsed": true
115 | },
116 | "outputs": [],
117 | "source": [
118 | "# 造点统计特征\n",
119 | "\n",
120 | "data['pvid_count'] = data.groupby('pvId')['itemId'].transform('count')\n",
121 | "data['pvid_item_nunique'] = data.groupby('pvId')['itemId'].transform('nunique')\n",
122 | "data['pvid_suv_nunique'] = data.groupby('pvId')['suv'].transform('nunique')\n",
123 | "\n",
124 | "data['item_count'] = data.groupby('itemId')['itemId'].transform('count')\n",
125 | "data['item_suv_nunique'] = data.groupby('pvId')['suv'].transform('nunique')"
126 | ]
127 | },
128 | {
129 | "cell_type": "code",
130 | "execution_count": null,
131 | "metadata": {
132 | "collapsed": true
133 | },
134 | "outputs": [],
135 | "source": [
136 | "sparse_features = ['pvId', 'suv', 'itemId', 'operator', 'browserType', \n",
137 | " 'deviceType', 'osType', 'province', 'city']\n",
138 | "dense_features = ['pvid_count', 'pvid_item_nunique', 'pvid_suv_nunique',\n",
139 | " 'item_count', 'item_suv_nunique', \n",
140 | " 'senti_0_max', 'senti_0_min', 'senti_0_mean',\n",
141 | " 'senti_0_std', 'senti_1_max', 'senti_1_min', 'senti_1_mean',\n",
142 | " 'senti_1_std', 'senti_2_max', 'senti_2_min', 'senti_2_mean',\n",
143 | " 'senti_2_std', 'senti_3_max', 'senti_3_min', 'senti_3_mean',\n",
144 | " 'senti_3_std', 'senti_4_max', 'senti_4_min', 'senti_4_mean',\n",
145 | " 'senti_4_std', 'entity_count']\n",
146 | "\n",
147 | "target = ['label']"
148 | ]
149 | },
150 | {
151 | "cell_type": "code",
152 | "execution_count": null,
153 | "metadata": {
154 | "collapsed": true
155 | },
156 | "outputs": [],
157 | "source": [
158 | "data[dense_features] = data[dense_features].fillna(0, )"
159 | ]
160 | },
161 | {
162 | "cell_type": "code",
163 | "execution_count": null,
164 | "metadata": {},
165 | "outputs": [],
166 | "source": [
167 | "mms = MinMaxScaler(feature_range=(0, 1))\n",
168 | "data[dense_features] = mms.fit_transform(data[dense_features])"
169 | ]
170 | },
171 | {
172 | "cell_type": "code",
173 | "execution_count": null,
174 | "metadata": {
175 | "collapsed": true
176 | },
177 | "outputs": [],
178 | "source": [
179 | "fixlen_feature_columns = [SparseFeat(feat, vocabulary_size=data[feat].max() + 1, embedding_dim=16)\n",
180 | " for i, feat in enumerate(sparse_features)] + [DenseFeat(feat, 1, )\n",
181 | " for feat in dense_features]"
182 | ]
183 | },
184 | {
185 | "cell_type": "code",
186 | "execution_count": null,
187 | "metadata": {
188 | "collapsed": true
189 | },
190 | "outputs": [],
191 | "source": [
192 | "dnn_feature_columns = fixlen_feature_columns\n",
193 | "linear_feature_columns = fixlen_feature_columns\n",
194 | "\n",
195 | "feature_names = get_feature_names(linear_feature_columns + dnn_feature_columns)"
196 | ]
197 | },
198 | {
199 | "cell_type": "code",
200 | "execution_count": null,
201 | "metadata": {},
202 | "outputs": [],
203 | "source": [
204 | "data.drop(['userSeq', 'logTs'], axis=1, inplace=True)\n",
205 | "display(data.head())"
206 | ]
207 | },
208 | {
209 | "cell_type": "code",
210 | "execution_count": null,
211 | "metadata": {},
212 | "outputs": [],
213 | "source": [
214 | "train_data = data[data['label'].notna()]\n",
215 | "test_data = data[data['label'].isna()]\n",
216 | "\n",
217 | "train_data.drop(['testSampleId'], axis=1, inplace=True)\n",
218 | "test_data.drop(['sampleId', 'label'], axis=1, inplace=True)\n",
219 | "\n",
220 | "print(train_data.shape, test_data.shape)"
221 | ]
222 | },
223 | {
224 | "cell_type": "code",
225 | "execution_count": null,
226 | "metadata": {},
227 | "outputs": [],
228 | "source": [
229 | "del data; gc.collect()"
230 | ]
231 | },
232 | {
233 | "cell_type": "code",
234 | "execution_count": null,
235 | "metadata": {},
236 | "outputs": [],
237 | "source": [
238 | "display(train_data.head())\n",
239 | "display(test_data.head())"
240 | ]
241 | },
242 | {
243 | "cell_type": "code",
244 | "execution_count": null,
245 | "metadata": {},
246 | "outputs": [],
247 | "source": [
248 | "# 数据划分\n",
249 | "\n",
250 | "train = train_data[:int(train_data.shape[0]*0.8)]\n",
251 | "valid = train_data[int(train_data.shape[0]*0.8):]\n",
252 | "test = test_data.copy()\n",
253 | "\n",
254 | "train.shape, valid.shape, test.shape"
255 | ]
256 | },
257 | {
258 | "cell_type": "code",
259 | "execution_count": null,
260 | "metadata": {
261 | "collapsed": true
262 | },
263 | "outputs": [],
264 | "source": [
265 | "train_model_input = {name: train[name] for name in feature_names}\n",
266 | "valid_model_input = {name: valid[name] for name in feature_names}\n",
267 | "test_model_input = {name: test[name] for name in feature_names}"
268 | ]
269 | },
270 | {
271 | "cell_type": "code",
272 | "execution_count": null,
273 | "metadata": {},
274 | "outputs": [],
275 | "source": [
276 | "model = DeepFM(linear_feature_columns, dnn_feature_columns, task='binary')\n",
277 | "model.compile(\"adam\", \"binary_crossentropy\", metrics=['binary_crossentropy', 'accuracy'])"
278 | ]
279 | },
280 | {
281 | "cell_type": "code",
282 | "execution_count": null,
283 | "metadata": {},
284 | "outputs": [],
285 | "source": [
286 | "history = model.fit(train_model_input, train[target].values,\n",
287 | " batch_size=256, epochs=3, verbose=1, \n",
288 | " validation_data=(valid_model_input, valid[target].values))"
289 | ]
290 | },
291 | {
292 | "cell_type": "code",
293 | "execution_count": null,
294 | "metadata": {},
295 | "outputs": [],
296 | "source": [
297 | "pred_ans = model.predict(valid_model_input, batch_size=256)\n",
298 | "print(\"valid AUC\", round(roc_auc_score(valid[target].values, pred_ans), 4))"
299 | ]
300 | },
301 | {
302 | "cell_type": "code",
303 | "execution_count": null,
304 | "metadata": {
305 | "collapsed": true
306 | },
307 | "outputs": [],
308 | "source": [
309 | "pred_ans = model.predict(test_model_input, batch_size=256)\n",
310 | "sub = pd.read_csv('submission_orig/section2.txt', sep='\\t')\n",
311 | "sub['result'] = pred_ans\n",
312 | "sub.to_csv('section2.txt', sep='\\t', index=False)"
313 | ]
314 | },
315 | {
316 | "cell_type": "code",
317 | "execution_count": null,
318 | "metadata": {
319 | "collapsed": true
320 | },
321 | "outputs": [],
322 | "source": []
323 | }
324 | ],
325 | "metadata": {
326 | "kernelspec": {
327 | "display_name": "Python 3",
328 | "language": "python",
329 | "name": "python3"
330 | },
331 | "language_info": {
332 | "codemirror_mode": {
333 | "name": "ipython",
334 | "version": 3
335 | },
336 | "file_extension": ".py",
337 | "mimetype": "text/x-python",
338 | "name": "python",
339 | "nbconvert_exporter": "python",
340 | "pygments_lexer": "ipython3",
341 | "version": "3.6.3"
342 | }
343 | },
344 | "nbformat": 4,
345 | "nbformat_minor": 2
346 | }
347 |
--------------------------------------------------------------------------------
/competitions/iflytek_agriculture_ner/baseline.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import warnings\n",
10 | "warnings.simplefilter('ignore')\n",
11 | "\n",
12 | "import logging\n",
13 | "\n",
14 | "import re\n",
15 | "\n",
16 | "import pandas as pd\n",
17 | "pd.set_option('max_rows', 500)\n",
18 | "pd.set_option('max_colwidth', 100)\n",
19 | "\n",
20 | "from tqdm import tqdm\n",
21 | "tqdm.pandas()\n",
22 | "\n",
23 | "from simpletransformers.ner import NERModel, NERArgs"
24 | ]
25 | },
26 | {
27 | "cell_type": "code",
28 | "execution_count": null,
29 | "metadata": {
30 | "scrolled": false
31 | },
32 | "outputs": [],
33 | "source": [
34 | "train = pd.read_csv('raw_data/train.csv')\n",
35 | "train.head(5)"
36 | ]
37 | },
38 | {
39 | "cell_type": "code",
40 | "execution_count": null,
41 | "metadata": {},
42 | "outputs": [],
43 | "source": [
44 | "clean_medicine = {\n",
45 | " '甲霜锰锌': \"锰锌\",\n",
46 | " '烯酰锰锌': \"锰锌\",\n",
47 | " '霜脲锰锌': \"锰锌\",\n",
48 | " '恶霜锰锌': \"锰锌\",\n",
49 | " '春雷王铜': \"王铜\",\n",
50 | " '阿维哒螨灵': \"哒螨灵\",\n",
51 | " '苯甲丙环唑': \"丙环唑\",\n",
52 | "}"
53 | ]
54 | },
55 | {
56 | "cell_type": "code",
57 | "execution_count": null,
58 | "metadata": {},
59 | "outputs": [],
60 | "source": [
61 | "train_data = list()\n",
62 | "\n",
63 | "for i, row in tqdm(train.iterrows()):\n",
64 | " id_ = i\n",
65 | " text = re.sub(\"[-\\d\\.%·\\+,。%一+]\", \"\", row['text'])\n",
66 | " text = text.replace('多 /w', '多/w')\n",
67 | " for d in clean_medicine:\n",
68 | " text = text.replace(d, clean_medicine[d])\n",
69 | "\n",
70 | " for item in text.split():\n",
71 | " item = item.replace(' ', '')\n",
72 | " try:\n",
73 | " w, lbl = item.split('/')\n",
74 | " if lbl not in ['n_crop', 'n_disease', 'n_medicine']:\n",
75 | " for c in w:\n",
76 | " train_data.append([id_, c, 'O'])\n",
77 | " elif lbl == 'n_crop':\n",
78 | " if len(w) < 2:\n",
79 | " print(f\"word len < 2: {w}\")\n",
80 | " else:\n",
81 | " train_data.append([id_, w[0], 'B_crop'])\n",
82 | " if len(w) > 2:\n",
83 | " for c in w[1:-1]:\n",
84 | " train_data.append([id_, c, 'I_crop'])\n",
85 | " train_data.append([id_, w[-1], 'E_crop'])\n",
86 | " elif lbl == 'n_disease':\n",
87 | " if len(w) < 2:\n",
88 | " print(f\"word len < 2: {w}\")\n",
89 | " else:\n",
90 | " train_data.append([id_, w[0], 'B_disease'])\n",
91 | " if len(w) > 2:\n",
92 | " for c in w[1:-1]:\n",
93 | " train_data.append([id_, c, 'I_disease'])\n",
94 | " train_data.append([id_, w[-1], 'E_disease'])\n",
95 | " elif lbl == 'n_medicine':\n",
96 | " if len(w) < 2:\n",
97 | " print(f\"word len < 2: {w}\")\n",
98 | " else:\n",
99 | " train_data.append([id_, w[0], 'B_medicine'])\n",
100 | " if len(w) > 2:\n",
101 | " for c in w[1:-1]:\n",
102 | " train_data.append([id_, c, 'I_medicine'])\n",
103 | " train_data.append([id_, w[-1], 'E_medicine'])\n",
104 | " except:\n",
105 | " item = re.sub(r'/[a-z]+', '', item)\n",
106 | " for c in item:\n",
107 | " train_data.append([id_, c, 'O'])"
108 | ]
109 | },
110 | {
111 | "cell_type": "code",
112 | "execution_count": null,
113 | "metadata": {},
114 | "outputs": [],
115 | "source": [
116 | "train_data = pd.DataFrame(\n",
117 | " train_data, columns=[\"sentence_id\", \"words\", \"labels\"]\n",
118 | ")\n",
119 | "\n",
120 | "train_data.head()"
121 | ]
122 | },
123 | {
124 | "cell_type": "code",
125 | "execution_count": null,
126 | "metadata": {},
127 | "outputs": [],
128 | "source": [
129 | "train_data.labels.value_counts()"
130 | ]
131 | },
132 | {
133 | "cell_type": "code",
134 | "execution_count": null,
135 | "metadata": {},
136 | "outputs": [],
137 | "source": [
138 | "labels = [\n",
139 | " 'B_crop',\n",
140 | " 'I_crop',\n",
141 | " 'E_crop',\n",
142 | " 'B_disease',\n",
143 | " 'I_disease',\n",
144 | " 'E_disease',\n",
145 | " 'B_medicine',\n",
146 | " 'I_medicine',\n",
147 | " 'E_medicine',\n",
148 | " 'O'\n",
149 | "]"
150 | ]
151 | },
152 | {
153 | "cell_type": "code",
154 | "execution_count": null,
155 | "metadata": {},
156 | "outputs": [],
157 | "source": [
158 | "eval_data = train_data[train_data['sentence_id'] >= len(train)-300]\n",
159 | "eval_data.head()"
160 | ]
161 | },
162 | {
163 | "cell_type": "code",
164 | "execution_count": null,
165 | "metadata": {},
166 | "outputs": [],
167 | "source": [
168 | "train_data = train_data[train_data['sentence_id'] < len(train)-300]\n",
169 | "\n",
170 | "train_data.shape, eval_data.shape"
171 | ]
172 | },
173 | {
174 | "cell_type": "code",
175 | "execution_count": null,
176 | "metadata": {},
177 | "outputs": [],
178 | "source": [
179 | "model_args = NERArgs()\n",
180 | "model_args.train_batch_size = 8\n",
181 | "model_args.num_train_epochs = 5\n",
182 | "model_args.fp16 = False\n",
183 | "model_args.evaluate_during_training = True"
184 | ]
185 | },
186 | {
187 | "cell_type": "code",
188 | "execution_count": null,
189 | "metadata": {},
190 | "outputs": [],
191 | "source": [
192 | "model = NERModel(\"bert\", \n",
193 | " \"hfl/chinese-bert-wwm-ext\",\n",
194 | " labels=labels,\n",
195 | " args=model_args)"
196 | ]
197 | },
198 | {
199 | "cell_type": "code",
200 | "execution_count": null,
201 | "metadata": {},
202 | "outputs": [],
203 | "source": [
204 | "model.train_model(train_data, eval_data=eval_data)"
205 | ]
206 | },
207 | {
208 | "cell_type": "code",
209 | "execution_count": null,
210 | "metadata": {},
211 | "outputs": [],
212 | "source": [
213 | "result, model_outputs, preds_list = model.eval_model(eval_data)\n",
214 | "result"
215 | ]
216 | },
217 | {
218 | "cell_type": "code",
219 | "execution_count": null,
220 | "metadata": {},
221 | "outputs": [],
222 | "source": [
223 | "test = pd.read_csv('raw_data/test.csv')\n",
224 | "test.head()"
225 | ]
226 | },
227 | {
228 | "cell_type": "code",
229 | "execution_count": null,
230 | "metadata": {},
231 | "outputs": [],
232 | "source": [
233 | "test.shape"
234 | ]
235 | },
236 | {
237 | "cell_type": "code",
238 | "execution_count": null,
239 | "metadata": {},
240 | "outputs": [],
241 | "source": [
242 | "test_data = list()\n",
243 | "\n",
244 | "for i, row in tqdm(test.iterrows()):\n",
245 | " id_ = i\n",
246 | " text = re.sub(\"[-\\d\\.%·\\+,。%一+ ]\", \"\", row['text'])\n",
247 | " text = text.replace('多 /w', '多/w')\n",
248 | " for d in clean_medicine:\n",
249 | " text = text.replace(d, clean_medicine[d])\n",
250 | " \n",
251 | " preds, _ = model.predict([text], split_on_space=False)\n",
252 | "\n",
253 | " n_crop = list()\n",
254 | " n_disease = list()\n",
255 | " n_medicine = list()\n",
256 | " \n",
257 | " new_li = list()\n",
258 | " for i in preds[0]:\n",
259 | " for ch, lb in i.items():\n",
260 | " new_li.append([ch, lb])\n",
261 | " \n",
262 | " max_ = len(new_li)\n",
263 | " for i in range(max_):\n",
264 | " w = list()\n",
265 | " ch1, lb1 = new_li[i]\n",
266 | " if lb1 == 'B_crop':\n",
267 | " w.append(ch1)\n",
268 | " for j in range(i+1, max_):\n",
269 | " ch2, lb2 = new_li[j]\n",
270 | " if lb2 == 'I_crop' or lb2 == 'O':\n",
271 | " w.append(ch2)\n",
272 | " elif lb2 == 'E_crop':\n",
273 | " w.append(ch2)\n",
274 | " n_crop.append(\"\".join(w))\n",
275 | " break\n",
276 | " elif lb1 == 'B_disease':\n",
277 | " w.append(ch1)\n",
278 | " for j in range(i+1, max_):\n",
279 | " ch2, lb2 = new_li[j]\n",
280 | " if lb2 == 'I_disease' or lb2 == 'O':\n",
281 | " w.append(ch2)\n",
282 | " elif lb2 == 'E_disease':\n",
283 | " w.append(ch2)\n",
284 | " n_disease.append(\"\".join(w))\n",
285 | " break\n",
286 | " elif lb1 == 'B_medicine':\n",
287 | " w.append(ch1)\n",
288 | " for j in range(i+1, max_):\n",
289 | " ch2, lb2 = new_li[j]\n",
290 | " if lb2 == 'I_medicine' or lb2 == 'O':\n",
291 | " w.append(ch2)\n",
292 | " elif lb2 == 'E_medicine':\n",
293 | " w.append(ch2)\n",
294 | " n_medicine.append(\"\".join(w))\n",
295 | " break\n",
296 | " \n",
297 | " test_data.append([id_, n_crop, n_disease, n_medicine])"
298 | ]
299 | },
300 | {
301 | "cell_type": "code",
302 | "execution_count": null,
303 | "metadata": {},
304 | "outputs": [],
305 | "source": [
306 | "test_data = pd.DataFrame(\n",
307 | " test_data, columns=['id', 'n_crop', 'n_disease', 'n_medicine']\n",
308 | ")\n",
309 | "\n",
310 | "test_data.head(10)"
311 | ]
312 | },
313 | {
314 | "cell_type": "code",
315 | "execution_count": null,
316 | "metadata": {},
317 | "outputs": [],
318 | "source": [
319 | "test_data.to_csv('submission.csv', index=False)"
320 | ]
321 | },
322 | {
323 | "cell_type": "code",
324 | "execution_count": null,
325 | "metadata": {},
326 | "outputs": [],
327 | "source": []
328 | }
329 | ],
330 | "metadata": {
331 | "kernelspec": {
332 | "display_name": "Python 3",
333 | "language": "python",
334 | "name": "python3"
335 | },
336 | "language_info": {
337 | "codemirror_mode": {
338 | "name": "ipython",
339 | "version": 3
340 | },
341 | "file_extension": ".py",
342 | "mimetype": "text/x-python",
343 | "name": "python",
344 | "nbconvert_exporter": "python",
345 | "pygments_lexer": "ipython3",
346 | "version": "3.7.4"
347 | }
348 | },
349 | "nbformat": 4,
350 | "nbformat_minor": 2
351 | }
352 |
--------------------------------------------------------------------------------
/competitions/aiyanxishe_weibo_baseline/yanxishe_weibo_BERT_baseline.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {
7 | "scrolled": true
8 | },
9 | "outputs": [],
10 | "source": [
11 | "import gc\n",
12 | "import codecs\n",
13 | "import warnings\n",
14 | "warnings.simplefilter('ignore')\n",
15 | "\n",
16 | "import numpy as np\n",
17 | "import pandas as pd\n",
18 | "\n",
19 | "from random import choice\n",
20 | "from sklearn.preprocessing import LabelEncoder\n",
21 | "from sklearn.model_selection import KFold\n",
22 | "\n",
23 | "from keras_bert import load_trained_model_from_checkpoint, Tokenizer\n",
24 | "\n",
25 | "from keras.utils import to_categorical\n",
26 | "from keras.layers import *\n",
27 | "from keras.callbacks import *\n",
28 | "from keras.models import Model\n",
29 | "import keras.backend as K\n",
30 | "from keras.optimizers import Adam"
31 | ]
32 | },
33 | {
34 | "cell_type": "code",
35 | "execution_count": null,
36 | "metadata": {
37 | "scrolled": true
38 | },
39 | "outputs": [],
40 | "source": [
41 | "train = pd.read_csv('raw_data/train.csv', sep='\\t')\n",
42 | "test = pd.read_csv('raw_data/test.csv', sep='\\t')"
43 | ]
44 | },
45 | {
46 | "cell_type": "code",
47 | "execution_count": null,
48 | "metadata": {
49 | "scrolled": true
50 | },
51 | "outputs": [],
52 | "source": [
53 | "train.head()"
54 | ]
55 | },
56 | {
57 | "cell_type": "code",
58 | "execution_count": null,
59 | "metadata": {
60 | "scrolled": true
61 | },
62 | "outputs": [],
63 | "source": [
64 | "test.head()"
65 | ]
66 | },
67 | {
68 | "cell_type": "code",
69 | "execution_count": null,
70 | "metadata": {
71 | "scrolled": true
72 | },
73 | "outputs": [],
74 | "source": [
75 | "train.target.value_counts()"
76 | ]
77 | },
78 | {
79 | "cell_type": "code",
80 | "execution_count": null,
81 | "metadata": {
82 | "scrolled": true
83 | },
84 | "outputs": [],
85 | "source": [
86 | "train.stance.value_counts(dropna=False)"
87 | ]
88 | },
89 | {
90 | "cell_type": "code",
91 | "execution_count": null,
92 | "metadata": {
93 | "scrolled": true
94 | },
95 | "outputs": [],
96 | "source": [
97 | "train['text'] = train.text.fillna('')\n",
98 | "test['text'] = test.text.fillna('')"
99 | ]
100 | },
101 | {
102 | "cell_type": "code",
103 | "execution_count": null,
104 | "metadata": {
105 | "scrolled": true
106 | },
107 | "outputs": [],
108 | "source": [
109 | "train['stance'] = train['stance'].map({'AGAINST':0, 'FAVOR':1, 'NONE':2})\n",
110 | "train.head()"
111 | ]
112 | },
113 | {
114 | "cell_type": "code",
115 | "execution_count": null,
116 | "metadata": {
117 | "scrolled": true
118 | },
119 | "outputs": [],
120 | "source": [
121 | "maxlen = 256\n",
122 | "\n",
123 | "config_path = '/data/zhengheng/nlp_pretrain_models/chinese-roberta-wwm-ext-l12-h768-a12/bert_config.json'\n",
124 | "checkpoint_path = '/data/zhengheng/nlp_pretrain_models/chinese-roberta-wwm-ext-l12-h768-a12/bert_model.ckpt'\n",
125 | "dict_path = '/data/zhengheng/nlp_pretrain_models/chinese-roberta-wwm-ext-l12-h768-a12/vocab.txt'\n",
126 | "\n",
127 | "token_dict = {}\n",
128 | "with codecs.open(dict_path, 'r', 'utf8') as reader:\n",
129 | " for line in reader:\n",
130 | " token = line.strip()\n",
131 | " token_dict[token] = len(token_dict)\n",
132 | "\n",
133 | "class OurTokenizer(Tokenizer):\n",
134 | " def _tokenize(self, text):\n",
135 | " R = []\n",
136 | " for c in text:\n",
137 | " if c in self._token_dict:\n",
138 | " R.append(c)\n",
139 | " elif self._is_space(c):\n",
140 | " R.append('[unused1]') # space类用未经训练的[unused1]表示\n",
141 | " else:\n",
142 | " R.append('[UNK]') # 剩余的字符是[UNK]\n",
143 | " return R\n",
144 | "\n",
145 | "tokenizer = OurTokenizer(token_dict)\n",
146 | "\n",
147 | "def seq_padding(X, padding=0):\n",
148 | " L = [len(x) for x in X]\n",
149 | " ML = max(L)\n",
150 | " return np.array([\n",
151 | " np.concatenate([x, [padding] * (ML - len(x))]) if len(x) < ML else x for x in X\n",
152 | " ])\n",
153 | "\n",
154 | "class data_generator:\n",
155 | " def __init__(self, data, batch_size=8, shuffle=True):\n",
156 | " self.data = data\n",
157 | " self.batch_size = batch_size\n",
158 | " self.shuffle = shuffle\n",
159 | " self.steps = len(self.data) // self.batch_size\n",
160 | " if len(self.data) % self.batch_size != 0:\n",
161 | " self.steps += 1\n",
162 | " def __len__(self):\n",
163 | " return self.steps\n",
164 | " def __iter__(self):\n",
165 | " while True:\n",
166 | " idxs = list(range(len(self.data)))\n",
167 | " \n",
168 | " if self.shuffle:\n",
169 | " np.random.shuffle(idxs)\n",
170 | " \n",
171 | " X1, X2, Y = [], [], []\n",
172 | " for i in idxs:\n",
173 | " d = self.data[i]\n",
174 | " text = d[0][:maxlen]\n",
175 | " x1, x2 = tokenizer.encode(first=text)\n",
176 | " y = d[1]\n",
177 | " X1.append(x1)\n",
178 | " X2.append(x2)\n",
179 | " Y.append([y])\n",
180 | " if len(X1) == self.batch_size or i == idxs[-1]:\n",
181 | " X1 = seq_padding(X1)\n",
182 | " X2 = seq_padding(X2)\n",
183 | " Y = seq_padding(Y)\n",
184 | " yield [X1, X2], Y[:, 0, :]\n",
185 | " [X1, X2, Y] = [], [], []\n",
186 | " \n",
187 | "def build_bert():\n",
188 | " bert_model = load_trained_model_from_checkpoint(config_path, checkpoint_path, seq_len=None)\n",
189 | "\n",
190 | " for l in bert_model.layers:\n",
191 | " l.trainable = True\n",
192 | "\n",
193 | " x1_in = Input(shape=(None,))\n",
194 | " x2_in = Input(shape=(None,))\n",
195 | "\n",
196 | " x = bert_model([x1_in, x2_in])\n",
197 | " x = Lambda(lambda x: x[:, 0])(x)\n",
198 | " p = Dense(3, activation='softmax')(x)\n",
199 | "\n",
200 | " model = Model([x1_in, x2_in], p)\n",
201 | " model.compile(loss='categorical_crossentropy', \n",
202 | " optimizer=Adam(1e-5),\n",
203 | " metrics=['accuracy'])\n",
204 | " print(model.summary())\n",
205 | " return model"
206 | ]
207 | },
208 | {
209 | "cell_type": "code",
210 | "execution_count": null,
211 | "metadata": {
212 | "scrolled": true
213 | },
214 | "outputs": [],
215 | "source": [
216 | "DATA_LIST = []\n",
217 | "for data_row in train.iloc[:].itertuples():\n",
218 | " DATA_LIST.append((data_row.text, to_categorical(data_row.stance, 3)))\n",
219 | "DATA_LIST = np.array(DATA_LIST)\n",
220 | "\n",
221 | "DATA_LIST_TEST = []\n",
222 | "for data_row in test.iloc[:].itertuples():\n",
223 | " DATA_LIST_TEST.append((data_row.text, to_categorical(0, 3)))\n",
224 | "DATA_LIST_TEST = np.array(DATA_LIST_TEST)"
225 | ]
226 | },
227 | {
228 | "cell_type": "code",
229 | "execution_count": null,
230 | "metadata": {
231 | "scrolled": true
232 | },
233 | "outputs": [],
234 | "source": [
235 | "def run_cv(nfold, data, data_label, data_test):\n",
236 | " \n",
237 | " kf = KFold(n_splits=nfold, shuffle=True, random_state=1029).split(data)\n",
238 | " train_model_pred = np.zeros((len(data), 3))\n",
239 | " test_model_pred = np.zeros((len(data_test), 3))\n",
240 | "\n",
241 | " for i, (train_fold, test_fold) in enumerate(kf):\n",
242 | " X_train, X_valid, = data[train_fold, :], data[test_fold, :]\n",
243 | " \n",
244 | " model = build_bert()\n",
245 | " early_stopping = EarlyStopping(monitor='val_acc', patience=3)\n",
246 | " plateau = ReduceLROnPlateau(monitor=\"val_acc\", verbose=1, mode='max', factor=0.5, patience=2)\n",
247 | " checkpoint = ModelCheckpoint('./' + str(i) + '.hdf5', monitor='val_acc', \n",
248 | " verbose=2, save_best_only=True, mode='max', save_weights_only=True)\n",
249 | " \n",
250 | " train_D = data_generator(X_train, shuffle=True)\n",
251 | " valid_D = data_generator(X_valid, shuffle=True)\n",
252 | " test_D = data_generator(data_test, shuffle=False)\n",
253 | " \n",
254 | " model.fit_generator(\n",
255 | " train_D.__iter__(),\n",
256 | " steps_per_epoch=len(train_D),\n",
257 | " epochs=5,\n",
258 | " validation_data=valid_D.__iter__(),\n",
259 | " validation_steps=len(valid_D),\n",
260 | " callbacks=[early_stopping, plateau, checkpoint],\n",
261 | " )\n",
262 | " \n",
263 | " train_model_pred[test_fold, :] = model.predict_generator(valid_D.__iter__(), steps=len(valid_D), verbose=1)\n",
264 | " test_model_pred += model.predict_generator(test_D.__iter__(), steps=len(test_D), verbose=1)\n",
265 | " \n",
266 | " del model; gc.collect()\n",
267 | " K.clear_session()\n",
268 | " \n",
269 | " return train_model_pred, test_model_pred"
270 | ]
271 | },
272 | {
273 | "cell_type": "code",
274 | "execution_count": null,
275 | "metadata": {
276 | "scrolled": true
277 | },
278 | "outputs": [],
279 | "source": [
280 | "train_model_pred, test_model_pred = run_cv(5, DATA_LIST, None, DATA_LIST_TEST)"
281 | ]
282 | },
283 | {
284 | "cell_type": "code",
285 | "execution_count": null,
286 | "metadata": {
287 | "scrolled": true
288 | },
289 | "outputs": [],
290 | "source": [
291 | "test_pred = [np.argmax(x) for x in test_model_pred]\n",
292 | "test['y'] = test_pred"
293 | ]
294 | },
295 | {
296 | "cell_type": "code",
297 | "execution_count": null,
298 | "metadata": {
299 | "scrolled": true
300 | },
301 | "outputs": [],
302 | "source": [
303 | "submission = test[['y']]\n",
304 | "submission['id'] = submission.index\n",
305 | "submission['y'] = submission['y'].map({0:'AGAINST', 1:'FAVOR', 2:'NONE'})\n",
306 | "submission = submission[['id', 'y']]\n",
307 | "submission.head()"
308 | ]
309 | },
310 | {
311 | "cell_type": "code",
312 | "execution_count": null,
313 | "metadata": {
314 | "scrolled": true
315 | },
316 | "outputs": [],
317 | "source": [
318 | "submission.to_csv('submissions/submission_bert_ext_wwm_baseline.csv', index=False, header=False)"
319 | ]
320 | },
321 | {
322 | "cell_type": "code",
323 | "execution_count": null,
324 | "metadata": {
325 | "scrolled": true
326 | },
327 | "outputs": [],
328 | "source": [
329 | "!head 'submissions/submission_bert_ext_wwm_baseline.csv'"
330 | ]
331 | },
332 | {
333 | "cell_type": "code",
334 | "execution_count": null,
335 | "metadata": {
336 | "scrolled": true
337 | },
338 | "outputs": [],
339 | "source": []
340 | }
341 | ],
342 | "metadata": {
343 | "kernelspec": {
344 | "display_name": "Python 3",
345 | "language": "python",
346 | "name": "python3"
347 | },
348 | "language_info": {
349 | "codemirror_mode": {
350 | "name": "ipython",
351 | "version": 3
352 | },
353 | "file_extension": ".py",
354 | "mimetype": "text/x-python",
355 | "name": "python",
356 | "nbconvert_exporter": "python",
357 | "pygments_lexer": "ipython3",
358 | "version": "3.7.6"
359 | }
360 | },
361 | "nbformat": 4,
362 | "nbformat_minor": 2
363 | }
364 |
--------------------------------------------------------------------------------
/competitions/tianchi_news_classification/baseline.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import warnings\n",
10 | "warnings.simplefilter('ignore')\n",
11 | "\n",
12 | "import numpy as np\n",
13 | "import pandas as pd\n",
14 | "pd.set_option('max_columns', None)\n",
15 | "pd.set_option('max_rows', 1000)\n",
16 | "\n",
17 | "%matplotlib inline\n",
18 | "import matplotlib.pyplot as plt\n",
19 | "import seaborn as sns\n",
20 | "\n",
21 | "import pickle\n",
22 | "import gc\n",
23 | "import logging\n",
24 | "from collections import Counter\n",
25 | "\n",
26 | "from tqdm.autonotebook import *\n",
27 | "\n",
28 | "import gensim\n",
29 | "from gensim.models import FastText, Word2Vec\n",
30 | "\n",
31 | "from sklearn.model_selection import train_test_split\n",
32 | "from sklearn.metrics import f1_score\n",
33 | "\n",
34 | "import keras\n",
35 | "from keras import layers\n",
36 | "from keras import callbacks\n",
37 | "\n",
38 | "from bert4keras.snippets import sequence_padding, DataGenerator\n",
39 | "\n",
40 | "from keras_multi_head import MultiHead, MultiHeadAttention\n",
41 | "from keras_self_attention import SeqSelfAttention\n",
42 | "from keras_position_wise_feed_forward import FeedForward\n",
43 | "from keras_layer_normalization import LayerNormalization"
44 | ]
45 | },
46 | {
47 | "cell_type": "code",
48 | "execution_count": null,
49 | "metadata": {},
50 | "outputs": [],
51 | "source": [
52 | "df_train = pd.read_csv('raw_data/train_set.csv', sep='\\t')\n",
53 | "df_test = pd.read_csv('raw_data/test_a.csv', sep='\\t')\n",
54 | "\n",
55 | "df_train['text'] = df_train['text'].apply(lambda x: list(map(lambda y: int(y), x.split())))\n",
56 | "df_test['text'] = df_test['text'].apply(lambda x: list(map(lambda y: int(y), x.split())))"
57 | ]
58 | },
59 | {
60 | "cell_type": "code",
61 | "execution_count": null,
62 | "metadata": {},
63 | "outputs": [],
64 | "source": [
65 | "df_train, df_valid = train_test_split(df_train, test_size=0.2, random_state=2020)"
66 | ]
67 | },
68 | {
69 | "cell_type": "code",
70 | "execution_count": null,
71 | "metadata": {},
72 | "outputs": [],
73 | "source": [
74 | "num_classes = 14\n",
75 | "vocabulary_size = 7600\n",
76 | "\n",
77 | "maxlen = 256\n",
78 | "batch_size = 128\n",
79 | "embedding_dim = 128"
80 | ]
81 | },
82 | {
83 | "cell_type": "code",
84 | "execution_count": null,
85 | "metadata": {},
86 | "outputs": [],
87 | "source": [
88 | "def load_data(df):\n",
89 | " \"\"\"加载数据\"\"\"\n",
90 | " D = list()\n",
91 | " for _, row in df.iterrows():\n",
92 | " text = row['text']\n",
93 | " label = row['label']\n",
94 | " D.append((text, int(label)))\n",
95 | " return D"
96 | ]
97 | },
98 | {
99 | "cell_type": "code",
100 | "execution_count": null,
101 | "metadata": {},
102 | "outputs": [],
103 | "source": [
104 | "train_data = load_data(df_train)\n",
105 | "valid_data = load_data(df_valid)"
106 | ]
107 | },
108 | {
109 | "cell_type": "code",
110 | "execution_count": null,
111 | "metadata": {},
112 | "outputs": [],
113 | "source": [
114 | "class data_generator(DataGenerator):\n",
115 | " \"\"\"数据生成器\"\"\"\n",
116 | "\n",
117 | " def __init__(self, data, batch_size=32, buffer_size=None, random=False):\n",
118 | " super().__init__(data, batch_size, buffer_size)\n",
119 | " self.random = random\n",
120 | "\n",
121 | " def __iter__(self, random=False):\n",
122 | " batch_token_ids, batch_labels = [], []\n",
123 | " for is_end, (text, label) in self.sample(random):\n",
124 | " token_ids = text[:maxlen] if len(text) > maxlen else text + (maxlen - len(text)) * [0]\n",
125 | " batch_token_ids.append(token_ids)\n",
126 | " batch_labels.append([label])\n",
127 | " if len(batch_token_ids) == self.batch_size or is_end:\n",
128 | " batch_token_ids = sequence_padding(batch_token_ids)\n",
129 | " batch_labels = sequence_padding(batch_labels)\n",
130 | " yield [batch_token_ids], batch_labels\n",
131 | " batch_token_ids, batch_labels = [], []\n",
132 | "\n",
133 | " def forfit(self):\n",
134 | " while True:\n",
135 | " for d in self.__iter__(self.random):\n",
136 | " yield d"
137 | ]
138 | },
139 | {
140 | "cell_type": "code",
141 | "execution_count": null,
142 | "metadata": {},
143 | "outputs": [],
144 | "source": [
145 | "train_generator = data_generator(train_data, batch_size, random=True)\n",
146 | "valid_generator = data_generator(valid_data, batch_size)"
147 | ]
148 | },
149 | {
150 | "cell_type": "code",
151 | "execution_count": null,
152 | "metadata": {},
153 | "outputs": [],
154 | "source": [
155 | "def build_model():\n",
156 | "\n",
157 | " inp = layers.Input(shape=(maxlen,))\n",
158 | "\n",
159 | " emb_layer = layers.Embedding(\n",
160 | " input_dim=vocabulary_size,\n",
161 | " output_dim=embedding_dim,\n",
162 | " input_length=maxlen\n",
163 | " )(inp)\n",
164 | "\n",
165 | " sdrop = layers.SpatialDropout1D(rate=0.2)\n",
166 | "\n",
167 | " emb_layer = sdrop(emb_layer)\n",
168 | "\n",
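169 |     "    # two stacked Transformer-style encoder blocks: multi-head self-attention plus a\n",
170 |     "    # feed-forward layer, each wrapped with a residual connection and layer normalization\n",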
169 | " mha1 = MultiHeadAttention(head_num=16)(emb_layer)\n",
170 | " mha1 = layers.Dropout(0.01)(mha1)\n",
171 | " mha1 = layers.Add()([emb_layer, mha1])\n",
172 | " mha1 = LayerNormalization()(mha1)\n",
173 | " mha1 = layers.Dropout(0.01)(mha1)\n",
174 | " mha1_ff = FeedForward(128)(mha1)\n",
175 | " mha1_out = layers.Add()([mha1, mha1_ff])\n",
176 | " mha1_out = LayerNormalization()(mha1_out)\n",
177 | "\n",
178 | " mha2 = MultiHeadAttention(head_num=16)(mha1_out)\n",
179 | " mha2 = layers.Dropout(0.01)(mha2)\n",
180 | " mha2 = layers.Add()([mha1_out, mha2])\n",
181 | " mha2 = LayerNormalization()(mha2)\n",
182 | " mha2 = layers.Dropout(0.01)(mha2)\n",
183 | " mha2_ff = FeedForward(128)(mha2)\n",
184 | " mha2_out = layers.Add()([mha2, mha2_ff])\n",
185 | " mha2_out = LayerNormalization()(mha2_out)\n",
186 | " \n",
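187 |     "    # BiLSTM over the encoder outputs, then concatenate global average- and max-pooling\n",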
187 | " lstm = layers.Bidirectional(layers.LSTM(128, return_sequences=True))(mha2_out)\n",
188 | "\n",
189 | " avg_pool = layers.GlobalAveragePooling1D()(lstm)\n",
190 | " max_pool = layers.GlobalMaxPool1D()(lstm)\n",
191 | "\n",
192 | " x = layers.Concatenate()([avg_pool, max_pool])\n",
193 | "\n",
194 | " x = layers.Dense(128, activation='relu')(x)\n",
195 | " x = layers.BatchNormalization()(x)\n",
196 | "\n",
197 | " x = layers.Dense(64, activation='relu')(x)\n",
198 | " x = layers.BatchNormalization()(x)\n",
199 | "\n",
200 | " x = layers.Dropout(0.2)(x)\n",
201 | "\n",
202 | " out = layers.Dense(num_classes, activation='softmax')(x)\n",
203 | " model = keras.Model(inputs=inp, outputs=out)\n",
204 | " model.compile(loss='sparse_categorical_crossentropy',\n",
205 | " optimizer=keras.optimizers.Adam(1e-4),\n",
206 | " metrics=['accuracy'])\n",
207 | " \n",
208 | " return model\n",
209 | "\n",
210 | "model = build_model()"
211 | ]
212 | },
213 | {
214 | "cell_type": "code",
215 | "execution_count": null,
216 | "metadata": {},
217 | "outputs": [],
218 | "source": [
219 | "model.summary()"
220 | ]
221 | },
222 | {
223 | "cell_type": "code",
224 | "execution_count": null,
225 | "metadata": {},
226 | "outputs": [],
227 | "source": [
228 | "class Evaluator(callbacks.Callback):\n",
229 | " def __init__(self):\n",
230 | " super().__init__()\n",
231 | " self.best_val_f1 = 0.\n",
232 | "\n",
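233 |     "    # macro-F1 on the validation set; on_epoch_end stores it in logs as 'val_f1' so the\n",
234 |     "    # ModelCheckpoint and ReduceLROnPlateau callbacks below can monitor it\n",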
233 | " def evaluate(self):\n",
234 | " y_true, y_pred = list(), list()\n",
235 | " for x, y in valid_generator:\n",
236 | " y_true.append(y)\n",
237 | " y_pred.append(self.model.predict(x).argmax(axis=1))\n",
238 | " y_true = np.concatenate(y_true)\n",
239 | " y_pred = np.concatenate(y_pred)\n",
240 | " f1 = f1_score(y_true, y_pred, average='macro')\n",
241 | " return f1\n",
242 | "\n",
243 | " def on_epoch_end(self, epoch, logs=None):\n",
244 | " val_f1 = self.evaluate()\n",
245 | " if val_f1 > self.best_val_f1:\n",
246 | " self.best_val_f1 = val_f1\n",
247 | " logs['val_f1'] = val_f1\n",
248 | " print(f'val_f1: {val_f1:.5f}, best_val_f1: {self.best_val_f1:.5f}')"
249 | ]
250 | },
251 | {
252 | "cell_type": "code",
253 | "execution_count": null,
254 | "metadata": {},
255 | "outputs": [],
256 | "source": [
257 |     "callback_list = [\n",
258 | " Evaluator(),\n",
259 | " callbacks.EarlyStopping(\n",
260 | " monitor='val_accuracy', \n",
261 | " mode='max',\n",
262 | " patience=5, \n",
263 | " verbose=1\n",
264 | " ),\n",
265 | " callbacks.ModelCheckpoint(\n",
266 | " './models/model.h5',\n",
267 | " monitor='val_f1',\n",
268 | " save_weights_only=True,\n",
269 | " save_best_only=True,\n",
270 | " verbose=1,\n",
271 | " mode='max'\n",
272 | " ),\n",
273 | " callbacks.ReduceLROnPlateau(\n",
274 | " monitor='val_f1',\n",
275 | " factor=0.1,\n",
276 | " patience=2,\n",
277 | " verbose=1,\n",
278 | " mode='max',\n",
279 | " epsilon=1e-6\n",
280 | " )\n",
281 | " \n",
282 | "]"
283 | ]
284 | },
285 | {
286 | "cell_type": "code",
287 | "execution_count": null,
288 | "metadata": {
289 | "scrolled": false
290 | },
291 | "outputs": [],
292 | "source": [
293 | "model.fit(\n",
294 | " train_generator.forfit(),\n",
295 | " steps_per_epoch=len(train_generator),\n",
296 | " epochs=100,\n",
297 |     "    callbacks=callback_list,\n",
298 | " validation_data=valid_generator.forfit(),\n",
299 | " validation_steps=len(valid_generator)\n",
300 | ")"
301 | ]
302 | },
303 | {
304 | "cell_type": "code",
305 | "execution_count": null,
306 | "metadata": {},
307 | "outputs": [],
308 | "source": [
309 | "df_test['label'] = 0\n",
310 | "test_data = load_data(df_test)\n",
311 | "test_generator = data_generator(test_data, batch_size)"
312 | ]
313 | },
314 | {
315 | "cell_type": "code",
316 | "execution_count": null,
317 | "metadata": {},
318 | "outputs": [],
319 | "source": [
320 | "result = model.predict_generator(test_generator.forfit(), steps=len(test_generator))\n",
321 | "result = result.argmax(axis=1)"
322 | ]
323 | },
324 | {
325 | "cell_type": "code",
326 | "execution_count": null,
327 | "metadata": {},
328 | "outputs": [],
329 | "source": [
330 | "df_test['label'] = result\n",
331 | "df_test.to_csv('submission.csv', index=False, columns=['label'])"
332 | ]
333 | },
334 | {
335 | "cell_type": "code",
336 | "execution_count": null,
337 | "metadata": {},
338 | "outputs": [],
339 | "source": []
340 | }
341 | ],
342 | "metadata": {
343 | "kernelspec": {
344 | "display_name": "Python 3",
345 | "language": "python",
346 | "name": "python3"
347 | },
348 | "language_info": {
349 | "codemirror_mode": {
350 | "name": "ipython",
351 | "version": 3
352 | },
353 | "file_extension": ".py",
354 | "mimetype": "text/x-python",
355 | "name": "python",
356 | "nbconvert_exporter": "python",
357 | "pygments_lexer": "ipython3",
358 | "version": "3.7.4"
359 | }
360 | },
361 | "nbformat": 4,
362 | "nbformat_minor": 2
363 | }
364 |
--------------------------------------------------------------------------------
/competitions/tianchi_elm_delivery/part2_feature.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {
7 | "ExecuteTime": {
8 | "end_time": "2020-03-21T02:06:28.017323Z",
9 | "start_time": "2020-03-21T02:06:27.405178Z"
10 | }
11 | },
12 | "outputs": [],
13 | "source": [
14 | "import numpy as np\n",
15 | "import pandas as pd\n",
16 | "import warnings\n",
17 | "import os\n",
18 | "from tqdm import tqdm\n",
19 | "from sklearn import preprocessing, metrics\n",
20 | "import lightgbm as lgb\n",
21 | "import matplotlib.pyplot as plt\n",
22 | "import seaborn as sns\n",
23 | "from joblib import Parallel, delayed\n",
24 | "\n",
25 | "%matplotlib inline\n",
26 | "\n",
27 | "pd.set_option('display.max_columns', None)\n",
28 | "pd.set_option('display.max_rows', None)\n",
29 | "\n",
30 | "warnings.filterwarnings('ignore')"
31 | ]
32 | },
33 | {
34 | "cell_type": "code",
35 | "execution_count": 2,
36 | "metadata": {
37 | "ExecuteTime": {
38 | "end_time": "2020-03-21T02:06:28.027158Z",
39 | "start_time": "2020-03-21T02:06:28.018988Z"
40 | }
41 | },
42 | "outputs": [],
43 | "source": [
44 | "next_action = pd.read_csv('./temp/next_action.csv')"
45 | ]
46 | },
47 | {
48 | "cell_type": "code",
49 | "execution_count": 3,
50 | "metadata": {
51 | "ExecuteTime": {
52 | "end_time": "2020-03-21T02:06:28.037318Z",
53 | "start_time": "2020-03-21T02:06:28.028192Z"
54 | }
55 | },
56 | "outputs": [
57 | {
58 | "data": {
106 | "text/plain": [
107 | " id\n",
108 | "0 98263\n",
109 | "1 116276\n",
110 | "2 153284\n",
111 | "3 16134\n",
112 | "4 23009"
113 | ]
114 | },
115 | "execution_count": 3,
116 | "metadata": {},
117 | "output_type": "execute_result"
118 | }
119 | ],
120 | "source": [
121 | "next_action.head()"
122 | ]
123 | },
124 | {
125 | "cell_type": "code",
126 | "execution_count": 4,
127 | "metadata": {
128 | "ExecuteTime": {
129 | "end_time": "2020-03-21T02:06:28.442488Z",
130 | "start_time": "2020-03-21T02:06:28.039872Z"
131 | }
132 | },
133 | "outputs": [
134 | {
135 | "name": "stdout",
136 | "output_type": "stream",
137 | "text": [
138 | "(223104, 12)\n",
139 | "(48277, 12)\n"
140 | ]
141 | }
142 | ],
143 | "source": [
144 | "df_feature = pd.read_pickle('./temp/base_feature.plk')\n",
145 | "print(df_feature.shape)\n",
146 | "df_feature = next_action.merge(df_feature, how='left')\n",
147 | "print(df_feature.shape)"
148 | ]
149 | },
150 | {
151 | "cell_type": "code",
152 | "execution_count": 5,
153 | "metadata": {
154 | "ExecuteTime": {
155 | "end_time": "2020-03-21T02:06:28.451429Z",
156 | "start_time": "2020-03-21T02:06:28.443697Z"
157 | }
158 | },
159 | "outputs": [
160 | {
161 | "data": {
162 | "text/plain": [
163 | "train 44058\n",
164 | "test 4219\n",
165 | "Name: type, dtype: int64"
166 | ]
167 | },
168 | "execution_count": 5,
169 | "metadata": {},
170 | "output_type": "execute_result"
171 | }
172 | ],
173 | "source": [
174 | "df_feature['type'].value_counts()"
175 | ]
176 | },
177 | {
178 | "cell_type": "code",
179 | "execution_count": 6,
180 | "metadata": {
181 | "ExecuteTime": {
182 | "end_time": "2020-03-21T02:06:28.464082Z",
183 | "start_time": "2020-03-21T02:06:28.452870Z"
184 | }
185 | },
186 | "outputs": [
187 | {
188 | "data": {
302 | "text/plain": [
303 | " id courier_id wave_index tracking_id \\\n",
304 | "0 98263 10330725 9 2100075923187730175 \n",
305 | "1 116276 10053442 1 2100075314534647435 \n",
306 | "2 153284 118787313 4 2100075078536791439 \n",
307 | "3 16134 116706233 3 2100074825841346124 \n",
308 | "4 23009 118333873 5 2100074746653279446 \n",
309 | "\n",
310 | " courier_wave_start_lng courier_wave_start_lat action_type expect_time \\\n",
311 | "0 121.481429 39.299365 PICKUP 1582888651 \n",
312 | "1 121.479587 39.248115 PICKUP 1582084880 \n",
313 | "2 121.440498 39.203471 DELIVERY 1581671584 \n",
314 | "3 121.543010 39.258822 DELIVERY 1581075896 \n",
315 | "4 121.406669 39.364738 PICKUP 1580906387 \n",
316 | "\n",
317 | " date type target group \n",
318 | "0 20200228 train 1.0 20200228103307259 \n",
319 | "1 20200219 train 1.0 20200219100534421 \n",
320 | "2 20200214 train 1.0 202002141187873134 \n",
321 | "3 20200207 train 1.0 202002071167062333 \n",
322 | "4 20200205 train 1.0 202002051183338735 "
323 | ]
324 | },
325 | "execution_count": 6,
326 | "metadata": {},
327 | "output_type": "execute_result"
328 | }
329 | ],
330 | "source": [
331 | "df_feature.head()"
332 | ]
333 | },
334 | {
335 | "cell_type": "code",
336 | "execution_count": 7,
337 | "metadata": {
338 | "ExecuteTime": {
339 | "end_time": "2020-03-21T02:06:28.560850Z",
340 | "start_time": "2020-03-21T02:06:28.465154Z"
341 | }
342 | },
343 | "outputs": [],
344 | "source": [
345 | "df_feature.to_pickle('./temp/part2_feature.plk')"
346 | ]
347 | },
348 | {
349 | "cell_type": "code",
350 | "execution_count": null,
351 | "metadata": {},
352 | "outputs": [],
353 | "source": []
354 | }
355 | ],
356 | "metadata": {
357 | "kernelspec": {
358 | "display_name": "Python [conda env:dm] *",
359 | "language": "python",
360 | "name": "conda-env-dm-py"
361 | },
362 | "language_info": {
363 | "codemirror_mode": {
364 | "name": "ipython",
365 | "version": 3
366 | },
367 | "file_extension": ".py",
368 | "mimetype": "text/x-python",
369 | "name": "python",
370 | "nbconvert_exporter": "python",
371 | "pygments_lexer": "ipython3",
372 | "version": "3.6.9"
373 | }
374 | },
375 | "nbformat": 4,
376 | "nbformat_minor": 2
377 | }
378 |
--------------------------------------------------------------------------------
/competitions/tianchi_elm_delivery/data.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {
7 | "ExecuteTime": {
8 | "end_time": "2020-03-19T11:03:48.904730Z",
9 | "start_time": "2020-03-19T11:03:48.293439Z"
10 | }
11 | },
12 | "outputs": [],
13 | "source": [
14 | "import numpy as np\n",
15 | "import pandas as pd\n",
16 | "import warnings\n",
17 | "import os\n",
18 | "from tqdm import tqdm\n",
19 | "from sklearn import preprocessing, metrics\n",
20 | "import lightgbm as lgb\n",
21 | "import matplotlib.pyplot as plt\n",
22 | "import seaborn as sns\n",
23 | "from joblib import Parallel, delayed\n",
24 | "\n",
25 | "%matplotlib inline\n",
26 | "\n",
27 | "pd.set_option('display.max_columns', None)\n",
28 | "pd.set_option('display.max_rows', None)\n",
29 | "\n",
30 | "warnings.filterwarnings('ignore')"
31 | ]
32 | },
33 | {
34 | "cell_type": "code",
35 | "execution_count": 2,
36 | "metadata": {
37 | "ExecuteTime": {
38 | "end_time": "2020-03-19T11:03:48.908869Z",
39 | "start_time": "2020-03-19T11:03:48.906472Z"
40 | }
41 | },
42 | "outputs": [],
43 | "source": [
44 | "train_path = './raw_data/eleme_round1_train_20200313'\n",
45 | "test_path = './raw_data/eleme_round1_testA_20200313'"
46 | ]
47 | },
48 | {
49 | "cell_type": "code",
50 | "execution_count": 3,
51 | "metadata": {
52 | "ExecuteTime": {
53 | "end_time": "2020-03-19T11:03:48.992499Z",
54 | "start_time": "2020-03-19T11:03:48.910145Z"
55 | }
56 | },
57 | "outputs": [],
58 | "source": [
59 | "courier_list = []\n",
60 | "# courier data\n",
61 | "for f in os.listdir(os.path.join(train_path, 'courier')):\n",
62 | " date = f.split('.')[0].split('_')[1]\n",
63 | " df = pd.read_csv(os.path.join(train_path, 'courier', f))\n",
64 | " df['date'] = date\n",
65 | " courier_list.append(df)\n",
66 | "\n",
67 | "for f in os.listdir(os.path.join(test_path, 'courier')):\n",
68 | " date = f.split('.')[0].split('_')[1]\n",
69 | " df = pd.read_csv(os.path.join(test_path, 'courier', f))\n",
70 | " df['date'] = date\n",
71 | " courier_list.append(df)\n",
72 | "\n",
73 | "df_courier = pd.concat(courier_list, sort=False)\n",
74 | "df_courier.to_pickle('./temp/courier.plk')"
75 | ]
76 | },
77 | {
78 | "cell_type": "code",
79 | "execution_count": 4,
80 | "metadata": {
81 | "ExecuteTime": {
82 | "end_time": "2020-03-19T11:03:49.759178Z",
83 | "start_time": "2020-03-19T11:03:48.993787Z"
84 | }
85 | },
86 | "outputs": [],
87 | "source": [
88 | "order_list = []\n",
89 | "# order data\n",
90 | "for f in os.listdir(os.path.join(train_path, 'order')):\n",
91 | " date = f.split('.')[0].split('_')[1]\n",
92 | " df = pd.read_csv(os.path.join(train_path, 'order', f))\n",
93 | " df['date'] = date\n",
94 | " order_list.append(df)\n",
95 | "\n",
96 | "for f in os.listdir(os.path.join(test_path, 'order')):\n",
97 | " date = f.split('.')[0].split('_')[1]\n",
98 | " df = pd.read_csv(os.path.join(test_path, 'order', f))\n",
99 | " df['date'] = date\n",
100 | " order_list.append(df)\n",
101 | "\n",
102 | "df_order = pd.concat(order_list, sort=False)\n",
103 | "df_order.to_pickle('./temp/order.plk')"
104 | ]
105 | },
106 | {
107 | "cell_type": "code",
108 | "execution_count": 5,
109 | "metadata": {
110 | "ExecuteTime": {
111 | "end_time": "2020-03-19T11:04:25.926110Z",
112 | "start_time": "2020-03-19T11:03:49.760577Z"
113 | }
114 | },
115 | "outputs": [],
116 | "source": [
117 | "distance_list = []\n",
118 | "# distance data\n",
119 | "for f in os.listdir(os.path.join(train_path, 'distance')):\n",
120 | " date = f.split('.')[0].split('_')[1]\n",
121 | " df = pd.read_csv(os.path.join(train_path, 'distance', f))\n",
122 | " df['date'] = date\n",
123 | " distance_list.append(df)\n",
124 | "\n",
125 | "for f in os.listdir(os.path.join(test_path, 'distance')):\n",
126 | " date = f.split('.')[0].split('_')[1]\n",
127 | " df = pd.read_csv(os.path.join(test_path, 'distance', f))\n",
128 | " df['date'] = date\n",
129 | " distance_list.append(df)\n",
130 | "\n",
131 | "df_distance = pd.concat(distance_list, sort=False)\n",
132 | "df_distance['group'] = df_distance['date'].astype(\n",
133 | " 'str') + df_distance['courier_id'].astype('str') + df_distance['wave_index'].astype('str')\n",
134 | "df_distance.to_pickle('./temp/distance.plk')"
135 | ]
136 | },
137 | {
138 | "cell_type": "code",
139 | "execution_count": 6,
140 | "metadata": {
141 | "ExecuteTime": {
142 | "end_time": "2020-03-19T11:04:25.937956Z",
143 | "start_time": "2020-03-19T11:04:25.929096Z"
144 | }
145 | },
146 | "outputs": [],
147 | "source": [
148 | "# the last 55% of actions in each wave are the ones to be predicted\n",
149 | "ratio = 0.55\n",
150 | "\n",
151 | "\n",
152 | "def read_feat(df):\n",
153 | " label_list = []\n",
154 | " history_list = []\n",
155 | "    data_type = df['type'].values[0]\n",
156 | "\n",
157 | "    # split each (courier_id, wave_index) wave into a history part and a label part\n",
158 | " groups = df.groupby(['courier_id', 'wave_index'])\n",
159 | " for name, group in tqdm(groups):\n",
160 | "        if data_type == 'train':\n",
161 | " label_data = group.tail(int(group.shape[0] * ratio))\n",
162 | " history_data = group.drop(label_data.index)\n",
163 | "\n",
164 | " if label_data.shape[0] < 3:\n",
165 | " continue\n",
166 | " else:\n",
167 | "            # the first action is the positive sample, the rest are negatives\n",
168 | " label_data['target'] = 0\n",
169 | " label_data.reset_index(drop=True, inplace=True)\n",
170 | " label_data.loc[0, 'target'] = 1\n",
171 | " label_list.append(label_data)\n",
172 | " history_list.append(history_data)\n",
173 | " else:\n",
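174 | "            # for test waves, rows with expect_time == 0 are the unknown future actions to predict\n",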
174 | " label_data = group[group['expect_time'] == 0]\n",
175 | " history_data = group.drop(label_data.index)\n",
176 | "\n",
177 | " label_data['target'] = None\n",
178 | " label_list.append(label_data)\n",
179 | " history_list.append(history_data)\n",
180 | "\n",
181 | " return pd.concat(label_list, sort=False), pd.concat(history_list, sort=False)"
182 | ]
183 | },
184 | {
185 | "cell_type": "code",
186 | "execution_count": 7,
187 | "metadata": {
188 | "ExecuteTime": {
189 | "end_time": "2020-03-19T11:04:57.252940Z",
190 | "start_time": "2020-03-19T11:04:25.940141Z"
191 | },
192 | "scrolled": true
193 | },
194 | "outputs": [
195 | {
196 | "name": "stderr",
197 | "output_type": "stream",
198 | "text": [
199 | "100%|██████████| 35/35 [00:13<00:00, 2.53it/s]\n"
200 | ]
201 | }
202 | ],
203 | "source": [
204 | "df_actions = []\n",
205 | "for f in os.listdir(os.path.join(train_path, 'action')):\n",
206 | " date = f.split('.')[0].split('_')[1]\n",
207 | " df = pd.read_csv(os.path.join(train_path, 'action', f))\n",
208 | " df['date'] = date\n",
209 | " df['type'] = 'train'\n",
210 | " df_actions.append(df)\n",
211 | "\n",
212 | "for f in os.listdir(os.path.join(test_path, 'action')):\n",
213 | " date = f.split('.')[0].split('_')[1]\n",
214 | " df = pd.read_csv(os.path.join(test_path, 'action', f))\n",
215 | " df['date'] = date\n",
216 | " df['type'] = 'test'\n",
217 | " df_actions.append(df)\n",
218 | "\n",
219 | "res = Parallel(n_jobs=12)(delayed(read_feat)(df) for df in tqdm(df_actions))\n",
220 | "df_feature = [item[0] for item in res]\n",
221 | "df_history = [item[1] for item in res]\n",
222 | "\n",
223 | "df_feature = pd.concat(df_feature, sort=False)\n",
224 | "df_history = pd.concat(df_history, sort=False)"
225 | ]
226 | },
227 | {
228 | "cell_type": "code",
229 | "execution_count": 8,
230 | "metadata": {
231 | "ExecuteTime": {
232 | "end_time": "2020-03-19T11:04:57.814938Z",
233 | "start_time": "2020-03-19T11:04:57.254610Z"
234 | }
235 | },
236 | "outputs": [],
237 | "source": [
238 | "df_feature['group'] = df_feature['date'].astype(\n",
239 | " 'str') + df_feature['courier_id'].astype('str') + df_feature['wave_index'].astype('str')\n",
240 | "df_history['group'] = df_history['date'].astype(\n",
241 | " 'str') + df_history['courier_id'].astype('str') + df_history['wave_index'].astype('str')\n",
242 | "df_feature['target'] = df_feature['target'].astype('float')\n",
243 | "df_feature['id'] = range(df_feature.shape[0])"
244 | ]
245 | },
246 | {
247 | "cell_type": "code",
248 | "execution_count": 9,
249 | "metadata": {
250 | "ExecuteTime": {
251 | "end_time": "2020-03-19T11:04:57.831102Z",
252 | "start_time": "2020-03-19T11:04:57.816186Z"
253 | }
254 | },
255 | "outputs": [
256 | {
257 | "data": {
371 | "text/plain": [
372 | " courier_id wave_index tracking_id courier_wave_start_lng \\\n",
373 | "0 10007871 0 2100074550065333539 121.630997 \n",
374 | "1 10007871 0 2100074550779577850 121.630997 \n",
375 | "2 10007871 0 2100074550779577850 121.630997 \n",
376 | "0 10007871 1 2100074555638285402 121.631208 \n",
377 | "1 10007871 1 2100074554118800474 121.631208 \n",
378 | "\n",
379 | " courier_wave_start_lat action_type expect_time date type target \\\n",
380 | "0 39.142343 DELIVERY 1580528963 20200201 train 1.0 \n",
381 | "1 39.142343 PICKUP 1580529129 20200201 train 0.0 \n",
382 | "2 39.142343 DELIVERY 1580529444 20200201 train 0.0 \n",
383 | "0 39.142519 PICKUP 1580532225 20200201 train 1.0 \n",
384 | "1 39.142519 PICKUP 1580532227 20200201 train 0.0 \n",
385 | "\n",
386 | " group id \n",
387 | "0 20200201100078710 0 \n",
388 | "1 20200201100078710 1 \n",
389 | "2 20200201100078710 2 \n",
390 | "0 20200201100078711 3 \n",
391 | "1 20200201100078711 4 "
392 | ]
393 | },
394 | "execution_count": 9,
395 | "metadata": {},
396 | "output_type": "execute_result"
397 | }
398 | ],
399 | "source": [
400 | "df_feature.head()"
401 | ]
402 | },
403 | {
404 | "cell_type": "code",
405 | "execution_count": 10,
406 | "metadata": {
407 | "ExecuteTime": {
408 | "end_time": "2020-03-19T11:04:57.984646Z",
409 | "start_time": "2020-03-19T11:04:57.832199Z"
410 | }
411 | },
412 | "outputs": [],
413 | "source": [
414 | "df_history.to_pickle('./temp/action_history.plk')\n",
415 | "df_feature.to_pickle('./temp/base_feature.plk')"
416 | ]
417 | },
418 | {
419 | "cell_type": "code",
420 | "execution_count": null,
421 | "metadata": {},
422 | "outputs": [],
423 | "source": []
424 | }
425 | ],
426 | "metadata": {
427 | "kernelspec": {
428 | "display_name": "Python [conda env:dm] *",
429 | "language": "python",
430 | "name": "conda-env-dm-py"
431 | },
432 | "language_info": {
433 | "codemirror_mode": {
434 | "name": "ipython",
435 | "version": 3
436 | },
437 | "file_extension": ".py",
438 | "mimetype": "text/x-python",
439 | "name": "python",
440 | "nbconvert_exporter": "python",
441 | "pygments_lexer": "ipython3",
442 | "version": "3.6.9"
443 | }
444 | },
445 | "nbformat": 4,
446 | "nbformat_minor": 2
447 | }
448 |
--------------------------------------------------------------------------------
/competitions/pingan_baoxian/model.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {
7 | "ExecuteTime": {
8 | "end_time": "2021-04-28T14:23:46.655980Z",
9 | "start_time": "2021-04-28T14:23:45.926568Z"
10 | },
11 | "id": "4AA5668B2F574911B6C76E41E143BDEF",
12 | "jupyter": {},
13 | "scrolled": false,
14 | "slideshow": {
15 | "slide_type": "slide"
16 | },
17 | "tags": []
18 | },
19 | "outputs": [],
20 | "source": [
21 | "import pandas as pd\n",
22 | "from tqdm import tqdm\n",
23 | "import warnings\n",
24 | "import gc\n",
25 | "import os\n",
26 | "import lightgbm as lgb\n",
27 | "from sklearn.model_selection import StratifiedKFold\n",
28 | "from sklearn.preprocessing import LabelEncoder\n",
29 | "from gensim.models import Word2Vec\n",
30 | "from collections import OrderedDict\n",
31 | "from sklearn.feature_extraction.text import TfidfVectorizer\n",
32 | "from sklearn.metrics import roc_auc_score\n",
33 | "import time\n",
34 | "from itertools import combinations\n",
35 | "\n",
36 | "pd.set_option('display.max_columns', None)\n",
37 | "pd.set_option('display.max_rows', None)\n",
38 | "\n",
39 | "warnings.filterwarnings('ignore')"
40 | ]
41 | },
42 | {
43 | "cell_type": "code",
44 | "execution_count": null,
45 | "metadata": {
46 | "ExecuteTime": {
47 | "end_time": "2021-04-28T14:23:46.659536Z",
48 | "start_time": "2021-04-28T14:23:46.657559Z"
49 | },
50 | "id": "760ABA20B7B7478C82CC9C50D5C81CB9",
51 | "jupyter": {},
52 | "notebookId": "60881e4fbb1fc90018675604",
53 | "scrolled": false,
54 | "slideshow": {
55 | "slide_type": "slide"
56 | },
57 | "tags": []
58 | },
59 | "outputs": [],
60 | "source": [
61 | "seed = 2021"
62 | ]
63 | },
64 | {
65 | "cell_type": "code",
66 | "execution_count": null,
67 | "metadata": {
68 | "ExecuteTime": {
69 | "end_time": "2021-04-28T14:23:51.465721Z",
70 | "start_time": "2021-04-28T14:23:46.660592Z"
71 | },
72 | "id": "7B41CECDA2B7418F862AEF5346103031",
73 | "jupyter": {},
74 | "scrolled": false,
75 | "slideshow": {
76 | "slide_type": "slide"
77 | },
78 | "tags": []
79 | },
80 | "outputs": [],
81 | "source": [
82 | "df_train = pd.read_csv('/home/mw/input/pre8881/train.csv')\n",
83 | "df_test = pd.read_csv('/home/mw/input/pretest_a3048/test_a.csv')"
84 | ]
85 | },
86 | {
87 | "cell_type": "code",
88 | "execution_count": null,
89 | "metadata": {
90 | "ExecuteTime": {
91 | "end_time": "2021-04-28T14:23:51.475018Z",
92 | "start_time": "2021-04-28T14:23:51.467257Z"
93 | },
94 | "id": "1041E7F5204B43DB885E1EADAA56669E",
95 | "jupyter": {},
96 | "scrolled": false,
97 | "slideshow": {
98 | "slide_type": "slide"
99 | },
100 | "tags": []
101 | },
102 | "outputs": [],
103 | "source": [
104 | "df_train.shape, df_test.shape"
105 | ]
106 | },
107 | {
108 | "cell_type": "code",
109 | "execution_count": null,
110 | "metadata": {
111 | "ExecuteTime": {
112 | "end_time": "2021-04-28T14:23:52.049755Z",
113 | "start_time": "2021-04-28T14:23:51.476567Z"
114 | },
115 | "id": "0164F7942CA04865822AE48A5F351390",
116 | "jupyter": {},
117 | "notebookId": "60881e4fbb1fc90018675604",
118 | "scrolled": false,
119 | "slideshow": {
120 | "slide_type": "slide"
121 | },
122 | "tags": []
123 | },
124 | "outputs": [],
125 | "source": [
126 | "df_feature = df_train.append(df_test, sort=False)"
127 | ]
128 | },
129 | {
130 | "cell_type": "code",
131 | "execution_count": null,
132 | "metadata": {
133 | "ExecuteTime": {
134 | "end_time": "2021-04-28T14:23:52.097660Z",
135 | "start_time": "2021-04-28T14:23:52.051164Z"
136 | },
137 | "id": "769C1C13ADB244DCB0CB1D1C4A5DF522",
138 | "jupyter": {},
139 | "scrolled": false,
140 | "slideshow": {
141 | "slide_type": "slide"
142 | },
143 | "tags": []
144 | },
145 | "outputs": [],
146 | "source": [
147 | "df_feature.head()"
148 | ]
149 | },
150 | {
151 | "cell_type": "code",
152 | "execution_count": null,
153 | "metadata": {
154 | "ExecuteTime": {
155 | "end_time": "2021-04-28T14:23:52.107115Z",
156 | "start_time": "2021-04-28T14:23:52.100602Z"
157 | }
158 | },
159 | "outputs": [],
160 | "source": [
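161 |     "# ratio feature (assuming nprem_tp is a premium amount and si_tp the corresponding insured amount)\n",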
161 | "df_feature['tp_ratio'] = df_feature['nprem_tp'] / df_feature['si_tp']"
162 | ]
163 | },
164 | {
165 | "cell_type": "code",
166 | "execution_count": null,
167 | "metadata": {
168 | "ExecuteTime": {
169 | "end_time": "2021-04-28T14:24:19.500082Z",
170 | "start_time": "2021-04-28T14:23:52.108544Z"
171 | }
172 | },
173 | "outputs": [],
174 | "source": [
175 |     "# count features: value frequencies for several categorical columns\n",
176 | "for f in [['dpt'], ['client_no'], ['trademark_cn'], ['brand_cn'], ['make_cn'], ['series']]:\n",
177 | " df_temp = df_feature.groupby(f).size().reset_index()\n",
178 | " df_temp.columns = f + ['{}_count'.format('_'.join(f))]\n",
179 | " df_feature = df_feature.merge(df_temp, how='left')"
180 | ]
181 | },
182 | {
183 | "cell_type": "code",
184 | "execution_count": null,
185 | "metadata": {
186 | "ExecuteTime": {
187 | "end_time": "2021-04-28T14:24:19.912533Z",
188 | "start_time": "2021-04-28T14:24:19.501393Z"
189 | },
190 | "id": "2C0A8CBFDC414C8993D60075871CDD30",
191 | "jupyter": {},
192 | "notebookId": "60881e4fbb1fc90018675604",
193 | "scrolled": false,
194 | "slideshow": {
195 | "slide_type": "slide"
196 | },
197 | "tags": []
198 | },
199 | "outputs": [],
200 | "source": [
201 | "df_feature['birth_month'] = df_feature['birth_month'].apply(\n",
202 | " lambda x: int(x[:-1]) if type(x) != float else 0)"
203 | ]
204 | },
205 | {
206 | "cell_type": "code",
207 | "execution_count": null,
208 | "metadata": {
209 | "ExecuteTime": {
210 | "end_time": "2021-04-28T14:24:19.918591Z",
211 | "start_time": "2021-04-28T14:24:19.913888Z"
212 | }
213 | },
214 | "outputs": [],
215 | "source": [
216 |     "# simple group statistics\n",
217 | "def stat(df, df_merge, group_by, agg):\n",
218 | " group = df.groupby(group_by).agg(agg)\n",
219 | "\n",
220 | " columns = []\n",
221 | " for on, methods in agg.items():\n",
222 | " for method in methods:\n",
223 | " columns.append('{}_{}_{}'.format('_'.join(group_by), on, method))\n",
224 | " group.columns = columns\n",
225 | " group.reset_index(inplace=True)\n",
226 | " df_merge = df_merge.merge(group, on=group_by, how='left')\n",
227 | "\n",
228 | " del (group)\n",
229 | " gc.collect()\n",
230 | "\n",
231 | " return df_merge\n",
232 | "\n",
233 | "\n",
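234 |     "# target statistics: mean y1_is_purchase per group, learned on df_know and merged onto df_unknow\n",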
234 | "def statis_feat(df_know, df_unknow):\n",
235 | " for f in tqdm(['p1_census_register', 'dpt']):\n",
236 | " df_unknow = stat(df_know, df_unknow, [f], {\n",
237 | " 'y1_is_purchase': ['mean']})\n",
238 | "\n",
239 | " return df_unknow"
240 | ]
241 | },
242 | {
243 | "cell_type": "code",
244 | "execution_count": null,
245 | "metadata": {
246 | "ExecuteTime": {
247 | "end_time": "2021-04-28T14:24:32.066233Z",
248 | "start_time": "2021-04-28T14:24:19.919591Z"
249 | },
250 | "code_folding": [
251 | 1
252 | ]
253 | },
254 | "outputs": [],
255 | "source": [
256 |     "# 5-fold out-of-fold target statistics: features for each validation fold use only the other folds to avoid leakage\n",
257 | "df_train = df_feature[~df_feature['y1_is_purchase'].isnull()]\n",
258 | "df_train = df_train.reset_index(drop=True)\n",
259 | "df_test = df_feature[df_feature['y1_is_purchase'].isnull()]\n",
260 | "\n",
261 | "df_stas_feat = None\n",
262 | "kfold = StratifiedKFold(n_splits=5, random_state=seed, shuffle=True)\n",
263 | "for train_index, val_index in kfold.split(df_train, df_train['y1_is_purchase']):\n",
264 | " df_fold_train = df_train.iloc[train_index]\n",
265 | " df_fold_val = df_train.iloc[val_index]\n",
266 | "\n",
267 | " df_fold_val = statis_feat(df_fold_train, df_fold_val)\n",
268 | " df_stas_feat = pd.concat([df_stas_feat, df_fold_val], axis=0)\n",
269 | "\n",
270 | " del(df_fold_train)\n",
271 | " del(df_fold_val)\n",
272 | " gc.collect()\n",
273 | "\n",
274 | "df_test = statis_feat(df_train, df_test)\n",
275 | "df_feature = pd.concat([df_stas_feat, df_test], axis=0)\n",
276 | "\n",
277 | "del(df_stas_feat)\n",
278 | "del(df_train)\n",
279 | "del(df_test)\n",
280 | "gc.collect()"
281 | ]
282 | },
283 | {
284 | "cell_type": "code",
285 | "execution_count": null,
286 | "metadata": {
287 | "ExecuteTime": {
288 | "end_time": "2021-04-28T14:24:32.113010Z",
289 | "start_time": "2021-04-28T14:24:32.067408Z"
290 | }
291 | },
292 | "outputs": [],
293 | "source": [
294 | "df_feature.head()"
295 | ]
296 | },
297 | {
298 | "cell_type": "markdown",
299 | "metadata": {},
300 | "source": [
301 |     "# Model training"
302 | ]
303 | },
304 | {
305 | "cell_type": "code",
306 | "execution_count": null,
307 | "metadata": {
308 | "ExecuteTime": {
309 | "end_time": "2021-04-28T14:24:39.717353Z",
310 | "start_time": "2021-04-28T14:24:32.114095Z"
311 | },
312 | "id": "EF950A935E6B41AB8F1E0CEC03DF8CCA",
313 | "jupyter": {},
314 | "notebookId": "60881e4fbb1fc90018675604",
315 | "scrolled": false,
316 | "slideshow": {
317 | "slide_type": "slide"
318 | },
319 | "tags": []
320 | },
321 | "outputs": [],
322 | "source": [
323 | "for f in list(df_feature.select_dtypes('object')):\n",
324 | " if f in ['carid', 'regdate']:\n",
325 | " continue\n",
326 | " le = LabelEncoder()\n",
327 | " df_feature[f] = le.fit_transform(\n",
328 | " df_feature[f].astype('str')).astype('int')"
329 | ]
330 | },
331 | {
332 | "cell_type": "code",
333 | "execution_count": null,
334 | "metadata": {
335 | "ExecuteTime": {
336 | "end_time": "2021-04-28T14:24:40.023172Z",
337 | "start_time": "2021-04-28T14:24:39.718886Z"
338 | },
339 | "id": "784C1D25FDC94691AD7C71DD61336944",
340 | "jupyter": {},
341 | "notebookId": "60881e4fbb1fc90018675604",
342 | "scrolled": false,
343 | "slideshow": {
344 | "slide_type": "slide"
345 | },
346 | "tags": []
347 | },
348 | "outputs": [],
349 | "source": [
350 | "df_train = df_feature[df_feature['y1_is_purchase'].notnull()]\n",
351 | "df_test = df_feature[df_feature['y1_is_purchase'].isnull()]"
352 | ]
353 | },
354 | {
355 | "cell_type": "code",
356 | "execution_count": null,
357 | "metadata": {
358 | "ExecuteTime": {
359 | "end_time": "2021-04-28T14:40:28.582706Z",
360 | "start_time": "2021-04-28T14:24:40.024569Z"
361 | },
362 | "id": "B5D42123DC18447D8817A94ADD49B2AB",
363 | "jupyter": {},
364 | "notebookId": "60881e4fbb1fc90018675604",
365 | "scrolled": true,
366 | "slideshow": {
367 | "slide_type": "slide"
368 | },
369 | "tags": []
370 | },
371 | "outputs": [],
372 | "source": [
373 | "ycol = 'y1_is_purchase'\n",
374 | "feature_names = list(\n",
375 | " filter(lambda x: x not in [ycol, 'regdate', 'carid'], df_train.columns))\n",
376 | "\n",
377 | "model = lgb.LGBMClassifier(num_leaves=64,\n",
378 | " max_depth=10,\n",
379 | " learning_rate=0.01,\n",
380 | " n_estimators=10000,\n",
381 | " subsample=0.8,\n",
382 | " feature_fraction=0.8,\n",
383 | " reg_alpha=0.5,\n",
384 | " reg_lambda=0.5,\n",
385 | " random_state=seed,\n",
386 | " metric=None)\n",
387 | "\n",
388 | "oof = []\n",
389 | "prediction = df_test[['carid']]\n",
390 | "prediction['label'] = 0\n",
391 | "df_importance_list = []\n",
392 | "\n",
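393 |     "# 5-fold stratified CV: early stopping on validation AUC, test predictions averaged across folds\n",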
393 | "kfold = StratifiedKFold(n_splits=5, random_state=seed, shuffle=True)\n",
394 | "for fold_id, (trn_idx, val_idx) in enumerate(kfold.split(\n",
395 | " df_train[feature_names], df_train[ycol])):\n",
396 | " X_train = df_train.iloc[trn_idx][feature_names]\n",
397 | " Y_train = df_train.iloc[trn_idx][ycol]\n",
398 | "\n",
399 | " X_val = df_train.iloc[val_idx][feature_names]\n",
400 | " Y_val = df_train.iloc[val_idx][ycol]\n",
401 | "\n",
402 | " print('\\nFold_{} Training ================================\\n'.format(fold_id+1))\n",
403 | "\n",
404 | " lgb_model = model.fit(X_train,\n",
405 | " Y_train,\n",
406 | " eval_names=['valid'],\n",
407 | " eval_set=[(X_val, Y_val)],\n",
408 | " verbose=500,\n",
409 | " eval_metric='auc',\n",
410 | " early_stopping_rounds=50)\n",
411 | "\n",
412 | " pred_val = lgb_model.predict_proba(\n",
413 | " X_val, num_iteration=lgb_model.best_iteration_)[:, 1]\n",
414 | " df_oof = df_train.iloc[val_idx][[\n",
415 | " 'carid', ycol]].copy()\n",
416 | " df_oof['pred'] = pred_val\n",
417 | " oof.append(df_oof)\n",
418 | "\n",
419 | " pred_test = lgb_model.predict_proba(\n",
420 | " df_test[feature_names], num_iteration=lgb_model.best_iteration_)[:, 1]\n",
421 | " prediction['label'] += pred_test / 5\n",
422 | "\n",
423 | " df_importance = pd.DataFrame({\n",
424 | " 'column': feature_names,\n",
425 | " 'importance': lgb_model.feature_importances_,\n",
426 | " })\n",
427 | " df_importance_list.append(df_importance)\n",
428 | "\n",
429 | " del lgb_model, pred_val, pred_test, X_train, Y_train, X_val, Y_val\n",
430 | " gc.collect()"
431 | ]
432 | },
433 | {
434 | "cell_type": "code",
435 | "execution_count": null,
436 | "metadata": {
437 | "ExecuteTime": {
438 | "end_time": "2021-04-28T14:40:28.597051Z",
439 | "start_time": "2021-04-28T14:40:28.583851Z"
440 | },
441 | "id": "25F80E8277D0459495CFBA4A007964EB",
442 | "jupyter": {},
443 | "notebookId": "60881e4fbb1fc90018675604",
444 | "scrolled": true,
445 | "slideshow": {
446 | "slide_type": "slide"
447 | },
448 | "tags": []
449 | },
450 | "outputs": [],
451 | "source": [
452 | "df_importance = pd.concat(df_importance_list)\n",
453 | "df_importance = df_importance.groupby(['column'])['importance'].agg(\n",
454 | " 'mean').sort_values(ascending=False).reset_index()\n",
455 | "df_importance"
456 | ]
457 | },
458 | {
459 | "cell_type": "code",
460 | "execution_count": null,
461 | "metadata": {
462 | "ExecuteTime": {
463 | "end_time": "2021-04-28T14:40:28.862348Z",
464 | "start_time": "2021-04-28T14:40:28.598023Z"
465 | },
466 | "id": "F9ABEBA707364ED3834E0E074CA2F989",
467 | "jupyter": {},
468 | "notebookId": "60881e4fbb1fc90018675604",
469 | "scrolled": false,
470 | "slideshow": {
471 | "slide_type": "slide"
472 | },
473 | "tags": []
474 | },
475 | "outputs": [],
476 | "source": [
477 | "df_oof = pd.concat(oof)\n",
478 | "score = roc_auc_score(df_oof['y1_is_purchase'], df_oof['pred'])\n",
479 | "score"
480 | ]
481 | },
482 | {
483 | "cell_type": "code",
484 | "execution_count": null,
485 | "metadata": {
486 | "ExecuteTime": {
487 | "end_time": "2021-04-28T14:40:28.865884Z",
488 | "start_time": "2021-04-28T14:40:28.863367Z"
489 | }
490 | },
491 | "outputs": [],
492 | "source": [
493 | "score"
494 | ]
495 | },
496 | {
497 | "cell_type": "code",
498 | "execution_count": null,
499 | "metadata": {
500 | "ExecuteTime": {
501 | "end_time": "2021-04-28T14:54:44.470340Z",
502 | "start_time": "2021-04-28T14:54:44.455375Z"
503 | },
504 | "id": "FE1D582019674017823D0BA541210420",
505 | "jupyter": {},
506 | "notebookId": "60881e4fbb1fc90018675604",
507 | "scrolled": false,
508 | "slideshow": {
509 | "slide_type": "slide"
510 | },
511 | "tags": []
512 | },
513 | "outputs": [],
514 | "source": [
515 | "df_oof.head(20)"
516 | ]
517 | },
518 | {
519 | "cell_type": "code",
520 | "execution_count": null,
521 | "metadata": {
522 | "ExecuteTime": {
523 | "end_time": "2021-04-28T14:40:28.925397Z",
524 | "start_time": "2021-04-28T14:40:28.875204Z"
525 | }
526 | },
527 | "outputs": [],
528 | "source": [
529 | "prediction.head()"
530 | ]
531 | },
532 | {
533 | "cell_type": "code",
534 | "execution_count": null,
535 | "metadata": {
536 | "ExecuteTime": {
537 | "end_time": "2021-04-28T14:40:29.825598Z",
538 | "start_time": "2021-04-28T14:40:28.926460Z"
539 | }
540 | },
541 | "outputs": [],
542 | "source": [
543 | "os.makedirs('sub', exist_ok=True)\n",
544 | "prediction.to_csv(f'sub/{score}.csv', index=False)\n",
545 | "prediction.to_csv(f'sub/sub.csv', index=False)"
546 | ]
547 | },
548 | {
549 | "cell_type": "code",
550 | "execution_count": null,
551 | "metadata": {},
552 | "outputs": [],
553 | "source": []
554 | }
555 | ],
556 | "metadata": {
557 | "kernelspec": {
558 | "display_name": "Python [conda env:dm] *",
559 | "language": "python",
560 | "name": "conda-env-dm-py"
561 | },
562 | "language_info": {
563 | "codemirror_mode": {
564 | "name": "ipython",
565 | "version": 3
566 | },
567 | "file_extension": ".py",
568 | "mimetype": "text/x-python",
569 | "name": "python",
570 | "nbconvert_exporter": "python",
571 | "pygments_lexer": "ipython3",
572 | "version": "3.6.9"
573 | }
574 | },
575 | "nbformat": 4,
576 | "nbformat_minor": 1
577 | }
578 |
--------------------------------------------------------------------------------