├── README.md
└── baseline.py

/README.md:
--------------------------------------------------------------------------------
# Baseline for the Xiamen International Bank Financial Marketing Modeling Competition

### Score

Online: 0.46343 (tested and reported by fellow competitor 拉格朗日). Submissions are too precious for me to have submitted this baseline myself.

This baseline is a stripped-down version of my current best-scoring solution: stripped down in the sense that it does feature engineering only on the official AUM point-in-time asset data. Simply widening the feature set should make it easy to gain score, and you may well overtake me quickly.

### Thought process

Whatever the competition, the first step is to understand the task background and objective, and after that the exact meaning of the label. So the first question I asked in the discussion group was what the label means, and the veterans kindly relayed the official answer: -1 = decline, 0 = stable, 1 = increase.

Since the goal is to predict changes in customers' funds, the most important data is naturally the AUM point-in-time asset data, so my first baseline was built on it alone.

"Movement" means change, so the asset features must revolve around change, of which there are two kinds: within-quarter and between-quarter. Through a small mistake of my own I noticed that using the Q3 and Q4 data to predict the Q3 labels gives a very high Kappa (leakage). This shows that between-quarter features are very important, and that many users' labels arise from asset flows across the quarter boundary. Features built only from the current quarter's movements are therefore not enough, and this cross-quarter information is especially valuable for labeling the users who were left unlabeled in Q3.
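To make the two kinds of change concrete, here is a minimal sketch (not part of `baseline.py`; the `X1_{month}` column names follow the convention the script below creates when loading the AUM files, and the month numbers are illustrative assumptions):

```python
import pandas as pd

def change_features(df: pd.DataFrame) -> pd.DataFrame:
    """Illustrative sketch: both change types for one asset column (X1),
    assuming a quarter covering months 10-12 with month 9 as the previous
    quarter end."""
    out = df.copy()
    # Within-quarter change: quarter end vs. the quarter's first month.
    out['X1_intra'] = out['X1_12'] - out['X1_10']
    # Between-quarter change: quarter end vs. previous quarter end; this is
    # the signal the accidental "leak" experiment showed to be strongest.
    out['X1_inter'] = out['X1_12'] - out['X1_9']
    return out
```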
### Directions worth trying

1. For users with point-in-time asset data in two consecutive quarters, use a model trained on the previous quarter's labels to predict the unlabeled users, and add the high-confidence ones to the training set (not tried; see the sketch below).

2. Train with class weights (tried; this is the `weight` column in `baseline.py`).

3. Mine hand-crafted rules (in progress).
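A minimal sketch of direction 1's confidence filter (an assumption of how it could look, not tested code from my solution; `booster` is any trained multiclass booster such as the one `model_train` below returns, and the 0.9 threshold is arbitrary):

```python
def select_pseudo_labels(booster, unlabeled_df, feature, threshold=0.9):
    """Keep only the unlabeled users the previous-quarter model is confident about."""
    proba = booster.predict(unlabeled_df[feature])     # (n_samples, 3) class probabilities
    confident = proba.max(axis=1) >= threshold         # confidence filter
    pseudo = unlabeled_df.loc[confident].copy()
    pseudo['label'] = proba[confident].argmax(axis=1)  # 0/1/2, same encoding as training
    return pseudo  # concat with Train_data and retrain
```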
--------------------------------------------------------------------------------
/baseline.py:
--------------------------------------------------------------------------------
import warnings

import lightgbm as lgb
import numpy as np
import pandas as pd
from sklearn.metrics import cohen_kappa_score
from sklearn.model_selection import KFold

warnings.filterwarnings('ignore')


def gen_pseudo_data():
    """Write all-NaN month-6 placeholder files.

    get_data() merges a monthly window that reaches back to month 6, which
    the raw training data does not include, so empty copies are generated.
    """
    tmp_aum = pd.read_csv('./train/aum/aum_m10.csv')
    tmp_aum[['X1', 'X2', 'X3', 'X4', 'X5', 'X6', 'X7', 'X8']] = np.nan
    tmp_aum.to_csv('./train/aum/aum_m6.csv', index=None)
    tmp_cunkuan = pd.read_csv('./train/cunkuan/cunkuan_m10.csv')
    tmp_cunkuan[['C1', 'C2']] = np.nan
    tmp_cunkuan.to_csv('./train/cunkuan/cunkuan_m6.csv', index=None)
    tmp_beh = pd.read_csv('./train/behavior/behavior_m9.csv')
    tmp_beh[['B1', 'B2', 'B3', 'B4', 'B5', 'B6', 'B7']] = np.nan
    tmp_beh.to_csv('./train/behavior/behavior_m6.csv', index=None)


class GetData(object):

    def load_aum_data(self, path='./', m=1, iftrain=True):
        data = pd.read_csv(path + 'aum_m{}.csv'.format(m))
        # Test months are shifted by 12 so that train and test lie on one
        # continuous month axis (train: 6-12, test: 13-15).
        m = m if iftrain else m + 12
        data.columns = ['cust_no'] + ['X{}_{}'.format(i, m) for i in range(1, 9)]
        return data

    def load_cunkuan_data(self, path='./', m=1, iftrain=True):
        data = pd.read_csv(path + 'cunkuan_m{}.csv'.format(m))
        data['C3'] = data['C1'] / (1 + data['C2'])  # deposit amount per account
        m = m if iftrain else m + 12
        data.columns = ['cust_no', 'C1_{}'.format(m), 'C2_{}'.format(m), 'C3_{}'.format(m)]
        return data

    def load_beh_data(self, path='./', m=1, iftrain=True):
        data = pd.read_csv(path + 'behavior_m{}.csv'.format(m))
        m = m if iftrain else m + 12
        if m in [3, 6, 9, 12, 15]:
            # Days between the last transaction and the end of the quarter.
            data['x'] = pd.to_datetime(data['B6']) + pd.tseries.offsets.QuarterEnd()
            data['B6'] = (pd.to_datetime(data['x']) - pd.to_datetime(data['B6'])).dt.days
            data = data.drop('x', axis=1)
        else:
            # B6/B7 are only reported in quarter-end months.
            data['B6'] = np.nan
            data['B7'] = np.nan
        data.columns = ['cust_no'] + ['B{}_{}'.format(i, m) for i in range(1, 8)]
        return data


def get_data(data_file='', param=''):
    """Load every monthly file of one table and merge them on cust_no."""
    loader = getattr(GetData(), param)
    df_all = pd.DataFrame()
    for mon in range(3, 0, -1):  # test months 1-3, shifted to 13-15
        df = loader(path='./test/{}/'.format(data_file), m=mon, iftrain=False)
        if len(df_all):
            df_all = df_all.merge(df, on='cust_no', how='left')
        else:
            df_all = df
    for mon in range(12, 5, -1):  # train months 12 down to 6
        df = loader(path='./train/{}/'.format(data_file), m=mon, iftrain=True)
        df_all = df_all.merge(df, on='cust_no', how='left')
    return df_all


def get_quarter_data(df, data_type='', colhead='X', num_col=3, quarter=3):
    """Slice one quarter out of the merged table.

    For each feature the last four month columns (quarter*3 down to
    quarter*3 - 3) are taken and renamed to a fixed _4.._1 suffix, where
    _4 is the quarter end and _1 the previous quarter end.
    """
    if data_type == 'aum':
        columns = ['cust_no']
        rename_col = ['cust_no']
        for i in range(1, num_col + 1):
            for j in range(4):
                columns.append('{}{}_{}'.format(colhead, i, quarter * 3 - j))
                rename_col.append('{}{}_{}'.format(colhead, i, 4 - j))
        tmp = df[columns].copy()
        tmp.columns = rename_col
        return tmp


def statistics_feature_aum(df):
    # Simple statistics over the three months inside the quarter.
    for i in range(1, 9):
        cols = ['X{}_4'.format(i), 'X{}_3'.format(i), 'X{}_2'.format(i)]
        df['X{}_mean'.format(i)] = df[cols].mean(axis=1)
        df['X{}_std'.format(i)] = df[cols].std(axis=1)
        df['X{}_sum'.format(i)] = df[cols].sum(axis=1)
        df['X{}_max'.format(i)] = df[cols].max(axis=1)
        df['X{}_min'.format(i)] = df[cols].min(axis=1)
    return df


def aum_feat_engineering(df):
    # Within-quarter and between-quarter differences for key asset columns.
    df['X3_sub1'] = df['X3_4'] - df['X3_3']
    df['X3_sub2'] = df['X3_4'] - df['X3_2']
    df['X3_sub3'] = df['X3_3'] - df['X3_2']
    df['X3_sub4'] = df['X3_2'] - df['X3_1']
    df['X3_sub5'] = df['X3_3'] - df['X3_1']

    df['X1_sub1'] = df['X1_4'] - df['X1_3']
    df['X1_sub2'] = df['X1_4'] - df['X1_2']
    df['X1_sub3'] = df['X1_4'] - df['X1_1']

    df['X7_sub1'] = df['X7_4'] - df['X7_3']
    df['X7_sub2'] = df['X7_4'] - df['X7_2']
    df['X7_sub3'] = df['X7_4'] - df['X7_1']

    # Monthly totals of the first three asset columns.
    df['sum_X123_4'] = df[['X1_4', 'X2_4', 'X3_4']].sum(axis=1)
    df['sum_X123_3'] = df[['X1_3', 'X2_3', 'X3_3']].sum(axis=1)
    df['sum_X123_2'] = df[['X1_2', 'X2_2', 'X3_2']].sum(axis=1)
    df['sum_X123_1'] = df[['X1_1', 'X2_1', 'X3_1']].sum(axis=1)

    df['X123_sub1'] = df['sum_X123_4'] - df['sum_X123_3']
    df['X123_sub2'] = df['sum_X123_4'] - df['sum_X123_2']
    df['X123_sub3'] = df['sum_X123_4'] - df['sum_X123_1']

    df['X7_div_X123_sum'] = df['X7_4'] / (1e-3 + df['sum_X123_4'])
    df['X7_sub_X123_sum'] = df['X7_4'] - df['sum_X123_4']
    return df


def eval_error(pred, train_set):
    """Custom LightGBM eval: Cohen's kappa, the competition metric."""
    labels = train_set.get_label()
    if pred.ndim == 1:
        # Older LightGBM passes multiclass predictions as a flat,
        # class-major array; reshape to (n_samples, n_classes).
        pred = pred.reshape((3, len(pred) // 3)).T
    y_pred = pred.argmax(axis=1)
    score = cohen_kappa_score(labels, y_pred)
    return 'kappa_score', score, True


def model_train(df, trainlabel, cate_cols, test_, feature, num_class):
    '''
    @param df: training data, DataFrame
    @param trainlabel: name of the label column, e.g. 'label'
    @param cate_cols: categorical feature names, e.g. ['col1', 'col2']
    @param test_: test data, DataFrame
    @param feature: all training features, e.g. ['feat1', 'feat2']
    @param num_class: number of classes

    @return sub_preds: fold-averaged test-set class probabilities
    @return oof_lgb: out-of-fold class probabilities
    @return clf: booster of the last fold
    @return sub_preds1: per-fold hard predictions on the test set
    '''
    train_ = df.copy()
    n_splits = 5
    oof_lgb = np.zeros([len(train_), num_class])
    folds = KFold(n_splits=n_splits, shuffle=True, random_state=2019)
    sub_preds = np.zeros([test_.shape[0], num_class])
    sub_preds1 = np.zeros([test_.shape[0], n_splits])
    label = trainlabel
    pred = list(feature)
    params = {
        'learning_rate': 0.02,
        'boosting_type': 'gbdt',
        'objective': 'multiclass',
        # 'metric': 'multi_error',
        'num_class': num_class,
        'num_leaves': 60,
        'feature_fraction': 0.8,
        'bagging_fraction': 0.8,
        'bagging_freq': 5,
        'seed': 1,
        'bagging_seed': 1,
        'feature_fraction_seed': 5,
        'min_data_in_leaf': 20,
        'max_depth': -1,
        'nthread': 8,
        'verbose': 1,
        # 'is_unbalance': True,
        # 'lambda_l1': 0.4,
        # 'lambda_l2': 0.5,
        'device': 'gpu',  # requires a GPU build of LightGBM; remove to train on CPU
    }
    for n_fold, (train_idx, valid_idx) in enumerate(folds.split(train_[pred], train_[label]), start=1):
        print('fold {} training start ...'.format(n_fold))
        train_x, train_y = train_[pred].iloc[train_idx], train_[label].iloc[train_idx]
        valid_x, valid_y = train_[pred].iloc[valid_idx], train_[label].iloc[valid_idx]
        train_weight = train_['weight'].iloc[train_idx]

        dtrain = lgb.Dataset(train_x, label=train_y, categorical_feature=cate_cols,
                             weight=train_weight.values)
        dvalid = lgb.Dataset(valid_x, label=valid_y, categorical_feature=cate_cols)

        clf = lgb.train(
            params=params,
            train_set=dtrain,
            num_boost_round=1000,
            valid_sets=[dvalid],
            feval=eval_error,
            # log every 100 rounds (lightgbm>=3.3; use verbose_eval=100 on older versions)
            callbacks=[lgb.log_evaluation(100)],
        )

        # Fold-averaged soft predictions, plus per-fold hard votes taken at
        # 400 iterations for comparison.
        sub_preds += clf.predict(test_[pred], num_iteration=1000) / folds.n_splits
        sub_preds1[:, n_fold - 1] = clf.predict(test_[pred], num_iteration=400).argmax(axis=1)
        oof_lgb[valid_idx] = clf.predict(valid_x)

    print('OOF kappa:', cohen_kappa_score(train_[label], oof_lgb.argmax(axis=1)))
    return sub_preds, oof_lgb, clf, sub_preds1


def main():
    gen_pseudo_data()
    df_aum = get_data(data_file='aum', param='load_aum_data')
    # quarter=5 is the test quarter (months 13-15 plus month 12); quarters 4
    # and 3 are the two labeled training quarters.
    df_aum_test = get_quarter_data(df_aum, data_type='aum', quarter=5, colhead='X', num_col=8)
    df_aum_test_Q4 = get_quarter_data(df_aum, data_type='aum', quarter=4, colhead='X', num_col=8)
    df_aum_test_Q3 = get_quarter_data(df_aum, data_type='aum', quarter=3, colhead='X', num_col=8)

    df_aum_test = aum_feat_engineering(df_aum_test)
    df_aum_test_Q4 = aum_feat_engineering(df_aum_test_Q4)
    df_aum_test_Q3 = aum_feat_engineering(df_aum_test_Q3)
    df_aum_test = statistics_feature_aum(df_aum_test)
    df_aum_test_Q4 = statistics_feature_aum(df_aum_test_Q4)
    df_aum_test_Q3 = statistics_feature_aum(df_aum_test_Q3)

    # Restrict each quarter to its list of valid customers.
    cust_avli_Q3 = pd.read_csv('./train/avli/cust_avli_Q3.csv')
    cust_avli_Q4 = pd.read_csv('./train/avli/cust_avli_Q4.csv')
    cust_avli_test = pd.read_csv('./test/avli/cust_avli_Q1.csv')

    cust_avli_Q3 = df_aum_test_Q3.loc[df_aum_test_Q3.cust_no.isin(cust_avli_Q3.cust_no.values)].copy()
    cust_avli_Q4 = df_aum_test_Q4.loc[df_aum_test_Q4.cust_no.isin(cust_avli_Q4.cust_no.values)].copy()
    cust_avli_test = df_aum_test.loc[df_aum_test.cust_no.isin(cust_avli_test.cust_no.values)].copy()

    # Shift labels from {-1, 0, 1} to {0, 1, 2} for LightGBM multiclass.
    label_Q3 = pd.read_csv('./train/y/y_Q3_3.csv')
    label_Q4 = pd.read_csv('./train/y/y_Q4_3.csv')
    label_Q3['label'] = label_Q3.label + 1
    label_Q4['label'] = label_Q4.label + 1
    cust_avli_Q3 = cust_avli_Q3.merge(label_Q3, on='cust_no', how='left')
    cust_avli_Q4 = cust_avli_Q4.merge(label_Q4, on='cust_no', how='left')

    # Previous-quarter label as a feature (unavailable for Q3 rows).
    cust_avli_Q3['pre_label'] = np.nan
    cust_avli_Q4 = cust_avli_Q4.merge(
        label_Q3[['cust_no', 'label']].rename(columns={'label': 'pre_label'}),
        on='cust_no', how='left')
    cust_avli_test = cust_avli_test.merge(
        label_Q4[['cust_no', 'label']].rename(columns={'label': 'pre_label'}),
        on='cust_no', how='left')

    Train_data = pd.concat([cust_avli_Q3, cust_avli_Q4], axis=0)
    # Class weights (direction 2 in the README): down-weight class 2 (increase).
    Train_data['weight'] = Train_data.label.map({1: 1.03, 2: 0.58, 0: 1})
    feature = Train_data.drop(['cust_no', 'label', 'weight'], axis=1).columns
    sub_preds, oof_lgb, clf, sub_preds1 = model_train(
        Train_data, trainlabel='label', cate_cols=[], test_=cust_avli_test,
        feature=feature, num_class=3)
    # Shift predictions back to the original {-1, 0, 1} label space.
    cust_avli_test['label'] = sub_preds.argmax(axis=1) - 1
    cust_avli_test[['cust_no', 'label']].to_csv('./test/baseline.csv', index=None)


if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------