├── .gitignore
├── script
│   ├── run_tensorboard.py
│   ├── tensorflow_run.py
│   ├── Makefile.json
│   ├── console.py
│   ├── official_benchmark.py
│   ├── README.md
│   ├── head.py
│   ├── baseline.py
│   ├── make.py
│   ├── datarepo.py
│   ├── mean_baseline.py
│   ├── tensorflow_main.py
│   ├── main.py
│   └── features.py
└── README.md

/.gitignore:
--------------------------------------------------------------------------------
 1 | *.txt
 2 | *.csv
 3 | *.pyc
 4 | *.png
 5 | *.pkl
 6 | test.py
 7 | script/test_tensorflow.py
 8 | input/*
 9 | temp/*
10 | *.zip
11 | *.dump
--------------------------------------------------------------------------------
/script/run_tensorboard.py:
--------------------------------------------------------------------------------
1 | import os
2 | tboard = 'D:/Software/Python35/Scripts/tensorboard.exe'
3 | log_dir = '../temp/tf_log'
4 | os.system('%s --logdir=%s' % (tboard, log_dir))
--------------------------------------------------------------------------------
/script/tensorflow_run.py:
--------------------------------------------------------------------------------
1 | import os, sys
2 | tf_python = 'D:/Software/Python35/python.exe'
3 | exec_file = 'tensorflow_main.py'
4 | if len(sys.argv) > 1:
5 |     exec_file = sys.argv[1]
6 | os.system('%s %s' % (tf_python, exec_file))
--------------------------------------------------------------------------------
/script/Makefile.json:
--------------------------------------------------------------------------------
 1 | [
 2 |     {
 3 |         "obj":[],
 4 |         "dep":["1.txt", "2.txt"],
 5 |         "act":["all"]
 6 |     },
 7 |     {
 8 |         "obj":["1.txt", "3.txt"],
 9 |         "dep":["4.txt"],
10 |         "act":["type 4.txt > 1.txt", "type 4.txt > 3.txt"]
11 |     },
12 |     {
13 |         "obj":["4.txt"],
14 |         "dep":["5.txt"],
15 |         "act":["type 5.txt > 4.txt"]
16 |     },
17 |     {
18 |         "obj":["2.txt", "3.txt"],
19 |         "dep":["6.txt"],
20 |         "act":["type 6.txt > 2.txt", "type 6.txt > 3.txt"]
21 |     }
22 | ]
--------------------------------------------------------------------------------
/script/console.py:
--------------------------------------------------------------------------------
 1 | # -*- coding: utf-8 -*-
 2 | """
 3 | Created on Wed Mar 15 16:27:59 2017
 4 | 
 5 | @author: LiuYangkai
 6 | This script makes debugging easier: the algorithm code usually has to load hundreds of
 7 | megabytes of data, and many intermediate results are expensive to compute, so the script
 8 | caches the loaded data and intermediate results in memory and lets you call the code
 9 | under test repeatedly, which greatly reduces the time spent on loading data and
10 | recomputing intermediate results between debugging rounds.
11 | """
12 | import logging, argparse, importlib
13 | from datarepo import Repo
14 | def main():
15 |     '''Takes two command-line arguments, package and function: function names the
16 |     function to debug and package names the package it lives in. After each run you
17 |     can choose to run it again or stop.'''
18 |     parser = argparse.ArgumentParser(description='Debug package.function. \
19 |         After each run you can choose to run it again or stop.')
20 |     parser.add_argument('package', help='package that contains the function to debug')
21 |     parser.add_argument('function', help='function to debug')
22 |     args = parser.parse_args()
23 |     #import the function to debug (eval() cannot execute an import statement)
24 |     module = importlib.import_module(args.package)
25 |     func = getattr(module, args.function)
26 |     #initialise the data-management singleton
27 |     Repo()
28 |     while True:
29 |         try:
30 |             func()
31 |         except Exception as msg:
32 |             logging.warning(msg)
33 |         if len(input('\nPress Enter to run %s.%s() again, any other input stops.\n'
34 |                      % (args.package, args.function))) > 0:
35 |             break
36 | 
37 | if __name__ == '__main__':
38 |     main()
--------------------------------------------------------------------------------
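console.py imports `package.function` dynamically and re-runs it on demand. Below is a minimal sketch (not part of the repository) of that import-and-rerun loop built on `importlib`; the `reload()` call is an addition not present in console.py, so that edits saved to the module between runs take effect on the next round.

```python
import importlib

def debug_loop(package, function):
    """Repeatedly run package.function(), reloading the module between runs."""
    module = importlib.import_module(package)
    while True:
        module = importlib.reload(module)      # pick up edits made since the last run
        func = getattr(module, function)
        try:
            func()
        except Exception as exc:
            print('error: %s' % exc)
        if input('Press Enter to run %s.%s() again, any other input stops.\n'
                 % (package, function)):
            break
```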
/script/official_benchmark.py:
--------------------------------------------------------------------------------
 1 | #coding=utf-8
 2 | import numpy as np
 3 | import pandas as pd
 4 | 
 5 | # your path to table user_pay
 6 | user_pay = 'user_pay.txt'
 7 | 
 8 | # load data
 9 | print('loading data...')
10 | user_pay_df = pd.read_table(user_pay, sep=',', header=None, \
11 |                             names=['user_id', 'shop_id', 'time_stamp'], \
12 |                             dtype={'user_id':'str', 'shop_id':'str', 'time_stamp':'str'})
13 | 
14 | # generate customer flow
15 | print('generating customer flow...')
16 | user_pay_df['time_stamp'] = user_pay_df['time_stamp'].str[:10]
17 | customer_flow = user_pay_df.groupby(['shop_id', 'time_stamp']).size()
18 | # predict
19 | fid = open('prediction_example.csv', 'w')
20 | for shop_id in range(1, 2001):
21 |     print('predicting: %4d/2000' % shop_id)
22 |     weekly_flow = pd.Series(np.zeros(7, dtype=int),
23 |         [d.strftime('%Y-%m-%d') for d in pd.date_range('10/25/2016', periods=7)])
24 |     flow = customer_flow.loc[str(shop_id), '2016-10-25':'2016-10-31']
25 |     weekly_flow[flow.index.get_level_values(1)] = flow
26 |     # use latest week's customer flow to predict following 2 weeks' customer flow
27 |     predictions = ','.join([str(x) for x in list(weekly_flow)*2])
28 |     fid.write('%d,%s\n' % (shop_id, predictions))
29 | fid.close()
30 | print('Finish')
--------------------------------------------------------------------------------
/script/README.md:
--------------------------------------------------------------------------------
 1 | # What the scripts do
 2 | - [main.py](main.py). The xgboost-based blended model.
 3 | - [features.py](features.py). Feature and label extraction.
 4 | - [tensorflow_main.py](tensorflow_main.py). The neural-network model.
 5 | - [head.py](head.py). Utility for pulling a slice out of a data file for debugging.
 6 | ```
 7 | usage: head.py [-h] [--number NUMBER] [--random] [--count] file
 8 | 
 9 | Show the first N lines or N random lines of a file, or count its lines
10 | 
11 | positional arguments:
12 |   file                  input file
13 | 
14 | optional arguments:
15 |   -h, --help            show this help message and exit
16 |   --number NUMBER, -n NUMBER
17 |                         number of lines
18 |   --random, -r          pick lines at random
19 |   --count, -c           count how many lines the file has
20 | ```
21 | - [datarepo.py](datarepo.py). Manages data that is large or expensive to compute. Its Repo class is a singleton, which makes it easy to share the same data across scripts. Usage is simple:
22 | ```python
23 | rep = Repo()
24 | dat = rep(lambda x: x, pd.DataFrame(np.random.randn(4, 4)))
25 | ```
26 | First get the Repo object, then call it like a function: the first argument is a callable that computes the data to be stored, and the remaining arguments are passed to that callable. The Repo object caches the result in memory, and if the computation takes 20 seconds or more it also saves the result to disk.
27 | - [console.py](console.py). This script makes debugging easier: the algorithm code usually has to load hundreds of megabytes of data and many intermediate results are expensive to compute, so together with _datarepo.py_ it caches the loaded data and intermediate results in memory and lets you call the code under test repeatedly, which greatly reduces the time spent on loading data and recomputing intermediate results between debugging rounds.
28 | ```
29 | usage: console.py [-h] package function
30 | 
31 | Debug package.function. After each run you can choose to run it again or stop.
32 | 
33 | positional arguments:
34 |   package     package that contains the function to debug
35 |   function    function to debug
36 | 
37 | optional arguments:
38 |   -h, --help  show this help message and exit
39 | ```
--------------------------------------------------------------------------------
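Because `Repo.__call__` uses `func.__name__` as the cache key, every lambda is stored under the same key `'<lambda>'`; a named function keeps cache entries separate. A small illustration (the `random_frame` helper is made up for this example and stands in for any expensive computation):

```python
import numpy as np
import pandas as pd
from datarepo import Repo

def random_frame():
    # cached under the key 'random_frame'
    return pd.DataFrame(np.random.randn(4, 4))

rep = Repo()                     # Repo() always returns the same instance
dat = rep(random_frame)          # computed once, kept in memory (pickled if it took >= 20 s)
dat_again = rep(random_frame)    # served from the cache as a deep copy
```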
/README.md:
--------------------------------------------------------------------------------
 1 | # IJCAI-17 Koubei Merchant Customer Flow Prediction — Solution Code
 2 | 
 3 | ## Problem description
 4 | > In this competition, each team must predict the customer flow of every shop in the
 5 | > test set for each day (00:00:00-23:59:59) of the following 14 days
 6 | > (2016.11.01-2016.11.14). Predictions must be non-negative integers.
 7 | 
 8 | The Tianchi platform provides shop data, user payment records and user browsing records
 9 | from 2015-07-01 to 2016-10-31 (excluding 2015-12-12); contestants must predict the customer flow of 2,000 shops for 2016-11-01 through 2016-11-14.
10 | 
11 | The loss function is defined as:
12 | ![L = \frac{1}{nT}\sum_i^n \sum_t^T\left|\frac{c_{it} - c_{it}^g}{c_{it} + c_{it}^g}\right|](http://mathurl.com/kctnfdv.png)
13 | 
14 | where ![c_{it}](http://mathurl.com/l3le8ue.png) is the predicted customer flow of shop i on day t and ![c_{it}^g](http://mathurl.com/k7ryzbr.png) is the actual customer flow of shop i on day t.
15 | 
16 | ## Approach
17 | ### Building the data set
18 | One sample is extracted for every shop (sid) and every date (stamp): the sales and related data of sid over the 14 days from stamp-14 to stamp-1 are used as the features, and the sales over the 14 days from stamp to stamp+13 as the corresponding label.
19 | 
20 | ### Features and labels
21 | > sid,stamp,day1,day2,...,day14,maxt1,desc1,maxt2,desc2,...,maxt14,desc14
22 | 
23 | Layout of a feature row. sid is the shop id and stamp is the date (yyyy-mm-dd); day1-day14 are the sales of the 14 days before stamp (stamp excluded), maxt1-maxt14 are the maximum temperatures of the next 14 days (stamp included), and desc1-desc14 indicate whether it rains on each of the next 14 days (stamp included).
24 | 
25 | > sid,stamp,day1,day2,...,day14
26 | 
27 | Layout of a label row. sid and stamp are as above; day1-day14 are the sales of the next 14 days, day1 being the sales on stamp itself.
28 | 
29 | After checking that the sid and stamp of each feature row match those of the corresponding label row one-to-one, both columns are dropped and the remaining columns form the final data set.
30 | 
31 | ### Models
32 | #### xgboost-based blended model
33 | WarpModel wraps a basic regressor (for example xgboost) so that it can output 14 daily predictions. The idea is that a WarpModel holds 14 instances of the basic model, each responsible for predicting the sales of one particular day; the 14 models are trained and applied independently.
34 | 
35 | xgboost, GBDT and RandomForest are each used as the basic model inside WarpModel, giving three sets of predictions that are blended with fixed weights to produce the final result.
36 | 
37 | #### Three-layer neural network
38 | The features and labels are the same as for the xgboost approach, except that only the sales of the previous 14 days are used. Each day's sales is predicted by its own three-layer sub-network; after the 14 sub-networks have been trained separately for a number of iterations, the whole network is trained with the official loss function.
39 | 
40 | ### Results
41 | The xgboost-based blended model clearly outperforms the neural network: its submitted loss is **0.092552**, while the neural network's offline test loss is **0.623244**, which was too poor to be worth submitting.
42 | 
43 | ---
44 | 
45 | Team: [@tecore](https://tianchi.aliyun.com/science/scientistDetail.htm?userId=1095279118908) [@巴尔干2016](https://tianchi.aliyun.com/science/scientistDetail.htm?spm=5176.100170.222.4.fxW0P2&userId=1095279120707)
--------------------------------------------------------------------------------
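For reference, the loss above fits in a few lines of numpy. This is only a sketch; it treats the 0/0 case (both predicted and actual flow are zero) as 0, which matches how the training scripts handle NaN in this ratio.

```python
import numpy as np

def competition_loss(pred, truth):
    """pred, truth: arrays of shape (n_shops, T) of non-negative customer-flow counts."""
    pred = np.asarray(pred, dtype=float)
    truth = np.asarray(truth, dtype=float)
    denom = np.abs(pred + truth)
    with np.errstate(divide='ignore', invalid='ignore'):
        ratio = np.abs(pred - truth) / denom
    ratio = np.where(denom == 0, 0.0, ratio)   # treat 0/0 as a perfect prediction
    return float(ratio.mean())                 # mean over all n*T shop-days
```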
/script/head.py:
--------------------------------------------------------------------------------
 1 | '''
 2 | Functions:
 3 |     - count how many lines a file has
 4 |     - show the first N lines of a file
 5 |     - pick N random lines as a test sample
 6 | 
 7 | usage: head.py [-h] [--number NUMBER] [--random] [--count] file
 8 | 
 9 | Show the first N lines or N random lines of a file, or count its lines
10 | 
11 | positional arguments:
12 |   file                  input file
13 | 
14 | optional arguments:
15 |   -h, --help            show this help message and exit
16 |   --number NUMBER, -n NUMBER
17 |                         number of lines
18 |   --random, -r          pick lines at random
19 |   --count, -c           count how many lines the file has
20 | '''
21 | import argparse
22 | from time import clock
23 | from random import choice
24 | def lineCounter(hfile):
25 |     '''count how many lines the file has'''
26 |     cnt = 0
27 |     line = hfile.readline()
28 |     while line:
29 |         cnt += 1
30 |         if cnt % 10000 == 0:
31 |             print('current line count: %d.' % cnt)
32 |         line = hfile.readline()
33 |     print('%d lines in total.' % cnt)
34 | def main():
35 |     parser = argparse.ArgumentParser(
36 |         description='Show the first N lines or N random lines of a file, or count its lines')
37 |     parser.add_argument('file', help='input file')
38 |     parser.add_argument('--number', '-n', help='number of lines', type=int, default=10)
39 |     parser.add_argument('--random', '-r', help='pick lines at random',
40 |                         action='store_true', default=False)
41 |     parser.add_argument('--count', '-c', help='count how many lines the file has',
42 |                         action='store_true', default=False)
43 |     args = parser.parse_args()
44 |     if args.count:
45 |         clock()
46 |         with open(args.file) as file:
47 |             lineCounter(file)
48 |         print('took %f seconds.' % (clock()))
49 |     else:
50 |         with open(args.file) as file:
51 |             line = file.readline()
52 |             cnt = 0
53 |             while line and cnt < args.number:
54 |                 if not args.random or choice([True, False]):
55 |                     print(line.strip())
56 |                     cnt += 1
57 |                 line = file.readline()
58 | if __name__ == '__main__':
59 |     main()
--------------------------------------------------------------------------------
/script/baseline.py:
--------------------------------------------------------------------------------
 1 | #-*- coding:utf-8 -*-
 2 | #Predict directly with the data of the week 2016.10.25-2016.10.31 (the last week of the training data)
 3 | import pandas as pd
 4 | from time import clock
 5 | def main():
 6 |     #names gives the column names, which makes the later sql-like queries easier
 7 |     dat = pd.read_csv('../input/dataset/user_pay.txt',
 8 |                       header = None,
 9 |                       names = ['uid', 'sid', 'stamp'])
10 |     #select * from dat where stamp >= '' and stamp <= ''
11 |     dat = dat[(dat['stamp'] >= '2016-10-25 00:00:00')
12 |             & (dat['stamp'] <= '2016-10-31 23:59:59')]
13 | 
14 |     #used for the left joins below
15 |     day7 = pd.DataFrame([str(e) for e in range(1, 2001)],
16 |                         columns=['sid'])
17 |     #aggregate the data of that week, one day at a time
18 |     for k in range(25, 32):
19 |         dat1 = dat[(dat['stamp'] >= ('2016-10-%d 00:00:00' % k))
20 |                  & (dat['stamp'] <= ('2016-10-%d 23:59:59' % k))]
21 |         #group by, counting the size of each group; note the result is a Series
22 |         #and has to be converted to a DataFrame before it can be joined
23 |         dat1 = dat1.groupby('sid').size()
24 | 
25 |         #take the row index of dat1 (an int list, i.e. the shop ids),
26 |         #convert it to the matching str list and combine it with the values of dat1
27 |         #into a two-column DataFrame
28 |         sid = []
29 |         for j in range(len(dat1.axes[0])):
30 |             sid.append(str(dat1.axes[0][j]))
31 |         dayi = 'day%d' % (k - 24)
32 |         #note this way of building a DataFrame
33 |         dat1 = pd.DataFrame({'sid':sid, dayi:dat1.values})
34 |         #left join; may produce np.NaN values
35 |         day7 = day7.merge(dat1, how = 'left', on = 'sid')
36 |         #update day7 set dayi = 0 where dayi is null
37 |         #set NaN to 0
38 |         day7.loc[day7[dayi].isnull(), dayi] = 0
39 |     last7 = day7
40 |     nmap = {}
41 |     for k in range(1, 8):
42 |         nmap['day%d' % k] = 'day%d' % (k+7)
43 |     #note this way of renaming the columns
44 |     last7 = last7.rename_axis(nmap, axis="columns")
45 |     day14 = day7.merge(last7, how = 'left', on = 'sid')
46 |     #float_format keeps only the integer part of float values
47 |     day14.to_csv('../temp/baseline.csv', header=False, index=False, encoding='utf-8',
48 |                  float_format='%0.0f')
49 | #
50 | if __name__ == '__main__':
51 |     clock()
52 |     main()
53 |     print(clock())
--------------------------------------------------------------------------------
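The per-day loop in baseline.py can be cross-checked with a single groupby/unstack. A hedged sketch under the same path assumptions as baseline.py (not part of the repository):

```python
import pandas as pd

dat = pd.read_csv('../input/dataset/user_pay.txt', header=None,
                  names=['uid', 'sid', 'stamp'])
day = dat['stamp'].str[:10]
week = dat[(day >= '2016-10-25') & (day <= '2016-10-31')]
counts = (week.groupby(['sid', week['stamp'].str[:10]])
              .size()
              .unstack(fill_value=0)                     # one column per day, missing days -> 0
              .reindex(range(1, 2001), fill_value=0))    # shops with no payments -> all zeros
```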
/script/make.py:
--------------------------------------------------------------------------------
 1 | import json, sys, os.path, os
 2 | def comp(obj, dep):
 3 |     '''
 4 |     Return True if the target files are newer than the dependency files.
 5 |     '''
 6 |     min1 = -1
 7 |     for e in obj:
 8 |         if not os.path.exists(e):
 9 |             print("%s does not exist!" % e)
10 |             sys.exit(1)
11 |         t = os.path.getmtime(e)
12 |         if min1 < 0 or t < min1:
13 |             min1 = t
14 |     max2 = 0
15 |     for e in dep:
16 |         if not os.path.exists(e):
17 |             print("%s does not exist!" % e)
18 |             sys.exit(1)
19 |         t = os.path.getmtime(e)
20 |         if t > max2:
21 |             max2 = t
22 |     return min1 > max2
23 | #
24 | def check(node, path, data, deps):
25 |     '''Walk the dependency tree top-down; if a child node is out of date, every parent that depends on it must be rebuilt'''
26 |     if node in path:
27 |         print('Circular Dependency!')
28 |         sys.exit(1)
29 | 
30 |     #check every node this node depends on
31 |     status = False
32 |     path.add(node)
33 |     for k in range(len(deps)):
34 |         if deps[node][k]:
35 |             status = check(k, path, data, deps) or status
36 |     path.remove(node)
37 |     #run the actions if a child node or the node itself is out of date
38 |     if status or (not comp(data[node]['obj'], data[node]['dep'])):
39 |         for e in data[node]['act']:
40 |             print(e)
41 |             os.system(e)
42 |         status = True
43 |     return status
44 | #
45 | if len(sys.argv) < 2:
46 |     fileName = 'Makefile.json'
47 | else:
48 |     fileName = sys.argv[1]
49 | data = json.load(open(fileName))
50 | N = len(data)
51 | #dependencies are stored in a 2-D array: deps[n][k] == True means node n depends on node k
52 | deps = [[] for k in range(N)]
53 | for k in range(N):
54 |     deps[k] = [False for k in range(N)]
55 | for m in range(N - 1):
56 |     for n in range(m+1, N):
57 |         #infer dependencies automatically: if one node's obj intersects another node's dep,
58 |         #the two nodes are related by a dependency
59 |         if len(set(data[m]['obj']) & set(data[n]['dep'])) > 0:
60 |             deps[n][m] = True
61 |         if len(set(data[n]['obj']) & set(data[m]['dep'])) > 0:
62 |             deps[m][n] = True
63 |         if deps[m][n] and deps[n][m]:
64 |             print('Circular Dependency!')
65 | #find the root nodes and check each of them in turn
66 | for k in range(N):
67 |     f = True
68 |     for n in range(N):
69 |         if deps[n][k]:
70 |             f = False
71 |             break
72 |     if f:
73 |         check(k, set(), data, deps)
74 | #
--------------------------------------------------------------------------------
/script/datarepo.py:
--------------------------------------------------------------------------------
 1 | # -*- coding: utf-8 -*-
 2 | """
 3 | Created on Tue Mar 14 20:41:23 2017
 4 | 
 5 | @author: LiuYangkai
 6 | """
 7 | from time import clock
 8 | import logging, os, time
 9 | import pandas as pd
10 | class Repo:
11 |     '''Manages data that is large or expensive to compute. The class is a singleton, which
12 |     makes it easy to share the same data across scripts. Usage is simple:
13 |         rep = Repo()
14 |         dat = rep(lambda x: x, pd.DataFrame(np.random.randn(4, 4)))
15 |     First get the Repo object, then call it like a function: the first argument is a
16 |     callable that computes the data to be stored, and the remaining arguments are passed
17 |     to that callable. The Repo object caches the result in memory, and if the computation
18 |     takes 20 seconds or more it also saves the result to disk.
19 |     '''
20 | 
21 |     __ins = None #holds the single instance
22 |     def __init__(self, baseDir='../temp/repo'):
23 |         '''baseDir is the folder used to cache data'''
24 |         self.dir = baseDir
25 |         self.data = {}
26 |         if not os.path.exists(self.dir):
27 |             os.makedirs(self.dir)
28 |             logging.info('Created folder: %s' % self.dir)
29 | 
30 |         #if the cache folder already holds data, load it into memory
31 |         for p in os.listdir(self.dir):
32 |             if os.path.isfile(
33 |                 os.path.join(self.dir, p)):
34 |                 key = os.path.splitext(p)[0]
35 |                 path = os.path.join(self.dir, p)
36 |                 t = pd.read_pickle(path)
37 |                 logging.info('Loaded %s from %s.' % (key, path))
38 |                 self.data[key] = t
39 |     def __new__(cls, *args, **kwargs):
40 |         '''Called before __init__; makes sure every construction returns the same object'''
41 |         if not cls.__ins:
42 |             cls.__ins = super(Repo, cls).__new__(cls)
43 |         return cls.__ins
44 |     def __call__(self, func, *args, **kwargs):
45 |         '''Implements ins(func, args, kwargs).
46 |         func is a callable; the remaining arguments are passed to func.'''
47 |         if not callable(func):
48 |             raise Exception('%s is not callable!' % str(func))
49 |         #the function name is used as the name of the data
50 |         key = getattr(func, '__name__')
51 |         if key in self.data:
52 |             #return a copy of the data
53 |             return self.data[key].copy(deep=True)
54 |         start = clock()
55 |         t = func(*args, **kwargs)
56 |         dur = clock() - start
57 |         #cache the result on disk if the computation took 20 s or more
58 |         if dur >= 20:
59 |             path = os.path.join(self.dir, key + '.pkl')
60 |             t.to_pickle(path)
61 |             logging.info('Cached %s to %s.' % (key, path))
62 |         self.data[key] = t
63 |         #return a copy of the data
64 |         return t.copy(deep=True)
65 |     def saveResult(self, result, name='none'):
66 |         '''Manages the final results; every saved result gets a timestamp so that the
67 |         progress of the results can be tracked'''
68 |         stamp = time.strftime('%m%d_%H%M', time.localtime(time.time()))
69 |         path = os.path.join(self.dir, '%s_%s_result.csv' % (stamp, name))
70 |         result.to_csv(path, index=False, encoding='utf-8')
--------------------------------------------------------------------------------
/script/mean_baseline.py:
--------------------------------------------------------------------------------
 1 | # -*- coding: utf-8 -*-
 2 | """
 3 | Created on Thu Feb 9 11:24:40 2017
 4 | 
 5 | @author: dxy
 6 | """
 7 | ## INSTRUCTION:
 8 | ## 1. The script reads user_pay.txt from '../input/dataset/' (adjust fileloc/filename below)
 9 | ## 2. The result will be saved as '../temp/average.csv'
10 | 
11 | import pandas as pd
12 | import numpy as np
13 | 
14 | ##############################################################################
15 | ## count how many of each day of the week occur from 2015-7-1 to 2016-10-31,
16 | ## except 2015-12-12
17 | start_date=pd.datetime(2015,7,1)
18 | end_date=pd.datetime(2016,10,31)
19 | index=pd.date_range(start_date,end_date)
20 | dayofweek=pd.Series(index.dayofweek,index=index)
21 | dayofweek=dayofweek.drop(pd.datetime(2015,12,12))
22 | dayofweek=dayofweek.groupby(dayofweek).size()
23 | # answer:
24 | # 0(Mon) 1(Tues) 2(Wed) 3(Thur) 4(Fri) 5(Sat) 6(Sun)
25 | # 70     69      70     70      70     69     70
26 | 
27 | ##############################################################################
28 | #fileloc='../../data/dataset/'
29 | 
30 | # Open user_pay.txt and
31 | # add a column with values from 0-6 representing the day of the week
32 | fileloc=''
33 | filename='../input/dataset/user_pay.txt'
34 | user_pay=pd.read_csv(fileloc+filename,sep=',',header=None)
35 | user_pay.columns=['user_id','shop_id','time_stamp']
36 | user_pay['time_stamp'] = pd.to_datetime(user_pay['time_stamp'])
37 | user_pay['day_of_week'] = user_pay['time_stamp'].dt.dayofweek
38 | 
39 | ##############################################################################
40 | # For a fixed store, id=1 for example, count all purchases that happened
41 | # on Mondays, then divide by how many Mondays fall in this period and, finally,
42 | # put the value into a column called purchase_times.
43 | # (The algorithm is same for Thuesday to Sunday) 44 | user_pay_grouped=user_pay.groupby(['shop_id','day_of_week']).size() 45 | 46 | user_pay_grouped=user_pay_grouped.div(dayofweek,level=1) # Series type 47 | user_pay_grouped=user_pay_grouped.to_frame() 48 | user_pay_grouped=user_pay_grouped.reset_index() 49 | user_pay_grouped=user_pay_grouped.rename(columns = {0:'purchase_times'}) 50 | 51 | ############################################################################## 52 | # Generate the final table 53 | ds=pd.pivot_table(user_pay_grouped,values='purchase_times', 54 | index='shop_id', columns='day_of_week', 55 | fill_value=0) 56 | ds=ds.round().astype(np.int) 57 | 58 | start=(end_date.weekday()+1)%7 59 | result = pd.concat([ds,ds,ds],axis=1,ignore_index=True) 60 | result = result.loc[:,start:start+13] 61 | 62 | ############################################################################## 63 | # save result 64 | path='../temp/average.csv' 65 | result.to_csv(path,sep=',',header=False,index=True,encoding='utf-8') 66 | #user_pay.groupby(['shop_id', 67 | # user_pay['time_stamp'].dt.date]).size() 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | -------------------------------------------------------------------------------- /script/tensorflow_main.py: -------------------------------------------------------------------------------- 1 | ''' 2 | 用神经网络的方法来做预测。特征、标签和xgboost方法的一样,不过只使用了前14天的销量 3 | 网络结构:每一天的销售量用一个三层的子网络训练得到,14个子网络分别迭代训练若干次后, 4 | 在用官方的loss函数训练整个网络。''' 5 | import tensorflow as tf 6 | import pandas as pd 7 | import numpy as np 8 | from random import randint 9 | import os 10 | class DataBatch: 11 | '''把数据分成多个batches,每次取一个batch来训练网络''' 12 | def __init__(self, X, y, size=1000): 13 | self.X = X 14 | self.y = y 15 | self.index = 0 16 | self.size = size 17 | def next_batch(self): 18 | if self.index + self.size <= self.X.shape[0]: 19 | s = self.index 20 | e = self.index + self.size 21 | self.index = e 22 | return (self.X[s:e, :], self.y[s:e, :]) 23 | else: 24 | s = self.index 25 | self.index = 0 26 | return (self.X[s:, :], self.y[s:, :]) 27 | def select_test(n, count): 28 | '''从大小为n的样本中随机选择count个作为 29 | 测试,其余的用来训练''' 30 | index = [False for _ in range(n)] 31 | p = 0 32 | while p < count: 33 | ii = randint(0, n-1) 34 | if index[ii]: 35 | continue 36 | index[ii] = True 37 | p += 1 38 | return index 39 | def load(): 40 | '''加载数据,并将数据分为训练集合测试集''' 41 | feature = pd.read_csv('../temp/train_features.csv', 42 | dtype=np.float) 43 | label = pd.read_csv('../temp/train_labels.csv', 44 | dtype=np.float) 45 | #只取了前14天的销售量这一个特征 46 | feature = feature.iloc[:, 0:14] 47 | label = label.iloc[:, 0:14] 48 | index1 = feature['day1'] > 0 49 | index2 = label['day1'] > 0 50 | for k in range(2, 15): 51 | index1 = index1 | feature['day%d'%k] > 0 52 | index2 = index2 | label['day%d'%k] > 0 53 | index = index1 & index2 54 | feature = feature[index] 55 | label = label[index] 56 | test_set = select_test(feature.shape[0], round(feature.shape[0]*0.1)) 57 | test_feature = feature.loc[test_set, :] 58 | test_label = label.loc[test_set, :] 59 | for k in range(len(test_set)): 60 | test_set[k] = not test_set[k] 61 | feature = feature.loc[test_set, :] 62 | label = label.loc[test_set, :] 63 | return (feature.values, label.values, test_feature, 64 | test_label) 65 | def load_predict_feature(): 66 | '''加载待预测数据,也只取了前14天的销售量''' 67 | dtype = {'sid':np.str} 68 | for k in range(1, 15): 69 | dtype['day%k'] = np.float 70 | odat = pd.read_csv('../temp/predict_features.csv', dtype=dtype) 71 | sid = odat.iloc[:, 0] 72 | feature = 
odat.iloc[:, 1:] 73 | return (sid, feature.values) 74 | def main(): 75 | nFeature = 14 76 | nLabel = 14 77 | nHidden = 10 78 | #基本思路是每一天的销售量用一个3层的神经网络预测 79 | #并分别进行优化,然后在用官方给的Loss函数做个整体的优化 80 | with tf.name_scope('Input'): 81 | X = tf.placeholder(tf.float32, [None, nFeature]) 82 | y = tf.placeholder(tf.float32, [None, nLabel]) 83 | with tf.name_scope('Hidden'): 84 | hidden_weights = [] 85 | hidden_biases = [] 86 | layer1s = [] 87 | for k in range(nLabel): 88 | hidden_weights.append( 89 | tf.Variable(tf.random_normal([nFeature, nHidden]), 90 | name='hidden_weight%d'%k)) 91 | hidden_biases.append( 92 | tf.Variable(tf.random_normal([nHidden]), 93 | name='hidden_bias%d'%k)) 94 | layer1s.append(tf.nn.relu(tf.matmul(X, hidden_weights[k]) + hidden_biases[k])) 95 | with tf.name_scope('Output'): 96 | output_weights = [] 97 | output_biases = [] 98 | layer2s = [] 99 | for k in range(nLabel): 100 | output_weights.append( 101 | tf.Variable(tf.random_normal([nHidden, 1]), 102 | name='output_weights%d'%k)) 103 | output_biases.append( 104 | tf.Variable(tf.random_normal([1]), 105 | name='output_biases%d'%k)) 106 | layer2s.append( 107 | tf.nn.relu(tf.matmul(layer1s[k], output_weights[k]) + output_biases[k])) 108 | with tf.name_scope('Loss'): 109 | sep_loss = [] 110 | zeros1 = tf.squeeze(tf.matmul(X, tf.zeros([nFeature, 1]))) 111 | for k in range(nLabel) : 112 | #对每天的预测进行评价,后面会优化 113 | sq = tf.squeeze(layer2s[k]) 114 | su = tf.abs(sq - y[:,k]) 115 | ad = tf.abs(sq + y[:,k]) 116 | div_nan = tf.truediv(su, ad) 117 | div_all = tf.where(tf.is_nan(div_nan), zeros1, div_nan) 118 | sep_loss.append(tf.reduce_mean(div_all)) 119 | tf.summary.scalar('loss_%d'%k, sep_loss[k]) 120 | #整体优化 121 | layer2_all = tf.concat(layer2s, 1) 122 | abs_sub = tf.abs(layer2_all - y) 123 | abs_add = tf.abs(layer2_all + y) 124 | loss_nan = tf.truediv(abs_sub, abs_add) 125 | 126 | #生成和loss_all一样大小的tensor 127 | zeros = tf.matmul(X, tf.zeros([nFeature, nLabel])) 128 | loss_all = tf.where(tf.is_nan(loss_nan), zeros, loss_nan) 129 | loss = tf.reduce_mean(loss_all) 130 | tf.summary.scalar('loss', loss) 131 | opt = tf.train.GradientDescentOptimizer(0.01) 132 | merge_sum = tf.summary.merge_all() 133 | 134 | #预测结果 135 | if os.path.exists('../temp/tf_model/checkpoint'): 136 | with tf.Session() as sess: 137 | saver = tf.train.Saver() 138 | saver.restore(sess, tf.train.latest_checkpoint('../temp/tf_model')) 139 | sid, X_data = load_predict_feature() 140 | #如果run的操作不依赖某个placeholder的话,可以不送数据 141 | y_predict = sess.run(layer2, feed_dict={X:X_data}) 142 | data = pd.DataFrame(y_predict, columns=['day%d'%k for k in range(1, 15)]) 143 | data.insert(0, 'sid', sid) 144 | data.to_csv('../temp/tensorflow_result.csv', 145 | index=False, header=False, encoding='utf-8', float_format='%0.0f') 146 | return 147 | #训练 148 | X_data, y_data, X_test, y_test = load() 149 | bch = DataBatch(X_data, y_data) 150 | print('Data loaded!') 151 | with tf.Session() as sess: 152 | #训练过程可视化 153 | train_writer = tf.summary.FileWriter('../temp/tf_log', sess.graph) 154 | sess.run(tf.global_variables_initializer()) 155 | #print(sess.run([su[0], ad[0],sep_loss[0]], feed_dict={X:X_data, y:y_data})) 156 | saver = tf.train.Saver() 157 | #分别训练 158 | for k in range(100): 159 | bx, by = bch.next_batch() 160 | for e in range(14): 161 | train = opt.minimize(sep_loss[e]) 162 | ms, _, los = sess.run([merge_sum, train, sep_loss[e]], 163 | feed_dict={X:bx, y:by}) 164 | print('%d - day%d: %f.'%(k, e, los)) 165 | train_writer.add_summary(ms, k) 166 | #整体优化 167 | for k in range(100): 168 | train = 
opt.minimize(loss) 169 | bx, by = bch.next_batch() 170 | ms, _, los = sess.run([merge_sum, train, loss], 171 | feed_dict={X:bx, y:by}) 172 | train_writer.add_summary(ms, k+10) 173 | if k % 100 == 0: 174 | saver.save(sess, '../temp/tf_model/nn_model', global_step=k) 175 | print('%d/%d: %f' % (k, 10, los)) 176 | 177 | los = sess.run(loss, feed_dict={X:X_test, y:y_test}) 178 | print('测试结果: %f.'%los) 179 | saver.save(sess, '../temp/tf_model/nn_model', global_step=10100) 180 | # 181 | if __name__ == '__main__': 182 | os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' 183 | main() -------------------------------------------------------------------------------- /script/main.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Fri Feb 24 14:46:52 2017 4 | 5 | @author: LiuYangkai 6 | 训练流程:随机取10%的数据作为测试集,其余的作为训练集。用训练集对模型进行交叉验证, 7 | 接着用所有的训练集训练模型,最后用测试集评估模型的loss值。 8 | 关于模型:用WarpModel包装基本的回归模型使之可以输出14天的预测值,基本思想是 9 | WarpModel里包含了14个基本的模型的对象,每个对象负责预测某一天的销量,14个模型的 10 | 训练和预测过程相互独立。 11 | 模型融合:分别使用xgboost、GBDT和RandomForest作为WarpModel的基本模型,得到三个 12 | 结果,赋予相应的权重得到最终结果。 13 | 特征:前14天的销售量和待预测14天的天气和最高温度。注意这些特征在模型训练和预测时的 14 | 使用。 15 | 16 | """ 17 | import logging, xgboost, os 18 | from features import extractAll 19 | from time import clock 20 | from sklearn.externals import joblib 21 | from sklearn.model_selection import KFold, cross_val_score 22 | import pandas as pd 23 | from sklearn.base import BaseEstimator 24 | from sklearn.ensemble import GradientBoostingRegressor 25 | from sklearn.ensemble import RandomForestRegressor 26 | from random import randint 27 | def blend(lst, w): 28 | '''将多个结果根据权重融合起来''' 29 | r = lst[0] * w[0] 30 | for k in range(1, len(lst)): 31 | r = r + lst[k]*w[k] 32 | return r.round() 33 | def select_test(n, count): 34 | '''从大小为n的样本中随机选择count个作为测试,其余的用来训练''' 35 | index = [False for _ in range(n)] 36 | p = 0 37 | while p < count: 38 | ii = randint(0, n-1) 39 | if index[ii]: 40 | continue 41 | index[ii] = True 42 | p += 1 43 | return index 44 | def official_loss(estimator, X, y): 45 | '''官方定义的loss函数''' 46 | #注意重置index,不然会出现意想不到的问题 47 | y_ = y.reset_index(drop=True) 48 | y_p = estimator.predict(X) 49 | adds = (y_p + y_).abs() 50 | subs = (y_p - y_).abs() 51 | divs = subs / adds 52 | N = divs.shape[0] * divs.shape[1] 53 | return divs.sum().sum() / N 54 | class WarpModel(BaseEstimator): 55 | '''包装基本的回归模型,使得该类可以输出14天的预测值。''' 56 | def __init__(self, model): 57 | klass = model.__class__ 58 | self.modelList = [] 59 | for k in range(14): 60 | self.modelList.append(klass(**(model.get_params(deep=False)))) 61 | self.__initParams = {} 62 | self.__initParams['model'] = model 63 | def get_params(self, deep=False): 64 | '''返回构造该类的参数, 因为交叉验证的函数会clone传进去的model对象,会调用该方法 65 | ''' 66 | return self.__initParams 67 | def fit(self, X, y): 68 | '''X=[n_samples, n_features] 69 | y=[n_samples, n_targets]''' 70 | #注意重置index,不然模型计算过程中会出错 71 | X_ = X.reset_index(drop=True) 72 | for k in range(14): 73 | #前14天的销量和第k天的天气和最高温度作为第k个模型的训练特征 74 | #其余的特征并未使用。predict也是这样处理的。 75 | xt = X_.iloc[:,0:14] 76 | xt.insert(14, 'maxt', X_.iloc[:, 14+2*k]) 77 | xt.insert(15, 'desc', X_.iloc[:, 14+2*k+1]) 78 | y_ = y.iloc[:, k].reset_index(drop=True) 79 | self.modelList[k].fit(xt, y_) 80 | return self 81 | def predict(self, X): 82 | '''X=[n_samples, n_features] 83 | 返回 y=[n_samples, n_labels]''' 84 | #注意重置index,不然模型计算过程中会出错 85 | dat = X.reset_index(drop=True) 86 | labels = pd.DataFrame() 87 | for k in range(14): 88 | xt = dat.iloc[:,0:14] 89 | xt.insert(14, 'maxt', 
dat.iloc[:,14+2*k]) 90 | xt.insert(15, 'desc', dat.iloc[:,14+2*k+1]) 91 | p = self.modelList[k].predict(xt) 92 | p = pd.DataFrame({'tmp%d'%k:p}) 93 | labels.insert(k, 'day%d'%(k+1), p) 94 | labels.columns = ['day%d'%k for k in range(1, 15)] 95 | return labels.round() 96 | def main(): 97 | model = None 98 | modelPath = '../temp/model.pkl' 99 | 100 | #模型名及对应的权重 101 | modelWeight = [0.3, 0.4, 0.3] 102 | modelName = ['xgboost', 'GBDT', 'RandomForest'] 103 | 104 | #如果模型已经训练好,就进行预测得出最终结果 105 | if os.path.exists(modelPath): 106 | logging.info('从%s中加载模型...'%modelPath) 107 | #joblib.load加载的是保存到磁盘中的对象,不仅仅是训练好的模型 108 | modelList = joblib.load(modelPath) 109 | feature = extractAll('predict') 110 | X = feature.iloc[:, 1:] 111 | resList = [] 112 | for k in range(len(modelName)): 113 | model = modelList[k] 114 | logging.info('%s:预测中...'%modelName[k]) 115 | resList.append(model.predict(X)) 116 | logging.info('%s:预测结束.'%modelName[k]) 117 | logging.info('融合模型...') 118 | y = blend(resList, modelWeight) 119 | y.insert(0, 'sid', feature['sid']) 120 | y.to_csv('../temp/result.csv', header=False, index=False, 121 | encoding='utf-8', float_format='%0.0f') 122 | logging.info('已将结果保存到../temp/result.csv') 123 | else: 124 | (feature, label) = extractAll() 125 | logging.info('共有%d条训练数据.' % feature.shape[0]) 126 | 127 | #过滤一部分无效数据 128 | index1 = feature['day1'] > 0 129 | index2 = label['day1'] > 0 130 | for k in range(2, 15): 131 | index1 = index1 | feature['day%d'%k] > 0 132 | index2 = index2 | label['day%d'%k] > 0 133 | index = index1 & index2 134 | feature = feature[index] 135 | label = label[index] 136 | logging.info('去掉无效数据后还剩%d条.' % (feature.shape[0])) 137 | 138 | #划分训练集和测试集 139 | test_set = select_test(feature.shape[0], round(feature.shape[0]*0.1)) 140 | test_feature = feature.loc[test_set, :] 141 | test_label = label.loc[test_set, :] 142 | logging.info('用%d个样本用作最终的测试.'%test_feature.shape[0]) 143 | for k in range(len(test_set)): 144 | test_set[k] = not test_set[k] 145 | feature = feature.loc[test_set, :] 146 | label = label.loc[test_set, :] 147 | logging.info('用%d个样本用作训练.'%feature.shape[0]) 148 | 149 | #交叉验证 150 | modelList = [WarpModel(xgboost.XGBRegressor( 151 | silent=True, n_estimators=100)), 152 | WarpModel(GradientBoostingRegressor()), 153 | WarpModel(RandomForestRegressor())] 154 | 155 | for k in range(len(modelList)): 156 | logging.info('%s: 交叉验证...'%modelName[k]) 157 | model = modelList[k] 158 | #注意n_jobs使用多CPU时,不可以调试,否则会抛出异常 159 | scores = cross_val_score(model, feature, label, 160 | cv=KFold(n_splits=3, shuffle=False), 161 | #n_jobs=-1, 162 | scoring=official_loss 163 | ) 164 | logging.info('交叉验证结果:%s' % str(scores)) 165 | logging.info('用所有的训练数据训练模型...') 166 | model.fit(feature, label) 167 | modelList[k] = model 168 | logging.info('%s测试模型...' % modelName[k]) 169 | logging.info('测试结果: %f' % official_loss(model, test_feature, \ 170 | test_label)) 171 | joblib.dump(modelList, modelPath) 172 | logging.info('已将训练好的模型保存到%s.'%modelPath) 173 | # 174 | if __name__ == '__main__': 175 | logging.basicConfig( 176 | level = logging.DEBUG, 177 | format = '%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s', 178 | datefmt = '%y%m%d %H:%M:%S', 179 | filename = '../temp/log.txt', 180 | filemode = 'w'); 181 | console = logging.StreamHandler(); 182 | console.setLevel(logging.INFO); 183 | console.setFormatter(logging.Formatter('%(asctime)s %(filename)s: %(levelname)-8s %(message)s')); 184 | logging.getLogger('').addHandler(console); 185 | clock() 186 | main() 187 | logging.info('共耗时%f分钟.' 
% (clock()/60)) 188 | -------------------------------------------------------------------------------- /script/features.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Sat Feb 25 15:20:03 2017 4 | 5 | @author: LiuYangkai 6 | 提取特征的代码,只提取了前14天的销量和未来14天的天气数据。 7 | 8 | 最终特征的组织格式如下 9 | sid,stamp,day1,day2,...,day14,maxt1,desc1,maxt2,desc2,...,maxt14,desc14 10 | sid是商家的id,stamp是日期yyyy-mm-dd,day1-day14是stamp前14天的销量(不包括stamp), 11 | maxt1-maxt14是未来14天的最高温度(包括stamp),desc1-desc14表示未来14天是否下雨(包 12 | 括stamp)。 13 | 14 | 标签的组织格式 15 | sid,stamp,day1,day2,...,day14 16 | sid和stamp同上,day1-day14是未来14天的销量,其中day1即是stamp当天的销量。 17 | 18 | 当验证完特征的sid以及stamp和标签的一一对应后,就会移除两者的sid和stamp域,组成最终的 19 | 数据集。 20 | """ 21 | import os, logging 22 | import pandas as pd 23 | import numpy as np 24 | def crossJoin(df1, df2): 25 | '''两个DataFrame的笛卡尔积''' 26 | df1['_temp_key'] = 1 27 | df2['_temp_key'] = 1 28 | df = df1.merge(df2, on='_temp_key') 29 | df = df.drop('_temp_key', axis='columns') 30 | return df 31 | def getLabels(dat, mode='train'): 32 | '''获取未来14天的销量''' 33 | cache_path = '../temp/label_with_sid_stamp.csv' 34 | if os.path.exists(cache_path): 35 | logging.info('从%s读取数据.'%cache_path) 36 | dtype={'sid':np.str, 'stamp':np.str} 37 | for k in range(14): 38 | dtype['day%d'%(k+1)] = np.int 39 | return pd.read_csv(cache_path, 40 | dtype=dtype) 41 | dat = dat[(dat['stamp'] >= '2015-07-01') & 42 | (dat['stamp'] <= '2016-10-31')] 43 | dat['stamp'] = dat['stamp'].str[:10] 44 | dat = dat.groupby('sid') 45 | days = None 46 | for sid in [str(k) for k in range(1, 2001)]: 47 | shop = dat.get_group(sid) 48 | logging.info('getLabels:%s.' % sid) 49 | shop = shop.drop('sid', axis='columns') 50 | shop = shop.groupby('stamp').size().reset_index() 51 | shop.rename_axis({0:'sales'}, axis='columns', inplace=True) 52 | shop = shop.sort_values('stamp') 53 | N = shop.shape[0] 54 | if N < 14: 55 | logging.warn('%s的数据条数不足14个.'%sid) 56 | continue 57 | full = pd.DataFrame({'stamp': 58 | pd.date_range(shop['stamp'][0], '2016-10-31').\ 59 | strftime('%Y-%m-%d')}) 60 | shop = full.merge(shop, how='left', on='stamp') 61 | shop.fillna(0, inplace=True) 62 | #双12的销量用前后两天的均值表示 63 | idx = shop[shop['stamp'] == '2015-12-12'].axes[0] 64 | if len(idx) >= 1: 65 | if idx[0] > 0: 66 | shop.loc[idx, 'sales'] = round((shop.loc[idx-1, 'sales'].\ 67 | values[0] + shop.loc[idx+1, 'sales'].values[0])/2) 68 | else: 69 | shop.loc[idx, 'sales'] = shop.loc[idx+1, 'sales'].values[0] 70 | #前14天用于提取特征 71 | temp = pd.DataFrame({'stamp':shop['stamp'][14:-14].reset_index(drop=True)}) 72 | N = shop.shape[0] 73 | for n in range(14): 74 | t = shop['sales'][14+n:N+n-14].reset_index(drop=True) 75 | temp.insert(n+1, 'day%d'%(n+1), t) 76 | temp['sid'] = sid 77 | if days is None: 78 | days = temp 79 | else: 80 | days = days.append(temp) 81 | days.to_csv(cache_path, index=None) 82 | return days 83 | def extractAll(mode = 'train'): 84 | featurePath = os.path.join('../temp/', mode + '_features.csv') 85 | labelPath = os.path.join('../temp/', mode + '_labels.csv') 86 | labels = None 87 | if mode == 'train': 88 | if os.path.exists(featurePath) and\ 89 | os.path.exists(labelPath): 90 | return (pd.read_csv(featurePath), 91 | pd.read_csv(labelPath)) 92 | else: 93 | if os.path.exists(featurePath): 94 | return pd.read_csv(featurePath) 95 | #提取特征 96 | logging.info('加载user_pay.txt...') 97 | user_pay = pd.read_csv('../input/dataset/user_pay.txt', 98 | header = None, names = ['uid', 'sid', 'stamp'], 99 | dtype = np.str) 
100 | 101 | if mode == 'train' and labels is None: 102 | logging.info('提取Label...') 103 | labels = getLabels(user_pay) 104 | f1 = Last_week_sales(mode=mode) 105 | logging.info('提取最近14天的销量数据...') 106 | f1 = f1.extract(user_pay) 107 | f2 = Weather(mode=mode) 108 | logging.info('提取天气数据...') 109 | f2 = f2.extract() 110 | if mode == 'train': 111 | features = f1.merge(f2, on=['sid','stamp'], how='left') 112 | else: 113 | features = f1.merge(f2, on=['sid'], how='left') 114 | if features.isnull().any().any(): 115 | raise Exception('存在无效数据!') 116 | features = features.reset_index(drop=True) 117 | if mode == 'train': 118 | labels = labels.reset_index(drop=True) 119 | if mode == 'train': 120 | if not (features['sid'].equals(labels['sid'])) or\ 121 | not (features['stamp'].equals(labels['stamp'])): 122 | features.to_csv(featurePath+'.dump', index=False, encoding='utf-8') 123 | labels.to_csv(labelPath+'.dump', index=False, encoding='utf-8') 124 | raise Exception('特征和标签不匹配!数据已保存到dump。') 125 | #保存计算的features到outPath 126 | features = features.drop(['sid', 'stamp'], axis='columns') 127 | labels = labels.drop(['sid', 'stamp'], axis='columns') 128 | logging.info('保存提取的label...') 129 | labels.to_csv(labelPath, index=False, encoding='utf-8') 130 | logging.info('保存提取的特征...') 131 | features.to_csv(featurePath, index=False, encoding='utf-8') 132 | if mode == 'train': 133 | return (features, labels) 134 | return features 135 | # 136 | class BaseFeature: 137 | def __init__(self, outDir = '../temp/', 138 | featureName = 'base', mode = 'train', 139 | dtype=np.str): 140 | self.outFile = os.path.join(outDir, mode + '_' + featureName + '.csv') 141 | self.name = featureName 142 | self.mode = mode 143 | self.data = None 144 | if os.path.exists(self.outFile): 145 | self.data = pd.read_csv(self.outFile, dtype = dtype) 146 | logging.info('从%s中载入特征%s.' 
% (self.outFile, self.name)) 147 | def extract(self, indata): 148 | return self.data 149 | # 150 | class Last_week_sales(BaseFeature): 151 | '''过去14天的销量,最终得到的数据格式如下: 152 | sid,stamp,day1,day2,day3,...,day14 153 | sid是商家id,stamp是日期yyyy-mm-dd,day1-day14分别是前14天到前1天的销售量 154 | ''' 155 | def __init__(self, mode = 'train'): 156 | dtype = {'sid':np.str, 'stamp':np.str} 157 | for k in range(1, 15): 158 | dtype['day%d'%k] = np.int 159 | BaseFeature.__init__(self, 160 | featureName = 'Last_two_weeks_sales', 161 | mode = mode, 162 | dtype=dtype) 163 | def extract(self, indata): 164 | if self.data is not None: 165 | return self.data 166 | if isinstance(indata, str): 167 | indata = pd.read_csv(indata, header = None) 168 | #提取特征 169 | dat = indata 170 | dat['stamp'] = dat['stamp'].str[:10] 171 | if self.mode == 'train': 172 | dat = dat[(dat['stamp'] >= '2015-07-01') & 173 | (dat['stamp'] <= '2016-10-18')] 174 | else: 175 | dat = dat[(dat['stamp'] >= '2016-10-18') & 176 | (dat['stamp'] <= '2016-10-31')] 177 | if self.mode != 'train': 178 | dat = dat.groupby('sid') 179 | cols = ['sid'] 180 | for k in range(14): 181 | cols.append('day%d'%(k+1)) 182 | days = pd.DataFrame(columns=cols) 183 | for sid in [str(k) for k in range(1, 2001)]: 184 | tmp = {} 185 | tmp['sid'] = sid 186 | try: 187 | sale = dat.get_group(sid) 188 | sale = sale.groupby('stamp').size() 189 | for k in range(14): 190 | try: 191 | tmp['day%d'%(k+1)] = sale.loc['2016-10-%d'%(18+k)] 192 | except: 193 | tmp['day%d'%(k+1)] = 0 194 | except: 195 | logging.warn('%s在提取特征时间段没有销售量'%sid) 196 | for k in range(1, 15): 197 | tmp['day%d'%k] = 0 198 | days = days.append(tmp, ignore_index=True) 199 | days.to_csv(self.outFile, index=False, encoding='utf-8', 200 | float_format='%0.0f') 201 | logging.info('已将最近14天的销售数据保存到%s.'%self.outFile) 202 | return days 203 | dat = dat.groupby('sid') 204 | days = None 205 | for sid in [str(k) for k in range(1, 2001)]: 206 | shop = dat.get_group(sid) 207 | logging.info('last_week_sales:%s.' 
% sid) 208 | shop = shop.drop('sid', axis='columns') 209 | shop = shop.groupby('stamp').size().reset_index() 210 | shop.rename_axis({0:'sales'}, axis='columns', inplace=True) 211 | shop = shop.sort_values('stamp') 212 | N = shop.shape[0] 213 | if N < 14: 214 | logging.warn('%s的数据条数不足14个.'%sid) 215 | continue 216 | full = pd.DataFrame({'stamp': 217 | pd.date_range(shop['stamp'][0], '2016-10-17').\ 218 | strftime('%Y-%m-%d')}) 219 | shop = full.merge(shop, how='left', on='stamp') 220 | shop.fillna(0, inplace=True) 221 | 222 | #双12的销量用前后两天的均值填充 223 | idx = shop[shop['stamp'] == '2015-12-12'].axes[0] 224 | if len(idx) >= 1: 225 | if idx[0] > 0: 226 | shop.loc[idx, 'sales'] = round((shop.loc[idx-1, 'sales'].\ 227 | values[0] + shop.loc[idx+1, 'sales'].values[0])/2) 228 | else: 229 | shop.loc[idx, 'sales'] = shop.loc[idx+1, 'sales'].values[0] 230 | temp = pd.DataFrame({'stamp':shop['stamp'][14:].reset_index(drop=True)}) 231 | N = shop.shape[0] 232 | for n in range(14): 233 | t = shop['sales'][n:N-14+n].reset_index(drop=True) 234 | temp.insert(n+1, 'day%d'%(n+1), t) 235 | temp['sid'] = sid 236 | if days is None: 237 | days = temp 238 | else: 239 | days = days.append(temp) 240 | days.to_csv(self.outFile, index=False, encoding='utf-8', 241 | float_format='%0.0f') 242 | logging.info('已将最近14天的销售数据保存到%s.'%self.outFile) 243 | return days 244 | class Weather(BaseFeature): 245 | '''提取每家店所在城市的天气数据,雨天为True,否则False;还包括了最高温度。 246 | 数据格式如下: 247 | sid,stamp,maxt1,desc1,maxt2,desc2,...,maxt14,desc14 248 | sid和stamp和前面一样,maxtk和desck分别表示第k天的最高温度和天气,desck为True, 249 | 表示第k天下雨,否则不下雨。''' 250 | def __init__(self, mode='train'): 251 | dtype = {'sid':np.str, 'stamp':np.str} 252 | for k in range(1, 15): 253 | dtype['maxt%d'%k] = np.float 254 | dtype['desc%d'%k] = np.bool 255 | BaseFeature.__init__(self, 256 | featureName = 'weather', 257 | mode = mode, dtype=dtype) 258 | def extract(self, indata=None): 259 | if self.data is not None: 260 | return self.data 261 | logging.info('读取文件%s.'%'../input/weather_all.csv') 262 | wh = pd.read_csv('../input/weather_all.csv', 263 | header=None, names=['city', 264 | 'stamp', 'maxt', 'desc'], 265 | usecols=[0,1,2,4], dtype={'city':np.str, 266 | 'stamp':np.str,'maxt':np.int,'desc':np.str}) 267 | if self.mode == 'train': 268 | wh = wh[(wh['stamp'] >= '2015-07-15') & 269 | (wh['stamp'] <= '2016-10-31')] 270 | else: 271 | wh = wh[(wh['stamp'] >= '2016-11-01') & 272 | (wh['stamp'] <= '2016-11-14')] 273 | wh.loc[:, 'desc'] = wh.desc.apply(lambda s:'雨' in s) 274 | logging.info('读取文件%s.'%'../input/dataset/shop_info.txt') 275 | shop_info = pd.read_csv('../input/dataset/shop_info.txt', 276 | header=None, names=['sid', 'city'], 277 | usecols=[0,1], dtype=np.str) 278 | weather = shop_info.merge(wh, on='city', how='left') 279 | weather = weather.drop('city', axis='columns') 280 | gb = weather.groupby('sid') 281 | weather = None 282 | if self.mode != 'train': 283 | for sid in [str(e) for e in range(1, 2001)]: 284 | logging.info('weather:%s.'%sid) 285 | print(sid) 286 | dat = gb.get_group(sid).reset_index(drop=True) 287 | dat = dat.sort_values('stamp') 288 | dat = dat.drop('stamp', axis='columns') 289 | tmp = pd.DataFrame({'sid':[sid]}) 290 | for k in range(14): 291 | tmp.insert(2*k+1, 'maxt%d'%(k+1),dat.loc[k, 'maxt']) 292 | tmp.insert(2*k+2, 'desc%d'%(k+1),dat.loc[k, 'desc']) 293 | if weather is None: 294 | weather = tmp 295 | else: 296 | weather = weather.append(tmp) 297 | weather.to_csv(self.outFile, index=False, encoding='utf-8') 298 | logging.info('已将天气特征保存到%s.'%self.outFile) 299 | return 
weather 300 | for sid in [str(e) for e in range(1, 2001)]: 301 | logging.info('weather:%s.'%sid) 302 | print(sid) 303 | dat = gb.get_group(sid).reset_index(drop=True) 304 | dat = dat.drop('sid', axis='columns') 305 | dat = dat.sort_values('stamp') 306 | tmp = pd.DataFrame({'stamp': 307 | pd.date_range('2015-07-15', '2016-10-18').\ 308 | strftime('%Y-%m-%d')}) 309 | N = dat.shape[0] 310 | for k in range(14): 311 | tmp.insert(2*k+1, 'maxt%d'%(k+1), dat.loc[k:N-14+k, 'maxt']\ 312 | .reset_index(drop=True)) 313 | tmp.insert(2*k+2, 'desc%d'%(k+1), dat.loc[k:N-14+k, 'desc']\ 314 | .reset_index(drop=True)) 315 | tmp['sid'] = sid 316 | if weather is None: 317 | weather = tmp 318 | else: 319 | weather = weather.append(tmp) 320 | weather.to_csv(self.outFile, index=False, encoding='utf-8') 321 | logging.info('已将天气特征保存到%s.'%self.outFile) 322 | return weather 323 | --------------------------------------------------------------------------------
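A minimal sketch (not part of the repository) of the per-shop sliding-window alignment that features.py and getLabels build: for a date stamp, the feature row holds the sales of the 14 days before stamp and the label row holds the sales of stamp and the following 13 days.

```python
import pandas as pd

def make_windows(daily_sales):
    """daily_sales: Series of one shop's sales indexed by consecutive 'YYYY-MM-DD' dates."""
    values = daily_sales.values
    dates = daily_sales.index
    samples = []
    for i in range(14, len(values) - 13):
        feature = values[i - 14:i]   # day1..day14 of the feature row: the 14 days before stamp
        label = values[i:i + 14]     # day1..day14 of the label row: stamp and the next 13 days
        samples.append((dates[i], feature, label))
    return samples
```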