├── Leek.iml
├── README.md
├── conf_linux_server
│   ├── (flume-a)flume-conf.properties
│   ├── (flume-b)flume-conf.properties
│   ├── (jstorm-a)storm.yaml
│   ├── (jstorm-b)storm.yaml
│   ├── (kafka-a)server.properties
│   ├── (kafka-b)server.properties
│   ├── (zookeeper-a)zoo.cfg
│   ├── (zookeeper-b)zoo.cfg
│   └── (zookeeper-c)zoo.cfg
├── pom.xml
├── src
│   └── main
│       ├── java
│       │   └── com
│       │       └── yingjun
│       │           └── stock
│       │               ├── bolt
│       │               │   ├── ReportBolt.java
│       │               │   ├── StockFilterBolt.java
│       │               │   ├── StockStrategyBolt1.java
│       │               │   ├── StockStrategyBolt2.java
│       │               │   └── StockStrategyBolt3.java
│       │               ├── dto
│       │               │   ├── ResultStock.java
│       │               │   └── StockRealTimeEvent.java
│       │               ├── mysql
│       │               │   └── ConnectionPool.java
│       │               ├── spout
│       │               │   └── StockSpout.java
│       │               ├── topology
│       │               │   └── StockStategyTopology.java
│       │               └── utils
│       │                   └── EventScheme.java
│       └── resources
│           ├── config.properties
│           ├── logback.xml
│           └── scheme.sql
└── stock
    ├── stock.log
    └── test.sh

/Leek.iml:
--------------------------------------------------------------------------------
(IntelliJ IDEA module descriptor. Its XML markup was stripped when this dump was generated and nothing recoverable remains, so the content is omitted.)
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------

## Leek — the retail investor's self-redemption
> Leek is a lightweight distributed real-time stock-screening system: plenty of computing power, very little code.

#### Built on Flume + Kafka + JStorm + Esper + MySQL
- Distributed architecture; computing capacity can be scaled freely.
- Flume extracts log data in real time.
- Kafka connects Flume and JStorm, acting as the message relay.
- JStorm + Esper do the distributed real-time stream processing.
- MySQL stores the screening results.

#### A few simple strategies are implemented so far (see the worked sketch right after this README):
- Heavy selling (stock-stategy-1): select a stock when the total lots on its five ask levels reach 100x the total lots on its five bid levels;
- Heavy buying (stock-stategy-2): select a stock when the total lots on its five bid levels reach 100x the total lots on its five ask levels;
- Volume surge (stock-stategy-3): select a stock whose turnover exceeds 10,000,000 yuan within 10 seconds;

#### Deployment guides
- [ZooKeeper high-availability cluster installation and configuration](http://wosyingjun.iteye.com/blog/2312960)
- [Kafka cluster deployment and testing](http://wosyingjun.iteye.com/blog/2316508)
- [JStorm 2.1.1 cluster installation and usage](http://wosyingjun.iteye.com/blog/2317034)
- Flume cluster load balancing (write-up pending)



#### Architecture:
![](http://i.imgur.com/Cmruowc.png)
#### Storm compute nodes:
![](http://i.imgur.com/7cWrlwy.png)
#### Esper computation model:

![](http://i.imgur.com/pVzaWf9.jpg)

![](http://i.imgur.com/i8BwisZ.jpg)
--------------------------------------------------------------------------------
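To make the strategy-1 threshold concrete, here is a tiny self-contained sketch (plain Java, no Storm or Esper; the class and variable names are made up for illustration). It applies the same test as StockStrategyBolt1's EPL to the second quote in stock/stock.log further down: 67 bid lots against 6,896 ask lots, and 67 × 100 = 6,700 ≤ 6,896, so the stock is picked.

```java
// Illustrative only — evaluates the README's strategy-1 condition on one
// sample quote from stock/stock.log (not part of the project source).
public class StrategyMathExample {
    public static void main(String[] args) {
        // quote for 600570: lot counts on the five bid and five ask levels
        long[] buyCounts  = {0, 23, 30, 4, 10};        // buyCount1..buyCount5
        long[] sellCounts = {6526, 185, 10, 10, 165};  // sellCount1..sellCount5
        long buyTotal = 0, sellTotal = 0;
        for (long c : buyCounts) buyTotal += c;        // 67 lots bid
        for (long c : sellCounts) sellTotal += c;      // 6896 lots offered
        // strategy 1 (heavy selling): bid depth * 100 <= ask depth
        boolean picked = buyTotal * 100 <= sellTotal;  // 6700 <= 6896 -> true
        System.out.println("strategy-1 pick: " + picked);
    }
}
```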
/conf_linux_server/(flume-a)flume-conf.properties:
--------------------------------------------------------------------------------
# agent1 below is the agent name; it has two sources (s1, s2), one sink (k1) and one channel (c1).
agent1.sources = s1 s2
agent1.channels = c1
agent1.sinks = k1

# Avro source s1 (receives the data forwarded from server B)
agent1.sources.s1.type = avro
agent1.sources.s1.bind= 0.0.0.0
agent1.sources.s1.port= 44444
agent1.sources.s1.channels= c1
agent1.sources.s1.batchSize=1000
agent1.sources.s1.batchTimeout=1000

agent1.sources.s2.type = exec
agent1.sources.s2.channels = c1
agent1.sources.s2.command = tail -F /usr/local/stock/stock.log
agent1.sources.s2.batchSize=10000
agent1.sources.s2.batchTimeout=1000

# Memory channel
agent1.channels.c1.type = memory
agent1.channels.c1.capacity = 100000
agent1.channels.c1.transactionCapacity = 10000

# Alternative sink (write to local files), kept for reference
#agent1.sinks.k1.channel = c1
#agent1.sinks.k1.type = file_roll
#agent1.sinks.k1.sink.directory = /tmp/flume
# rollInterval defaults to 30, i.e. roll to a new file every 30 seconds;
# 0 means keep appending to a single file
#agent1.sinks.k1.sink.rollInterval = 0

# Kafka sink (forward the events to Kafka)
agent1.sinks.k1.type= org.apache.flume.sink.kafka.KafkaSink
# Kafka broker addresses and ports
agent1.sinks.k1.brokerList=192.168.xx.100:9092,192.168.xx.101:9092
# Kafka topic
agent1.sinks.k1.topic=stock
agent1.sinks.k1.batchSize = 1000
# requiredAcks=0 gives the lowest latency but the weakest durability guarantee
agent1.sinks.k1.requiredAcks=0
agent1.sinks.k1.channel=c1



--------------------------------------------------------------------------------
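Before wiring up the topology it is handy to confirm that Flume really publishes to the `stock` topic. A throwaway consumer along these lines works (kafka-clients 0.9 new-consumer API, which the kafka_2.11 dependency pulls in; the group id is arbitrary and the broker list mirrors the sink config above — adjust to your hosts):

```java
import java.util.Arrays;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

// Throwaway checker: prints whatever Flume publishes on the "stock" topic.
public class StockTopicProbe {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "192.168.xx.100:9092,192.168.xx.101:9092");
        props.put("group.id", "stock-probe");   // arbitrary test group
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Arrays.asList("stock"));
        try {
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(1000);
                for (ConsumerRecord<String, String> r : records)
                    System.out.printf("partition=%d offset=%d value=%s%n",
                            r.partition(), r.offset(), r.value());
            }
        } finally {
            consumer.close();
        }
    }
}
```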
/conf_linux_server/(flume-b)flume-conf.properties:
--------------------------------------------------------------------------------
# agent1 below is the agent name; it has one source (s1), one sink (k1) and one channel (c1).
agent1.sources = s1
agent1.channels = c1
agent1.sinks = k1

# Exec source: tail the stock log
agent1.sources.s1.type = exec
agent1.sources.s1.channels = c1
agent1.sources.s1.command = tail -f /usr/local/stock/stock.log


# Memory channel
agent1.channels.c1.type = memory
agent1.channels.c1.capacity = 10000
agent1.channels.c1.transactionCapacity = 10000

# Avro sink:
# events are serialized as Avro and sent to the given server port (server A).
agent1.sinks.k1.channel = c1
agent1.sinks.k1.type = avro
agent1.sinks.k1.hostname = 192.168.xx.100
agent1.sinks.k1.port = 44444
# larger values improve throughput but hurt latency.
agent1.sinks.k1.batch-size=20

--------------------------------------------------------------------------------
/conf_linux_server/(jstorm-a)storm.yaml:
--------------------------------------------------------------------------------
########### These MUST be filled in for a storm configuration
# ZooKeeper servers (all on the default port 2181)
storm.zookeeper.servers:
    - "192.168.xx.100"
    - "192.168.xx.101"
    - "192.168.xx.102"

nimbus.host: "192.168.xx.100"
storm.zookeeper.root: "/jstorm"

storm.local.dir: "%JSTORM_HOME%/data"

supervisor.slots.ports:
    - 6800
    - 6801
    - 6802




--------------------------------------------------------------------------------
/conf_linux_server/(jstorm-b)storm.yaml:
--------------------------------------------------------------------------------
########### These MUST be filled in for a storm configuration
# ZooKeeper servers (all on the default port 2181)
storm.zookeeper.servers:
    - "192.168.xx.100"
    - "192.168.xx.101"
    - "192.168.xx.102"

nimbus.host: "192.168.xx.100"
storm.zookeeper.root: "/jstorm"

storm.local.dir: "%JSTORM_HOME%/data"

supervisor.slots.ports:
    - 6800
    - 6801
    - 6802




--------------------------------------------------------------------------------
/conf_linux_server/(kafka-a)server.properties:
--------------------------------------------------------------------------------
############################# Server Basics #############################

# The id of the broker. This must be set to a unique integer for each broker.
broker.id=0

############################# Socket Server Settings #############################

listeners=PLAINTEXT://192.168.xx.100:9092

# The port the socket server listens on
port=9092

# Hostname the broker will bind to. If not set, the server will bind to all interfaces
host.name=192.168.xx.100

# Hostname the broker will advertise to producers and consumers. If not set, it uses the
# value for "host.name" if configured. Otherwise, it will use the value returned from
# java.net.InetAddress.getCanonicalHostName().
advertised.host.name=192.168.xx.100

# The port to publish to ZooKeeper for clients to use. If this is not set,
# it will publish the same port that the broker binds to.
#advertised.port=9092

# The number of threads handling network requests
num.network.threads=3

# The number of threads doing disk I/O
num.io.threads=8

# The send buffer (SO_SNDBUF) used by the socket server
socket.send.buffer.bytes=102400

# The receive buffer (SO_RCVBUF) used by the socket server
socket.receive.buffer.bytes=102400

# The maximum size of a request that the socket server will accept (protection against OOM)
socket.request.max.bytes=104857600


############################# Log Basics #############################

# A comma separated list of directories under which to store log files
log.dirs=/usr/local/kafka_2.11-0.9.0.1/kafka-logs

# The default number of log partitions per topic. More partitions allow greater
# parallelism for consumption, but this will also result in more files across
# the brokers.
num.partitions=5

# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
# This value is recommended to be increased for installations with data dirs located in a RAID array.
num.recovery.threads.per.data.dir=1

############################# Log Flush Policy #############################

# Messages are immediately written to the filesystem but by default we only fsync() to sync
# the OS cache lazily. The following configurations control the flush of data to disk.
# There are a few important trade-offs here:
#    1. Durability: Unflushed data may be lost if you are not using replication.
#    2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
#    3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
# The settings below allow one to configure the flush policy to flush data after a period of time or
# every N messages (or both). This can be done globally and overridden on a per-topic basis.

# The number of messages to accept before forcing a flush of data to disk
#log.flush.interval.messages=10000

# The maximum amount of time a message can sit in a log before we force a flush
#log.flush.interval.ms=1000

############################# Log Retention Policy #############################

# The following configurations control the disposal of log segments. The policy can
# be set to delete segments after a period of time, or after a given size has accumulated.
# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
# from the end of the log.

# The minimum age of a log file to be eligible for deletion
log.retention.hours=168

# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
# segments don't drop below log.retention.bytes.
#log.retention.bytes=1073741824

# The maximum size of a log segment file. When this size is reached a new log segment will be created.
log.segment.bytes=1073741824

# The interval at which log segments are checked to see if they can be deleted according
# to the retention policies
log.retention.check.interval.ms=300000

############################# Zookeeper #############################

# Zookeeper connection string (see zookeeper docs for details).
# This is a comma separated host:port pairs, each corresponding to a zk
# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
# You can also append an optional chroot string to the urls to specify the
# root directory for all kafka znodes.
zookeeper.connect=192.168.xx.100:2181,192.168.xx.101:2181,192.168.xx.102:2181

# Timeout in ms for connecting to zookeeper
zookeeper.connection.timeout.ms=6000

--------------------------------------------------------------------------------
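Note that num.partitions above only sets the default for auto-created topics; the topology further down assumes the `stock` topic exists with 2 partitions. A sketch of creating it explicitly against the 0.9 admin API (kafka.admin.AdminUtils from the kafka_2.11 artifact; the partition/replica counts and timeouts here are illustrative):

```java
import java.util.Properties;
import kafka.admin.AdminUtils;
import kafka.utils.ZkUtils;

// One-off helper: create the "stock" topic with 2 partitions / 2 replicas,
// matching the partition count the topology's spout parallelism assumes.
public class CreateStockTopic {
    public static void main(String[] args) {
        String zk = "192.168.xx.100:2181,192.168.xx.101:2181,192.168.xx.102:2181";
        ZkUtils zkUtils = ZkUtils.apply(zk, 30000, 30000, false);
        try {
            AdminUtils.createTopic(zkUtils, "stock", 2, 2, new Properties());
        } finally {
            zkUtils.close();
        }
    }
}
```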
/conf_linux_server/(kafka-b)server.properties:
--------------------------------------------------------------------------------
############################# Server Basics #############################

# The id of the broker. This must be set to a unique integer for each broker.
broker.id=1

############################# Socket Server Settings #############################

listeners=PLAINTEXT://192.168.xx.101:9092

# The port the socket server listens on
port=9092

# Hostname the broker will bind to. If not set, the server will bind to all interfaces
host.name=192.168.xx.101

# Hostname the broker will advertise to producers and consumers. If not set, it uses the
# value for "host.name" if configured. Otherwise, it will use the value returned from
# java.net.InetAddress.getCanonicalHostName().
advertised.host.name=192.168.xx.101

# The port to publish to ZooKeeper for clients to use. If this is not set,
# it will publish the same port that the broker binds to.
#advertised.port=9092

# The number of threads handling network requests
num.network.threads=3

# The number of threads doing disk I/O
num.io.threads=8

# The send buffer (SO_SNDBUF) used by the socket server
socket.send.buffer.bytes=102400

# The receive buffer (SO_RCVBUF) used by the socket server
socket.receive.buffer.bytes=102400

# The maximum size of a request that the socket server will accept (protection against OOM)
socket.request.max.bytes=104857600


############################# Log Basics #############################

# A comma separated list of directories under which to store log files
log.dirs=/usr/local/kafka_2.11-0.9.0.1/kafka-logs

# The default number of log partitions per topic. More partitions allow greater
# parallelism for consumption, but this will also result in more files across
# the brokers.
num.partitions=5

# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
# This value is recommended to be increased for installations with data dirs located in a RAID array.
num.recovery.threads.per.data.dir=1

############################# Log Flush Policy #############################

# Messages are immediately written to the filesystem but by default we only fsync() to sync
# the OS cache lazily. The following configurations control the flush of data to disk.
# There are a few important trade-offs here:
#    1. Durability: Unflushed data may be lost if you are not using replication.
#    2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
#    3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
# The settings below allow one to configure the flush policy to flush data after a period of time or
# every N messages (or both). This can be done globally and overridden on a per-topic basis.

# The number of messages to accept before forcing a flush of data to disk
#log.flush.interval.messages=10000

# The maximum amount of time a message can sit in a log before we force a flush
#log.flush.interval.ms=1000

############################# Log Retention Policy #############################
# The following configurations control the disposal of log segments. The policy can
# be set to delete segments after a period of time, or after a given size has accumulated.
# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
# from the end of the log.

# The minimum age of a log file to be eligible for deletion
log.retention.hours=168

# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
# segments don't drop below log.retention.bytes.
#log.retention.bytes=1073741824

# The maximum size of a log segment file. When this size is reached a new log segment will be created.
log.segment.bytes=1073741824

# The interval at which log segments are checked to see if they can be deleted according
# to the retention policies
log.retention.check.interval.ms=300000

############################# Zookeeper #############################

# Zookeeper connection string (see zookeeper docs for details).
# This is a comma separated host:port pairs, each corresponding to a zk
# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
# You can also append an optional chroot string to the urls to specify the
# root directory for all kafka znodes.
zookeeper.connect=192.168.xx.100:2181,192.168.xx.101:2181,192.168.xx.102:2181

# Timeout in ms for connecting to zookeeper
zookeeper.connection.timeout.ms=6000

--------------------------------------------------------------------------------
/conf_linux_server/(zookeeper-a)zoo.cfg:
--------------------------------------------------------------------------------
# Note: create a file named "myid" under /usr/local/zookeeper-3.4.6/data/ whose content is just "1"
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/usr/local/zookeeper-3.4.6/data
dataLogDir=/usr/local/zookeeper-3.4.6/logs
clientPort=2181
server.1=192.168.xx.100:2881:3881
server.2=192.168.xx.101:2881:3881
server.3=192.168.xx.102:2881:3881

--------------------------------------------------------------------------------
/conf_linux_server/(zookeeper-b)zoo.cfg:
--------------------------------------------------------------------------------
# Note: create a file named "myid" under /usr/local/zookeeper-3.4.6/data/ whose content is just "2"
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/usr/local/zookeeper-3.4.6/data
dataLogDir=/usr/local/zookeeper-3.4.6/logs
clientPort=2181
server.1=192.168.xx.100:2881:3881
server.2=192.168.xx.101:2881:3881
server.3=192.168.xx.102:2881:3881

--------------------------------------------------------------------------------
/conf_linux_server/(zookeeper-c)zoo.cfg:
--------------------------------------------------------------------------------
# Note: create a file named "myid" under /usr/local/zookeeper-3.4.6/data/ whose content is just "3"
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/usr/local/zookeeper-3.4.6/data
dataLogDir=/usr/local/zookeeper-3.4.6/logs
clientPort=2181
server.1=192.168.xx.100:2881:3881
server.2=192.168.xx.101:2881:3881
server.3=192.168.xx.102:2881:3881

--------------------------------------------------------------------------------
/pom.xml:
--------------------------------------------------------------------------------
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>com.yingjun.demo</groupId>
    <artifactId>Leek</artifactId>
    <version>1.0-SNAPSHOT</version>

    <properties>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
        <java.version>1.7</java.version>
        <spring.version>4.2.0.RELEASE</spring.version>
    </properties>

    <dependencies>
        <dependency>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-api</artifactId>
            <version>1.7.5</version>
        </dependency>
        <dependency>
            <groupId>org.slf4j</groupId>
            <artifactId>log4j-over-slf4j</artifactId>
            <version>1.7.10</version>
            <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>ch.qos.logback</groupId>
            <artifactId>logback-classic</artifactId>
            <version>1.0.13</version>
            <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-log4j12</artifactId>
            <version>1.7.5</version>
            <scope>provided</scope>
        </dependency>

        <dependency>
            <groupId>com.alibaba</groupId>
            <artifactId>fastjson</artifactId>
            <version>1.1.41</version>
        </dependency>
        <dependency>
            <groupId>commons-dbutils</groupId>
            <artifactId>commons-dbutils</artifactId>
            <version>1.6</version>
        </dependency>
        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <version>5.1.34</version>
        </dependency>
        <dependency>
            <groupId>c3p0</groupId>
            <artifactId>c3p0</artifactId>
            <version>0.9.1.2</version>
        </dependency>

        <dependency>
            <groupId>com.alibaba.jstorm</groupId>
            <artifactId>jstorm-core</artifactId>
            <version>2.1.1</version>
            <scope>provided</scope>
            <exclusions>
                <exclusion>
                    <groupId>org.slf4j</groupId>
                    <artifactId>slf4j-api</artifactId>
                </exclusion>
                <exclusion>
                    <groupId>org.slf4j</groupId>
                    <artifactId>slf4j-nop</artifactId>
                </exclusion>
                <exclusion>
                    <groupId>org.slf4j</groupId>
                    <artifactId>slf4j-jdk14</artifactId>
                </exclusion>
                <exclusion>
                    <groupId>org.apache.maven</groupId>
                    <artifactId>maven-core</artifactId>
                </exclusion>
            </exclusions>
        </dependency>

        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka_2.11</artifactId>
            <version>0.9.0.1</version>
            <exclusions>
                <exclusion>
                    <groupId>org.slf4j</groupId>
                    <artifactId>slf4j-log4j12</artifactId>
                </exclusion>
                <exclusion>
                    <groupId>log4j</groupId>
                    <artifactId>log4j</artifactId>
                </exclusion>
            </exclusions>
        </dependency>

        <dependency>
            <groupId>org.apache.storm</groupId>
            <artifactId>storm-kafka</artifactId>
            <version>0.9.2-incubating</version>
        </dependency>

        <dependency>
            <groupId>com.espertech</groupId>
            <artifactId>esper</artifactId>
            <version>5.3.0</version>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <configuration>
                    <source>1.7</source>
                    <target>1.7</target>
                </configuration>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-shade-plugin</artifactId>
                <version>2.1</version>
                <executions>
                    <execution>
                        <phase>package</phase>
                        <goals>
                            <goal>shade</goal>
                        </goals>
                        <configuration>
                            <transformers>
                                <transformer implementation="org.apache.maven.plugins.shade.resource.AppendingTransformer">
                                    <resource>META-INF/spring.handlers</resource>
                                </transformer>
                                <transformer implementation="org.apache.maven.plugins.shade.resource.AppendingTransformer">
                                    <resource>META-INF/spring.schemas</resource>
                                </transformer>
                                <transformer implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
                                    <mainClass>com.yingjun.stock.topology.StockStategyTopology</mainClass>
                                </transformer>
                            </transformers>
                        </configuration>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>
</project>

--------------------------------------------------------------------------------
/src/main/java/com/yingjun/stock/bolt/ReportBolt.java:
--------------------------------------------------------------------------------
package com.yingjun.stock.bolt;

import backtype.storm.task.TopologyContext;
import backtype.storm.topology.BasicOutputCollector;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseBasicBolt;
import backtype.storm.tuple.Tuple;
import com.yingjun.stock.dto.ResultStock;
import com.yingjun.stock.mysql.ConnectionPool;
import org.apache.commons.dbutils.QueryRunner;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.sql.SQLException;
import java.util.Map;


/**
 * Report bolt: persists the selected stocks to MySQL.
 */
public class ReportBolt extends BaseBasicBolt {

    private final Logger log = LoggerFactory.getLogger(this.getClass());
    private QueryRunner query;

    @Override
    public void prepare(Map stormConf, TopologyContext context) {
        log.info("----------ReportBolt initializing...");
        query = new QueryRunner(ConnectionPool.getInstance().getDataSource());
    }

    /**
     * The bolt's own processing logic.
     */
    @Override
    public void execute(Tuple tuple, BasicOutputCollector basicOutputCollector) {
        ResultStock resultStock = (ResultStock) tuple.getValue(0);

        String sql = "insert into result(stock_code,stock_price,total_value,stategy_id) values(?,?,?,?)";
        Object[] params = {resultStock.getStockCode(), resultStock.getNewPrice(),
                resultStock.getTotalValue(), resultStock.getStrategyId()};

        try {
            query.update(sql, params);
        } catch (SQLException e) {
            log.error("SQLException:", e);
        }

    }

    /**
     * There is no downstream bolt, so nothing needs to be declared here.
     */
    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {

    }

}

--------------------------------------------------------------------------------
/src/main/java/com/yingjun/stock/bolt/StockFilterBolt.java:
--------------------------------------------------------------------------------
package com.yingjun.stock.bolt;

import backtype.storm.topology.BasicOutputCollector;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseBasicBolt;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Tuple;
import backtype.storm.tuple.Values;
import com.yingjun.stock.dto.StockRealTimeEvent;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;


/**
 * Filters out invalid quotes and quotes produced during the call-auction period.
 */
public class StockFilterBolt extends BaseBasicBolt {

    private final Logger log = LoggerFactory.getLogger(this.getClass());


    /**
     * The bolt's own processing logic.
     */
    @Override
    public void execute(Tuple tuple, BasicOutputCollector basicOutputCollector) {
        StockRealTimeEvent event = (StockRealTimeEvent) tuple.getValue(0);
        if (event.getNewPrice() == 0) {
            log.info("dropping invalid quote: " + event);
            return;
        }
        // Only price level 1 populated on both sides: call-auction data.
        if (event.getBuyPrice1() > 0 && event.getSellPrice1() > 0
                && (event.getBuyPrice2() + event.getSellPrice2()) == 0
                && (event.getBuyPrice5() + event.getSellPrice5()) == 0) {
            log.info("dropping call-auction quote: " + event);
            return;
        }
        basicOutputCollector.emit(new Values(event));
    }

    /**
     * Declare the fields id of the emitted tuples.
     */
    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("filter-stock"));
    }

}

--------------------------------------------------------------------------------
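The second condition above encodes a heuristic: during the call auction only the first bid/ask level carries a price, so a quote with level 1 populated on both sides and levels 2–5 empty is treated as auction data. A standalone sketch of the same predicate (illustrative class and method names; the sample values mirror stock/stock.log):

```java
import com.yingjun.stock.dto.StockRealTimeEvent;

// Feeds the two kinds of quotes through the same predicate StockFilterBolt
// applies (standalone sketch, not part of the project source).
public class FilterRuleDemo {
    static boolean dropped(StockRealTimeEvent e) {
        if (e.getNewPrice() == 0) return true;                  // dead quote
        return e.getBuyPrice1() > 0 && e.getSellPrice1() > 0    // only level 1
                && (e.getBuyPrice2() + e.getSellPrice2()) == 0  // populated =>
                && (e.getBuyPrice5() + e.getSellPrice5()) == 0; // call auction
    }

    public static void main(String[] args) {
        StockRealTimeEvent auction = new StockRealTimeEvent();
        auction.setStockCode("603618");
        auction.setNewPrice(15.2);
        auction.setBuyPrice1(15.2);   // only the first level carries a price,
        auction.setSellPrice1(15.2);  // which is what call-auction data looks like
        System.out.println(dropped(auction));  // true

        StockRealTimeEvent normal = new StockRealTimeEvent();
        normal.setStockCode("600570");
        normal.setNewPrice(55.67);
        normal.setBuyPrice1(55.67);
        normal.setBuyPrice2(55.66);   // deeper levels present: continuous trading
        normal.setSellPrice1(55.72);
        normal.setSellPrice5(55.8);
        System.out.println(dropped(normal));   // false
    }
}
```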
/src/main/java/com/yingjun/stock/bolt/StockStrategyBolt1.java:
--------------------------------------------------------------------------------
package com.yingjun.stock.bolt;

import backtype.storm.task.TopologyContext;
import backtype.storm.topology.BasicOutputCollector;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseBasicBolt;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Tuple;
import backtype.storm.tuple.Values;
import com.espertech.esper.client.*;
import com.yingjun.stock.dto.ResultStock;
import com.yingjun.stock.dto.StockRealTimeEvent;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Map;



/**
 * Stock strategy 1 (heavy selling).
 * Select a stock when the total lots on its five ask levels reach 100x the
 * total lots on its five bid levels.
 *
 * @author yingjun
 */
public class StockStrategyBolt1 extends BaseBasicBolt {

    private final Logger log = LoggerFactory.getLogger(this.getClass());
    private BasicOutputCollector outputCollector;
    private EPServiceProvider epService;

    @Override
    public void prepare(Map stormConf, TopologyContext context) {
        log.info("----------strategy 1 (heavy selling) initializing...");
        Configuration configuration = new Configuration();
        configuration.addEventType("StockRealTimeEvent", StockRealTimeEvent.class.getName());
        epService = EPServiceProviderManager.getProvider("strategy1", configuration);
        // Select quotes whose ask-side depth dwarfs the bid side.
        EPStatement stmt = epService.getEPAdministrator()
                .createEPL("select * from StockRealTimeEvent where " +
                        "(buyCount5+buyCount4+buyCount3+buyCount2+buyCount1)*100" +
                        "<=(sellCount5+sellCount4+sellCount3+sellCount2+sellCount1)");

        stmt.addListener(new UpdateListener() {
            @Override
            public void update(EventBean[] newEvents, EventBean[] oldEvents) {
                if (newEvents != null) {
                    EventBean theEvent = newEvents[0];
                    StockRealTimeEvent stockRTEvent = (StockRealTimeEvent) theEvent.getUnderlying();
                    log.info("---------- strategy 1 (heavy selling) picked stock: " + stockRTEvent.getStockCode() + " last price: " + stockRTEvent.getNewPrice());
                    outputCollector.emit(new Values(new ResultStock(stockRTEvent.getStockCode(), stockRTEvent.getNewPrice(), 1)));
                }
            }
        });
    }

    /**
     * The bolt's own processing logic.
     */
    @Override
    public void execute(Tuple tuple, BasicOutputCollector basicOutputCollector) {
        this.outputCollector = basicOutputCollector;
        StockRealTimeEvent stockRealTimeEvent = (StockRealTimeEvent) tuple.getValue(0);
        log.info("strategy 1 (heavy selling) ===> Esper: " + stockRealTimeEvent);
        epService.getEPRuntime().sendEvent(stockRealTimeEvent);
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("StockStrategy1"));
    }

    @Override
    public void cleanup() {
        if (!epService.isDestroyed()) {
            epService.destroy();
        }
    }

}

--------------------------------------------------------------------------------
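Each strategy bolt embeds its own Esper engine: prepare() registers the event type, compiles one EPL statement and hangs a listener on it, and execute() just pushes events in. The same pattern works outside Storm, which makes the EPL easy to test in isolation — a minimal harness (illustrative names and sample values):

```java
import com.espertech.esper.client.*;
import com.yingjun.stock.dto.StockRealTimeEvent;

// Minimal standalone harness for the strategy-1 EPL above: no Storm involved,
// just provider -> statement -> listener -> sendEvent (illustrative sketch).
public class Strategy1Harness {
    public static void main(String[] args) {
        Configuration config = new Configuration();
        config.addEventType("StockRealTimeEvent", StockRealTimeEvent.class.getName());
        EPServiceProvider ep = EPServiceProviderManager.getProvider("harness", config);
        EPStatement stmt = ep.getEPAdministrator().createEPL(
                "select * from StockRealTimeEvent where "
                + "(buyCount5+buyCount4+buyCount3+buyCount2+buyCount1)*100"
                + "<=(sellCount5+sellCount4+sellCount3+sellCount2+sellCount1)");
        stmt.addListener(new UpdateListener() {
            @Override
            public void update(EventBean[] newEvents, EventBean[] oldEvents) {
                StockRealTimeEvent hit = (StockRealTimeEvent) newEvents[0].getUnderlying();
                System.out.println("strategy-1 hit: " + hit.getStockCode());
            }
        });
        StockRealTimeEvent quote = new StockRealTimeEvent();  // ask depth = 200x bid depth
        quote.setStockCode("600570");
        quote.setNewPrice(55.67);
        quote.setBuyCount1(10);
        quote.setSellCount1(2000);
        ep.getEPRuntime().sendEvent(quote);                   // listener fires synchronously
        ep.destroy();
    }
}
```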
/src/main/java/com/yingjun/stock/bolt/StockStrategyBolt2.java:
--------------------------------------------------------------------------------
package com.yingjun.stock.bolt;

import backtype.storm.task.TopologyContext;
import backtype.storm.topology.BasicOutputCollector;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseBasicBolt;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Tuple;
import backtype.storm.tuple.Values;
import com.espertech.esper.client.*;
import com.yingjun.stock.dto.ResultStock;
import com.yingjun.stock.dto.StockRealTimeEvent;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Map;


/**
 * Stock strategy 2 (heavy buying).
 * Select a stock when the total lots on its five bid levels reach 100x the
 * total lots on its five ask levels.
 *
 * @author yingjun
 */
public class StockStrategyBolt2 extends BaseBasicBolt {

    private final Logger log = LoggerFactory.getLogger(this.getClass());
    private BasicOutputCollector outputCollector;
    private EPServiceProvider epService;


    /**
     * Initialization that runs once the task has started;
     * Esper is set up here.
     *
     * @param stormConf
     * @param context
     */
    @Override
    public void prepare(Map stormConf, TopologyContext context) {
        log.info("----------strategy 2 (heavy buying) initializing...");
        Configuration configuration = new Configuration();
        configuration.addEventType("StockRealTimeEvent", StockRealTimeEvent.class.getName());
        epService = EPServiceProviderManager.getProvider("strategy2", configuration);

        // Select quotes whose bid-side depth dwarfs the ask side.
        EPStatement stmt = epService.getEPAdministrator()
                .createEPL("select * from StockRealTimeEvent where " +
                        "(buyCount5+buyCount4+buyCount3+buyCount2+buyCount1)" +
                        ">=(sellCount5+sellCount4+sellCount3+sellCount2+sellCount1)*100");

        stmt.addListener(new UpdateListener() {
            @Override
            public void update(EventBean[] newEvents, EventBean[] oldEvents) {
                if (newEvents != null) {
                    EventBean theEvent = newEvents[0];
                    StockRealTimeEvent stockRTEvent = (StockRealTimeEvent) theEvent.getUnderlying();
                    log.info("---------- strategy 2 (heavy buying) picked stock: " + stockRTEvent.getStockCode() + " last price: " + stockRTEvent.getNewPrice());
                    outputCollector.emit(new Values(new ResultStock(stockRTEvent.getStockCode(), stockRTEvent.getNewPrice(), 2)));
                }
            }
        });
    }

    /**
     * The bolt's own processing logic.
     */
    @Override
    public void execute(Tuple tuple, BasicOutputCollector basicOutputCollector) {
        this.outputCollector = basicOutputCollector;
        StockRealTimeEvent stockRealTimeEvent = (StockRealTimeEvent) tuple.getValue(0);
        log.info("strategy 2 (heavy buying) ===> Esper: " + stockRealTimeEvent);
        epService.getEPRuntime().sendEvent(stockRealTimeEvent);
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("StockStrategy2"));
    }

    @Override
    public void cleanup() {
        if (!epService.isDestroyed()) {
            epService.destroy();
        }
    }

}

--------------------------------------------------------------------------------
/src/main/java/com/yingjun/stock/bolt/StockStrategyBolt3.java:
--------------------------------------------------------------------------------
package com.yingjun.stock.bolt;

import backtype.storm.task.TopologyContext;
import backtype.storm.topology.BasicOutputCollector;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseBasicBolt;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Tuple;
import backtype.storm.tuple.Values;
import com.espertech.esper.client.*;
import com.yingjun.stock.dto.ResultStock;
import com.yingjun.stock.dto.StockRealTimeEvent;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Map;


/**
 * Stock strategy 3 (volume surge).
 * Select stocks whose turnover exceeds 10,000,000 yuan within 10 seconds.
 *
 * @author yingjun
 */
public class StockStrategyBolt3 extends BaseBasicBolt {

    private final Logger log = LoggerFactory.getLogger(this.getClass());
    private BasicOutputCollector outputCollector;
    private EPServiceProvider epService;

    /**
     * Initialization that runs once the task has started;
     * Esper is set up here.
     *
     * @param stormConf
     * @param context
     */
    @Override
    public void prepare(Map stormConf, TopologyContext context) {
        log.info("----------strategy 3 (volume surge) initializing...");
        Configuration configuration = new Configuration();
        configuration.addEventType("StockRealTimeEvent", StockRealTimeEvent.class.getName());
        epService = EPServiceProviderManager.getProvider("strategy3", configuration);

        // Pick out stocks whose turnover within 10 seconds exceeds 10 million.
        EPStatement stmt = epService.getEPAdministrator()
                .createEPL("select stockCode,newPrice,sum(newPrice*current*100) as totalValue " +
                        "from StockRealTimeEvent.win:time(10 sec) group by stockCode " +
                        "having sum(newPrice*current*100)>10000000");

        stmt.addListener(new UpdateListener() {
            @Override
            public void update(EventBean[] newEvents, EventBean[] oldEvents) {
                if (newEvents != null) {
                    EventBean theEvent = newEvents[0];
                    double totalValue = (Double) theEvent.get("totalValue");
                    double newPrice = (Double) theEvent.get("newPrice");
                    String stockCode = (String) theEvent.get("stockCode");
                    log.info("---------- strategy 3 (volume surge) picked stock: " + stockCode + " last price: " + newPrice + " turnover: " + totalValue);
                    outputCollector.emit(new Values(new ResultStock(stockCode, newPrice, 3, totalValue)));
                }
            }
        });
    }

    /**
     * The bolt's own processing logic.
     */
    @Override
    public void execute(Tuple tuple, BasicOutputCollector basicOutputCollector) {
        this.outputCollector = basicOutputCollector;
        StockRealTimeEvent stockRealTimeEvent = (StockRealTimeEvent) tuple.getValue(0);
        log.info("strategy 3 (volume surge) ===> Esper: " + stockRealTimeEvent);
        epService.getEPRuntime().sendEvent(stockRealTimeEvent);
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("StockStrategy3"));
    }

    @Override
    public void cleanup() {
        if (!epService.isDestroyed()) {
            epService.destroy();
        }
    }
}

--------------------------------------------------------------------------------
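Strategy 3's EPL is the only stateful one: `newPrice*current*100` approximates the tick's turnover in yuan (current is in lots of 100 shares), and the `win:time(10 sec)` view sums it per stock code over a sliding 10-second window. A standalone sketch of those window semantics (illustrative; two 1,000-lot ticks at 55.67 add up to 11,134,000 > 10,000,000, so the listener fires on the second tick):

```java
import com.espertech.esper.client.*;
import com.yingjun.stock.dto.StockRealTimeEvent;

// Sketch of the strategy-3 window semantics, outside Storm.
public class Strategy3WindowDemo {
    public static void main(String[] args) throws InterruptedException {
        Configuration config = new Configuration();
        config.addEventType("StockRealTimeEvent", StockRealTimeEvent.class.getName());
        EPServiceProvider ep = EPServiceProviderManager.getProvider("windowDemo", config);
        EPStatement stmt = ep.getEPAdministrator().createEPL(
                "select stockCode, sum(newPrice*current*100) as totalValue "
                + "from StockRealTimeEvent.win:time(10 sec) group by stockCode "
                + "having sum(newPrice*current*100)>10000000");
        stmt.addListener(new UpdateListener() {
            @Override
            public void update(EventBean[] newEvents, EventBean[] oldEvents) {
                if (newEvents != null)
                    System.out.println(newEvents[0].get("stockCode") + " turnover "
                            + newEvents[0].get("totalValue"));  // fires on the 2nd tick
            }
        });
        for (int i = 0; i < 2; i++) {       // 2 x (55.67 * 1000 * 100) = 11,134,000
            StockRealTimeEvent tick = new StockRealTimeEvent();
            tick.setStockCode("600570");
            tick.setNewPrice(55.67);
            tick.setCurrent(1000);          // 1000 lots traded in this tick
            ep.getEPRuntime().sendEvent(tick);
            Thread.sleep(100);              // both ticks fall inside the window
        }
        ep.destroy();
    }
}
```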
log.info("策略3(放巨量)===> Esper:" + stockRealTimeEvent); 76 | epService.getEPRuntime().sendEvent(stockRealTimeEvent); 77 | } 78 | 79 | @Override 80 | public void declareOutputFields(OutputFieldsDeclarer declarer) { 81 | declarer.declare(new Fields("StockStrategy3")); 82 | } 83 | 84 | @Override 85 | public void cleanup() { 86 | if (!epService.isDestroyed()) { 87 | epService.destroy(); 88 | } 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /src/main/java/com/yingjun/stock/dto/ResultStock.java: -------------------------------------------------------------------------------- 1 | package com.yingjun.stock.dto; 2 | 3 | import java.io.Serializable; 4 | 5 | /** 6 | * @author yingjun 7 | */ 8 | public class ResultStock implements Serializable { 9 | 10 | private String stockCode; 11 | private double newPrice; 12 | private double totalValue; 13 | private int strategyId; 14 | 15 | public ResultStock() { 16 | 17 | } 18 | 19 | public ResultStock(String stockCode, double newPrice, int strategyId) { 20 | this.stockCode = stockCode; 21 | this.newPrice = newPrice; 22 | this.strategyId = strategyId; 23 | } 24 | 25 | public ResultStock(String stockCode, double newPrice, int strategyId, double totalValue) { 26 | this.stockCode = stockCode; 27 | this.strategyId = strategyId; 28 | this.totalValue = totalValue; 29 | this.newPrice = newPrice; 30 | } 31 | 32 | public int getStrategyId() { 33 | return strategyId; 34 | } 35 | 36 | public void setStrategyId(int strategyId) { 37 | this.strategyId = strategyId; 38 | } 39 | 40 | public double getTotalValue() { 41 | return totalValue; 42 | } 43 | 44 | public void setTotalValue(double totalValue) { 45 | this.totalValue = totalValue; 46 | } 47 | 48 | public double getNewPrice() { 49 | return newPrice; 50 | } 51 | 52 | public void setNewPrice(double newPrice) { 53 | this.newPrice = newPrice; 54 | } 55 | 56 | public String getStockCode() { 57 | return stockCode; 58 | } 59 | 60 | public void setStockCode(String stockCode) { 61 | this.stockCode = stockCode; 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /src/main/java/com/yingjun/stock/dto/StockRealTimeEvent.java: -------------------------------------------------------------------------------- 1 | package com.yingjun.stock.dto; 2 | 3 | import java.io.Serializable; 4 | 5 | /** 6 | * 7 | * 行情 Bean 8 | * 9 | * @author yingjun 10 | * 11 | */ 12 | public class StockRealTimeEvent implements Serializable{ 13 | 14 | private String stockCode; 15 | private double newPrice; 16 | private double buyPrice1; 17 | private long buyCount1; 18 | private double buyPrice2; 19 | private long buyCount2; 20 | private double buyPrice3; 21 | private long buyCount3; 22 | private double buyPrice4; 23 | private long buyCount4; 24 | private double buyPrice5; 25 | private long buyCount5; 26 | private double sellPrice1; 27 | private long sellCount1; 28 | private double sellPrice2; 29 | private long sellCount2; 30 | private double sellPrice3; 31 | private long sellCount3; 32 | private double sellPrice4; 33 | private long sellCount4; 34 | private double sellPrice5; 35 | private long sellCount5; 36 | private int current; //当前成交手数 37 | 38 | 39 | public String getStockCode() { 40 | return stockCode; 41 | } 42 | 43 | public void setStockCode(String stockCode) { 44 | this.stockCode = stockCode; 45 | } 46 | 47 | public double getNewPrice() { 48 | return newPrice; 49 | } 50 | 51 | public void setNewPrice(double newPrice) { 52 | this.newPrice = newPrice; 53 | } 54 | 55 
/src/main/java/com/yingjun/stock/dto/StockRealTimeEvent.java:
--------------------------------------------------------------------------------
package com.yingjun.stock.dto;

import java.io.Serializable;

/**
 *
 * Market-quote bean.
 *
 * @author yingjun
 *
 */
public class StockRealTimeEvent implements Serializable {

    private String stockCode;
    private double newPrice;
    private double buyPrice1;
    private long buyCount1;
    private double buyPrice2;
    private long buyCount2;
    private double buyPrice3;
    private long buyCount3;
    private double buyPrice4;
    private long buyCount4;
    private double buyPrice5;
    private long buyCount5;
    private double sellPrice1;
    private long sellCount1;
    private double sellPrice2;
    private long sellCount2;
    private double sellPrice3;
    private long sellCount3;
    private double sellPrice4;
    private long sellCount4;
    private double sellPrice5;
    private long sellCount5;
    private int current; // lots traded in the current tick


    public String getStockCode() {
        return stockCode;
    }

    public void setStockCode(String stockCode) {
        this.stockCode = stockCode;
    }

    public double getNewPrice() {
        return newPrice;
    }

    public void setNewPrice(double newPrice) {
        this.newPrice = newPrice;
    }


    public double getBuyPrice1() {
        return buyPrice1;
    }

    public void setBuyPrice1(double buyPrice1) {
        this.buyPrice1 = buyPrice1;
    }

    public long getBuyCount1() {
        return buyCount1;
    }

    public void setBuyCount1(long buyCount1) {
        this.buyCount1 = buyCount1;
    }

    public double getBuyPrice2() {
        return buyPrice2;
    }

    public void setBuyPrice2(double buyPrice2) {
        this.buyPrice2 = buyPrice2;
    }

    public long getBuyCount2() {
        return buyCount2;
    }

    public void setBuyCount2(long buyCount2) {
        this.buyCount2 = buyCount2;
    }

    public double getBuyPrice3() {
        return buyPrice3;
    }

    public void setBuyPrice3(double buyPrice3) {
        this.buyPrice3 = buyPrice3;
    }

    public long getBuyCount3() {
        return buyCount3;
    }

    public void setBuyCount3(long buyCount3) {
        this.buyCount3 = buyCount3;
    }

    public double getBuyPrice4() {
        return buyPrice4;
    }

    public void setBuyPrice4(double buyPrice4) {
        this.buyPrice4 = buyPrice4;
    }

    public long getBuyCount4() {
        return buyCount4;
    }

    public void setBuyCount4(long buyCount4) {
        this.buyCount4 = buyCount4;
    }

    public double getBuyPrice5() {
        return buyPrice5;
    }

    public void setBuyPrice5(double buyPrice5) {
        this.buyPrice5 = buyPrice5;
    }

    public long getBuyCount5() {
        return buyCount5;
    }

    public void setBuyCount5(long buyCount5) {
        this.buyCount5 = buyCount5;
    }


    public double getSellPrice1() {
        return sellPrice1;
    }

    public void setSellPrice1(double sellPrice1) {
        this.sellPrice1 = sellPrice1;
    }

    public long getSellCount1() {
        return sellCount1;
    }

    public void setSellCount1(long sellCount1) {
        this.sellCount1 = sellCount1;
    }

    public double getSellPrice2() {
        return sellPrice2;
    }

    public void setSellPrice2(double sellPrice2) {
        this.sellPrice2 = sellPrice2;
    }

    public long getSellCount2() {
        return sellCount2;
    }

    public void setSellCount2(long sellCount2) {
        this.sellCount2 = sellCount2;
    }

    public double getSellPrice3() {
        return sellPrice3;
    }

    public void setSellPrice3(double sellPrice3) {
        this.sellPrice3 = sellPrice3;
    }

    public long getSellCount3() {
        return sellCount3;
    }

    public void setSellCount3(long sellCount3) {
        this.sellCount3 = sellCount3;
    }

    public double getSellPrice4() {
        return sellPrice4;
    }

    public void setSellPrice4(double sellPrice4) {
        this.sellPrice4 = sellPrice4;
    }

    public long getSellCount4() {
        return sellCount4;
    }

    public void setSellCount4(long sellCount4) {
        this.sellCount4 = sellCount4;
    }

    public double getSellPrice5() {
        return sellPrice5;
    }

    public void setSellPrice5(double sellPrice5) {
        this.sellPrice5 = sellPrice5;
    }

    public long getSellCount5() {
        return sellCount5;
    }

    public void setSellCount5(long sellCount5) {
        this.sellCount5 = sellCount5;
    }

    public int getCurrent() {
        return current;
    }

    public void setCurrent(int current) {
        this.current = current;
    }

    @Override
    public String toString() {
        return "StockRealTimeEvent{" +
                "stockCode='" + stockCode + '\'' +
                ", newPrice=" + newPrice +
                ", buyPrice1=" + buyPrice1 +
                ", buyCount1=" + buyCount1 +
                ", buyPrice2=" + buyPrice2 +
                ", buyCount2=" + buyCount2 +
                ", buyPrice3=" + buyPrice3 +
                ", buyCount3=" + buyCount3 +
                ", buyPrice4=" + buyPrice4 +
                ", buyCount4=" + buyCount4 +
                ", buyPrice5=" + buyPrice5 +
                ", buyCount5=" + buyCount5 +
                ", sellPrice1=" + sellPrice1 +
                ", sellCount1=" + sellCount1 +
                ", sellPrice2=" + sellPrice2 +
                ", sellCount2=" + sellCount2 +
                ", sellPrice3=" + sellPrice3 +
                ", sellCount3=" + sellCount3 +
                ", sellPrice4=" + sellPrice4 +
                ", sellCount4=" + sellCount4 +
                ", sellPrice5=" + sellPrice5 +
                ", sellCount5=" + sellCount5 +
                ", current=" + current +
                '}';
    }
}
--------------------------------------------------------------------------------
/src/main/java/com/yingjun/stock/mysql/ConnectionPool.java:
--------------------------------------------------------------------------------
package com.yingjun.stock.mysql;

import com.mchange.v2.c3p0.ComboPooledDataSource;
import com.mchange.v2.c3p0.DataSources;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.sql.DataSource;
import java.io.InputStream;
import java.sql.SQLException;
import java.util.Properties;

/**
 * @author yingjun
 */
public class ConnectionPool {


    private final Logger log = LoggerFactory.getLogger(this.getClass());
    private volatile static ConnectionPool instance;
    private ComboPooledDataSource dataSource;


    public static ConnectionPool getInstance() {
        if (instance == null) {
            synchronized (ConnectionPool.class) {
                if (instance == null) {
                    instance = new ConnectionPool();
                }
            }
        }
        return instance;
    }


    private ConnectionPool() {
        Properties properties = new Properties();
        InputStream inputStream = getClass().getClassLoader().getResourceAsStream("config.properties");
        try {
            properties.load(inputStream);
            String driverClass = properties.getProperty("driver.class");
            String jdbcUrl = properties.getProperty("jdbc.url");
            String user = properties.getProperty("user");
            String password = properties.getProperty("password");
            int initPoolSize = Integer.parseInt(properties.getProperty("init.pool.size"));
            int minPoolSize = Integer.parseInt(properties.getProperty("min.pool.size"));
            int maxPoolSize = Integer.parseInt(properties.getProperty("max.pool.size"));
            dataSource = new ComboPooledDataSource();
            dataSource.setDriverClass(driverClass);
            dataSource.setJdbcUrl(jdbcUrl);
            dataSource.setUser(user);
            dataSource.setPassword(password);
            dataSource.setInitialPoolSize(initPoolSize);
            dataSource.setMinPoolSize(minPoolSize);
            dataSource.setMaxPoolSize(maxPoolSize);
        } catch (Exception e) {
            log.error("Exception:", e);
        }
    }


    public DataSource getDataSource() {
        return dataSource;
    }


    public void destroy() throws SQLException {
        DataSources.destroy(dataSource);
    }

}

--------------------------------------------------------------------------------
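ConnectionPool is a lazily-initialized, double-checked-locking singleton around a C3P0 ComboPooledDataSource, so all bolt instances inside a worker share one pool. Typical usage mirrors ReportBolt: hand the DataSource to a commons-dbutils QueryRunner (illustrative sketch; the result table comes from scheme.sql below):

```java
import com.yingjun.stock.mysql.ConnectionPool;
import org.apache.commons.dbutils.QueryRunner;

// Illustrative usage of the shared pool with commons-dbutils; this is the
// same pattern ReportBolt.prepare()/execute() follow.
public class PoolUsageDemo {
    public static void main(String[] args) throws Exception {
        QueryRunner runner = new QueryRunner(ConnectionPool.getInstance().getDataSource());
        runner.update(
                "insert into result(stock_code,stock_price,total_value,stategy_id) values(?,?,?,?)",
                "600570", 55.67, 0d, 1);
        ConnectionPool.getInstance().destroy();  // release the pool when done
    }
}
```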
/src/main/java/com/yingjun/stock/spout/StockSpout.java:
--------------------------------------------------------------------------------
package com.yingjun.stock.spout;

import backtype.storm.spout.SpoutOutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichSpout;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Values;
import backtype.storm.utils.Utils;
import com.alibaba.fastjson.JSONObject;
import com.yingjun.stock.dto.StockRealTimeEvent;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.*;
import java.util.Map;
import java.util.Properties;

/**
 * Reads quote data from a local log file.
 *
 * Superseded: the topology now uses KafkaSpout to fetch data.
 *
 */
@Deprecated
public class StockSpout extends BaseRichSpout {

    private final Logger log = LoggerFactory.getLogger(this.getClass());
    private SpoutOutputCollector collector;
    private BufferedReader bufferedReader;

    /**
     * open is the initialization that runs once the task has started.
     *
     * @param conf
     * @param context
     * @param collector
     */
    @Override
    public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {

        // number of spout tasks, taken from the topology context
        int spoutsSize = context.getComponentTasks(context.getThisComponentId()).size();
        // this task's index within the spout
        int myIdx = context.getThisTaskIndex();

        this.collector = collector;
        Properties p = new Properties();
        try {
            InputStream inputStream = StockSpout.class.getClassLoader().getResourceAsStream("config.properties");
            bufferedReader = new BufferedReader(new InputStreamReader(inputStream, "UTF-8"));
            p.load(bufferedReader);
            bufferedReader = new BufferedReader(new FileReader(new File(p.getProperty("stockLogFile"))));
        } catch (IOException e) {
            log.error("IOException:", e);
            throw new RuntimeException(e);
        }
    }

    /**
     * nextTuple is the heart of the spout:
     * emit the messages through the collector.
     */
    @Override
    public void nextTuple() {
        String json = null;
        try {
            if ((json = bufferedReader.readLine()) != null) {
                // turn the log line into a Java object
                log.info("emit:" + json);
                try {
                    StockRealTimeEvent stockRealTimeEvent = JSONObject.parseObject(json, StockRealTimeEvent.class);
                    Values values = new Values(stockRealTimeEvent);
                    // attach a message id so the tuple can be acked or replayed
                    collector.emit(values, stockRealTimeEvent);
                } catch (Exception e) {
                    log.info("Exception:", e);
                }
            } else {
                // no new data in the log: sleep for a second.
                Utils.sleep(1000);
            }
        } catch (Exception e) {
            log.error("IOException:", e);
            throw new RuntimeException(e);
        }
    }

    /**
     * Declare the meaning of each field of the tuples this spout emits.
     *
     * @param declarer
     */
    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        // the spout emits tuples with a single field named "line"
        declarer.declare(new Fields("line"));
    }

    @Override
    public void ack(Object msgId) {
        // the message was processed completely
        log.info("-----------ack:" + msgId);
    }

    @Override
    public void fail(Object msgId) {
        // processing failed; record it (e.g. to an MQ) for replay
        log.info("-----------fail:" + msgId);
    }
}

--------------------------------------------------------------------------------
/src/main/java/com/yingjun/stock/topology/StockStategyTopology.java:
--------------------------------------------------------------------------------
package com.yingjun.stock.topology;

import backtype.storm.Config;
import backtype.storm.LocalCluster;
import backtype.storm.StormSubmitter;
import backtype.storm.spout.SchemeAsMultiScheme;
import backtype.storm.topology.TopologyBuilder;
import com.yingjun.stock.bolt.*;
import com.yingjun.stock.utils.EventScheme;
import org.apache.thrift.TException;
import storm.kafka.BrokerHosts;
import storm.kafka.KafkaSpout;
import storm.kafka.SpoutConfig;
import storm.kafka.ZkHosts;

import java.util.Arrays;

/**
 * @author yingjun
 */
public class StockStategyTopology {

    public static void main(String[] args) throws TException {

        // Configure Kafka
        String zks = "192.168.xx.100:2181,192.168.xx.101:2181,192.168.xx.102:2181";
        String topic = "stock";
        // default zookeeper root configuration for storm
        String zkRoot = "/kafkaStorm";
        String spoutId = "kafkaSpout";
        BrokerHosts brokerHosts = new ZkHosts(zks);
        SpoutConfig spoutConfig = new SpoutConfig(brokerHosts, topic, zkRoot, spoutId);
        spoutConfig.scheme = new SchemeAsMultiScheme(new EventScheme());
        // whether, after the topology stopped because of a failure, the spout should
        // re-read the subscribed topic from the beginning on the next normal run
        // instead of resuming from the last recorded offset
        spoutConfig.forceFromStart = false;
        spoutConfig.zkServers = Arrays.asList(new String[]{"192.168.xx.100", "192.168.xx.101", "192.168.xx.102"});
        spoutConfig.zkPort = 2181;

        // create the topology builder
        TopologyBuilder builder = new TopologyBuilder();

        // the Kafka topic was created with 2 partitions, so spout parallelism is set to 2
        builder.setSpout("kafka-reader", new KafkaSpout(spoutConfig), 2)
                .setNumTasks(2);

        // Wire up the processing bolts: name, instance, parallelism.
        builder.setBolt("stock-filter", new StockFilterBolt(), 2) // 2 executors
                .setNumTasks(2) // number of associated tasks
                .shuffleGrouping("kafka-reader");

        builder.setBolt("stock-stategy-1", new StockStrategyBolt1(), 2)
                .setNumTasks(2)
                .shuffleGrouping("stock-filter");
        builder.setBolt("stock-stategy-2", new StockStrategyBolt2(), 2)
                .setNumTasks(2)
                .shuffleGrouping("stock-filter");
        builder.setBolt("stock-stategy-3", new StockStrategyBolt3(), 2)
                .setNumTasks(2)
                .shuffleGrouping("stock-filter");
        builder.setBolt("report", new ReportBolt(), 1)
                .setNumTasks(2)
                .shuffleGrouping("stock-stategy-1")
                .shuffleGrouping("stock-stategy-2")
                .shuffleGrouping("stock-stategy-3");

        Config config = new Config();
        // cap the number of un-acked tuples pending on one spout task so its queue cannot blow up
        config.put(Config.TOPOLOGY_MAX_SPOUT_PENDING, 10000);
        config.setDebug(false);
        // number of worker processes running this topology; recommended to exceed the number of physical machines.
        config.setNumWorkers(2);
        String name = StockStategyTopology.class.getSimpleName();

        if (args != null && args.length > 0) {
            // Nimbus host name passed from command line
            config.put(Config.NIMBUS_HOST, args[0]);
            StormSubmitter.submitTopologyWithProgressBar(name, config, builder.createTopology());
        } else {
            // startup path for running in local mode.
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology("test", config, builder.createTopology());
        }

    }
}

--------------------------------------------------------------------------------
/src/main/java/com/yingjun/stock/utils/EventScheme.java:
--------------------------------------------------------------------------------
package com.yingjun.stock.utils;

import backtype.storm.spout.Scheme;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Values;
import com.alibaba.fastjson.JSONObject;
import com.yingjun.stock.dto.StockRealTimeEvent;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.List;

/**
 * @author yingjun
 */
public class EventScheme implements Scheme {

    private static final Logger log = LoggerFactory.getLogger(EventScheme.class);

    @Override
    public List<Object> deserialize(byte[] bytes) {
        try {
            String msg = new String(bytes, "UTF-8");
            StockRealTimeEvent stockRealTimeEvent = JSONObject.parseObject(msg, StockRealTimeEvent.class);
            Values values = new Values(stockRealTimeEvent);
            return values;
        } catch (Exception e) {
            log.error("Exception:", e);
        }
        return null;
    }

    @Override
    public Fields getOutputFields() {
        return new Fields("msg");
    }
}

--------------------------------------------------------------------------------
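EventScheme is the single point where Kafka message bytes become StockRealTimeEvent tuples, since Flume ships the raw JSON log line as the message body. A round-trip sketch of that wire format (illustrative class name; fastjson in both directions):

```java
import com.alibaba.fastjson.JSONObject;
import com.yingjun.stock.dto.StockRealTimeEvent;
import com.yingjun.stock.utils.EventScheme;

// Round-trip check of the Kafka wire format: bean -> JSON bytes -> EventScheme.
public class SchemeRoundTrip {
    public static void main(String[] args) throws Exception {
        StockRealTimeEvent event = new StockRealTimeEvent();
        event.setStockCode("603618");
        event.setNewPrice(15.2);
        byte[] wire = JSONObject.toJSONString(event).getBytes("UTF-8");
        StockRealTimeEvent decoded =
                (StockRealTimeEvent) new EventScheme().deserialize(wire).get(0);
        System.out.println(decoded);  // same field values as the original bean
    }
}
```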
/src/main/resources/config.properties:
--------------------------------------------------------------------------------
# filename of the log file
stockLogFile=F:\\stock_test.log

driver.class=com.mysql.jdbc.Driver
jdbc.url=jdbc:mysql://127.0.0.1:3306/stock_result?useUnicode=true&characterEncoding=UTF-8
user=root
password=yingjun
init.pool.size=2
min.pool.size=1
max.pool.size=10
--------------------------------------------------------------------------------
/src/main/resources/logback.xml:
--------------------------------------------------------------------------------
<?xml version="1.0" encoding="UTF-8"?>
<configuration>

    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
        <encoder>
            <pattern>%d{yyyy-MM-dd HH:mm:ss} [%thread] %-5level %logger -%msg%n</pattern>
        </encoder>
    </appender>

    <appender name="DEBUG_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>./logs/debug.log.%d{yyyy-MM-dd}</fileNamePattern>
            <maxHistory>30</maxHistory>
        </rollingPolicy>
        <encoder>
            <pattern>%d{yyyy-MM-dd HH:mm:ss} [%thread] %-5level %logger -%msg%n</pattern>
        </encoder>
    </appender>

    <appender name="ERROR_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>./logs/error.log.%d{yyyy-MM-dd}</fileNamePattern>
            <maxHistory>60</maxHistory>
        </rollingPolicy>
        <encoder>
            <pattern>%d{yyyy-MM-dd HH:mm:ss} [%thread] %-5level %logger -%msg%n</pattern>
        </encoder>
    </appender>

    <root level="INFO">
        <appender-ref ref="STDOUT"/>
        <appender-ref ref="DEBUG_FILE"/>
        <appender-ref ref="ERROR_FILE"/>
    </root>

</configuration>
--------------------------------------------------------------------------------
/src/main/resources/scheme.sql:
--------------------------------------------------------------------------------
CREATE TABLE `result` (
  `stock_id` INT(11) NOT NULL AUTO_INCREMENT COMMENT 'ID',
  `stock_code` VARCHAR(10) DEFAULT NULL COMMENT 'stock code',
  `stock_price` DOUBLE DEFAULT NULL COMMENT 'stock price',
  `stategy_id` INT(11) DEFAULT NULL COMMENT 'strategy ID',
  `total_value` DOUBLE DEFAULT NULL COMMENT 'turnover within the strategy-3 window',
  `create_time` TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'creation time',
  PRIMARY KEY (`stock_id`)
) ENGINE=MYISAM AUTO_INCREMENT=459 DEFAULT CHARSET=utf8

--------------------------------------------------------------------------------
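To eyeball what the strategies have selected, a small reader over the result table will do (illustrative sketch, again via ConnectionPool and commons-dbutils):

```java
import java.util.List;
import java.util.Map;
import org.apache.commons.dbutils.QueryRunner;
import org.apache.commons.dbutils.handlers.MapListHandler;
import com.yingjun.stock.mysql.ConnectionPool;

// Dump the most recent picks from the result table, newest first.
public class ResultReader {
    public static void main(String[] args) throws Exception {
        QueryRunner runner = new QueryRunner(ConnectionPool.getInstance().getDataSource());
        List<Map<String, Object>> rows = runner.query(
                "select stock_code, stock_price, stategy_id, total_value, create_time "
                + "from result order by create_time desc limit 20",
                new MapListHandler());
        for (Map<String, Object> row : rows) System.out.println(row);
    }
}
```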
"sellCount2":185, "sellPrice3":55.76, "sellCount3":10, "sellPrice4":55.78, "sellCount4":10, "sellPrice5":55.8, "sellCount5":165, "current":0} 3 | {"stockCode":"600570", "newPrice":55.67, "buyPrice1":55.67, "buyCount1":100, "buyPrice2":55.66, "buyCount2":23, "buyPrice3":55.65, "buyCount3":30, "buyPrice4":55.60, "buyCount4":4, "buyPrice5":55.55, "buyCount5":10, "sellPrice1":55.72, "sellCount1":6526, "sellPrice2":55.74, "sellCount2":185, "sellPrice3":55.76, "sellCount3":10, "sellPrice4":55.78, "sellCount4":10, "sellPrice5":55.8, "sellCount5":165, "current":100} 4 | {"stockCode":"603618", "newPrice":15.2, "buyPrice1":15.2, "buyCount1":100, "buyPrice2":0.0, "buyCount2":0, "buyPrice3":0.0, "buyCount3":0, "buyPrice4":0.0, "buyCount4":0, "buyPrice5":0.0, "buyCount5":0, "sellPrice1":15.2, "sellCount1":6526, "sellPrice2":0.0, "sellCount2":385, "sellPrice3":0.0, "sellCount3":0, "sellPrice4":0.0, "sellCount4":0, "sellPrice5":0.0, "sellCount5":0, "current":1} 5 | {"stockCode":"600570", "newPrice":55.67, "buyPrice1":55.67, "buyCount1":1, "buyPrice2":55.66, "buyCount2":23, "buyPrice3":55.65, "buyCount3":30, "buyPrice4":55.60, "buyCount4":4, "buyPrice5":55.55, "buyCount5":10, "sellPrice1":55.72, "sellCount1":6526, "sellPrice2":55.74, "sellCount2":185, "sellPrice3":55.76, "sellCount3":10, "sellPrice4":55.78, "sellCount4":10, "sellPrice5":55.8, "sellCount5":165, "current":1} 6 | {"stockCode":"600570", "newPrice":55.67, "buyPrice1":55.67, "buyCount1":100, "buyPrice2":55.66, "buyCount2":23, "buyPrice3":55.65, "buyCount3":30, "buyPrice4":55.60, "buyCount4":4, "buyPrice5":55.55, "buyCount5":10, "sellPrice1":55.72, "sellCount1":6526, "sellPrice2":55.74, "sellCount2":185, "sellPrice3":55.76, "sellCount3":10, "sellPrice4":55.78, "sellCount4":10, "sellPrice5":55.8, "sellCount5":165, "current":100} -------------------------------------------------------------------------------- /stock/test.sh: -------------------------------------------------------------------------------- 1 | #/bin/bash 2 | for((i=0;i<100000;i++)); 3 | do 4 | echo "{\"stockCode\":\"603618\", \"newPrice\":15.2, \"buyPrice1\":15.2, \"buyCount1\":100, \"buyPrice2\":0.0, \"buyCount2\":0, \"buyPrice3\":0.0, \"buyCount3\":0, \"buyPrice4\":0.0, \"buyCount4\":0, \"buyPrice5\":0.0, \"buyCount5\":0, \"sellPrice1\":15.2, \"sellCount1\":6526, \"sellPrice2\":0.0, \"sellCount2\":385, \"sellPrice3\":0.0, \"sellCount3\":0, \"sellPrice4\":0.0, \"sellCount4\":0, \"sellPrice5\":0.0, \"sellCount5\":0, \"current\":$i}" >> /usr/local/stock/stock.log; 5 | echo "{\"stockCode\":\"600570\", \"newPrice\":55.67, \"buyPrice1\":55.67, \"buyCount1\":$i, \"buyPrice2\":55.66, \"buyCount2\":23, \"buyPrice3\":55.65, \"buyCount3\":30, \"buyPrice4\":55.60, \"buyCount4\":4, \"buyPrice5\":55.55, \"buyCount5\":10, \"sellPrice1\":55.72, \"sellCount1\":6526, \"sellPrice2\":55.74, \"sellCount2\":185, \"sellPrice3\":55.76, \"sellCount3\":10, \"sellPrice4\":55.78, \"sellCount4\":10, \"sellPrice5\":55.8, \"sellCount5\":165, \"current\":100}" >> /usr/local/stock/stock.log; 6 | echo "{\"stockCode\":\"600570\", \"newPrice\":55.67, \"buyPrice1\":55.67, \"buyCount1\":100, \"buyPrice2\":55.66, \"buyCount2\":23, \"buyPrice3\":55.65, \"buyCount3\":30, \"buyPrice4\":55.60, \"buyCount4\":4, \"buyPrice5\":55.55, \"buyCount5\":10, \"sellPrice1\":55.72, \"sellCount1\":6526, \"sellPrice2\":55.74, \"sellCount2\":185, \"sellPrice3\":55.76, \"sellCount3\":10, \"sellPrice4\":55.78, \"sellCount4\":10, \"sellPrice5\":55.8, \"sellCount5\":165, \"current\":$i}" >> /usr/local/stock/stock.log; 7 | done 8 | 
--------------------------------------------------------------------------------