├── LICENSE
├── beekeeper-1.png
├── beekeeper-2.png
├── beekeeper-3.png
├── beekeeper-4.png
├── beekeeper-5.png
├── cmd
│   ├── beekeeper
│   │   └── main.go
│   ├── honeypot
│   │   └── main.go
│   └── hunter
│       └── main.go
├── config.toml
├── demo.png
├── demo1.png
├── docs
│   ├── analysis.md
│   ├── config.md
│   └── deploy.md
├── go.mod
├── go.sum
├── headers.txt
├── hunter.toml
├── pkg
│   ├── analysis
│   │   ├── handler.go
│   │   └── model
│   │       ├── http.go
│   │       └── session.go
│   ├── config
│   │   ├── config.go
│   │   ├── distributor.go
│   │   ├── mq.go
│   │   └── tls.go
│   ├── emulation
│   │   ├── http
│   │   │   └── http.go
│   │   ├── redis
│   │   │   ├── handle.go
│   │   │   └── replay.go
│   │   ├── relay
│   │   │   ├── http.go
│   │   │   ├── tcp.go
│   │   │   └── udp.go
│   │   ├── rsync
│   │   │   ├── rsyncd.go
│   │   │   └── wire.go
│   │   ├── session
│   │   │   ├── distributor.go
│   │   │   ├── limiter.go
│   │   │   └── session.go
│   │   ├── ssh
│   │   │   └── ssh.go
│   │   └── tls
│   │       └── tls.go
│   ├── ingress
│   │   ├── manager.go
│   │   ├── tcp_listener.go
│   │   ├── transparent_proxy_linux.go
│   │   ├── transparent_proxy_other.go
│   │   └── udp_listener.go
│   ├── logger
│   │   ├── jsonl.go
│   │   ├── logger.go
│   │   └── nsq.go
│   ├── mq
│   │   └── utils.go
│   ├── plugin
│   │   ├── httpserver.go
│   │   ├── registry.go
│   │   └── symbols
│   │       ├── github_com-fasthttp-router.go
│   │       ├── github_com-valyala-fasthttp.go
│   │       ├── hachimi-pkg-plugin.go
│   │       ├── hachimi-pkg-types.go
│   │       └── symbols.go
│   ├── rules
│   │   ├── engine.go
│   │   ├── rule.go
│   │   └── util.go
│   ├── types
│   │   ├── handler.go
│   │   ├── http.go
│   │   ├── log.go
│   │   ├── redis.go
│   │   ├── session.go
│   │   └── ssh.go
│   └── utils
│       └── utils.go
├── readme.md
├── servers.txt
├── tests
│   ├── Listen_test.go
│   ├── headers.txt
│   ├── servers.txt
│   ├── titles.txt
│   └── utils_test.go
├── titles.txt
└── tool
    ├── gen_sql
    │   └── gen_sql.go
    └── iptables.sh
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/beekeeper-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/burpheart/hachimi/bc8b94b99e773e44f60277762a821808886eb519/beekeeper-1.png
--------------------------------------------------------------------------------
/beekeeper-2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/burpheart/hachimi/bc8b94b99e773e44f60277762a821808886eb519/beekeeper-2.png
--------------------------------------------------------------------------------
/beekeeper-3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/burpheart/hachimi/bc8b94b99e773e44f60277762a821808886eb519/beekeeper-3.png
--------------------------------------------------------------------------------
/beekeeper-4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/burpheart/hachimi/bc8b94b99e773e44f60277762a821808886eb519/beekeeper-4.png
--------------------------------------------------------------------------------
/beekeeper-5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/burpheart/hachimi/bc8b94b99e773e44f60277762a821808886eb519/beekeeper-5.png
--------------------------------------------------------------------------------
/cmd/beekeeper/main.go:
--------------------------------------------------------------------------------
1 | // Centralized management of honeypots: start, stop, restart, batch rule distribution, updates, and other operations
2 | package main
3 |
4 | // To be open-sourced
5 |
--------------------------------------------------------------------------------
/cmd/honeypot/main.go:
--------------------------------------------------------------------------------
1 | // Honeypot: the smallest building block of a honeynet; can also run standalone on a single machine
2 | package main
3 |
4 | import (
5 | "context"
6 | "crypto/tls"
7 | "flag"
8 | "hachimi/pkg/config"
9 | "hachimi/pkg/ingress"
10 | "log"
11 | "os"
12 | )
13 |
14 | var (
15 | configPath = flag.String("configPath", "config.toml", "config path if not set will use cli flags")
16 | host = flag.String("host", "0.0.0.0", "listen host")
17 | port = flag.Int("port", 12345, "listen port")
18 | logPath = flag.String("logPath", "stdout", "system log path")
19 | honeyLogPath = flag.String("honeyLogPath", "stdout", "honey log path")
20 | keyPath = flag.String("keyPath", "", "ssl key path")
21 | certPath = flag.String("certPath", "", "ssl cert path")
22 | timeOut = flag.Int("timeOut", 60, "timeout for honeypot session Default 60")
23 | beekeeper = flag.String("beekeeper", "", "beekeeper address")
24 | )
25 |
26 | func main() {
27 | flag.Parse()
28 | // check whether the config file at configPath exists
29 | if *configPath != "" {
30 | if _, err := os.Stat(*configPath); err == nil {
31 | err := config.LoadConfig(*configPath)
32 | if err != nil {
33 | log.Fatalf("load config file error: %v", err)
34 | }
35 | }
36 |
37 | }
38 | if *keyPath != "" && *certPath != "" {
39 | cert, err := config.LoadCert(*certPath, *keyPath)
40 | if err != nil {
41 | log.Fatalf("Failed to load certificate: %s\n", err)
42 | }
43 | config.TlsConfig.Certificates = []tls.Certificate{cert}
44 | }
45 | if *timeOut != 60 {
46 | config.GetPotConfig().TimeOut = *timeOut
47 | }
48 | if config.GetPotConfig().Host == "" {
49 | config.GetPotConfig().Host = *host
50 | }
51 | if config.GetPotConfig().Port == 0 {
52 | config.GetPotConfig().Port = *port
53 | }
54 | if config.GetPotConfig().LogPath == "" {
55 | config.GetPotConfig().LogPath = *logPath
56 | if config.GetPotConfig().LogPath != "stdout" {
57 | logFile, err := os.OpenFile(config.GetPotConfig().LogPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
58 | if err != nil {
59 | log.Fatalf("Failed to open log file: %s\n", err)
60 | }
61 | defer logFile.Close()
62 | log.SetOutput(logFile)
63 | }
64 | }
65 | if config.GetPotConfig().HoneyLogPath == "" {
66 | //TODO MQ
67 | config.GetPotConfig().HoneyLogPath = *honeyLogPath
68 | if config.GetPotConfig().HoneyLogPath != "stdout" {
69 | err := config.SetLogFile(config.GetPotConfig().HoneyLogPath)
70 | if err != nil {
71 | log.Fatalf("Failed to set honey log file: %s\n", err)
72 | }
73 | }
74 | }
75 |
76 | lm := ingress.ListenerManager{}
77 | tcpListener := ingress.NewTCPListener(config.GetPotConfig().Host, config.GetPotConfig().Port)
78 | lm = *ingress.NewListenerManager()
79 | lm.AddTCPListener(tcpListener)
80 | udpListener := ingress.NewUDPListener(config.GetPotConfig().Host, config.GetPotConfig().Port)
81 | lm.AddUDPListener(udpListener)
82 | lm.StartAll(context.Background())
83 | select {}
84 | }
85 |
--------------------------------------------------------------------------------
/cmd/hunter/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "flag"
6 | "log"
7 | "os"
8 |
9 | "hachimi/pkg/analysis"
10 | "hachimi/pkg/analysis/model"
11 | "hachimi/pkg/config"
12 | "hachimi/pkg/mq"
13 |
14 | "github.com/ClickHouse/clickhouse-go/v2"
15 | "github.com/oschwald/geoip2-golang"
16 | "github.com/pelletier/go-toml"
17 | )
18 |
19 | var cityDb *geoip2.Reader
20 | var countryDb *geoip2.Reader
21 | var asnDb *geoip2.Reader
22 |
23 | type HunterConfig struct {
24 | // MQ message queue configuration
25 | MQ config.MQConfig `toml:"mq"`
26 | // Database
27 | DBHost string `toml:"db_host"`
28 | DBUser string `toml:"db_user"`
29 | DBPassword string `toml:"db_password"`
30 | DBName string `toml:"db_name"`
31 | // GeoLite
32 | GeoLiteCountryPath string `toml:"geo_country"`
33 | GeoLiteCityPath string `toml:"geo_city"`
34 | GeoLiteASNPath string `toml:"geo_asn"`
35 | }
36 |
37 | // flag
38 | var (
39 | configPath = flag.String("c", "hunter.toml", "config path")
40 | )
41 | var hunterConfig HunterConfig
42 |
43 | func main() {
44 | flag.Parse()
45 | // check whether the config file at configPath exists
46 | var err error
47 | file, err := os.Open(*configPath)
48 | if err != nil {
49 | log.Fatalf("open config file error: %v", err)
50 | }
51 |
52 | defer file.Close()
53 | decoder := toml.NewDecoder(file)
54 | err = decoder.Decode(&hunterConfig)
55 | if err != nil {
56 | log.Fatalf("load config file error: %v", err)
57 | }
58 | if hunterConfig.DBHost == "" {
59 | log.Fatalf("db_host is empty")
60 | }
61 |
62 | if hunterConfig.GeoLiteCountryPath == "" {
63 | hunterConfig.GeoLiteCountryPath = "GeoLite2-Country.mmdb"
64 | }
65 | if hunterConfig.GeoLiteASNPath == "" {
66 | hunterConfig.GeoLiteASNPath = "GeoLite2-ASN.mmdb"
67 | }
68 | if hunterConfig.DBName == "" {
69 | hunterConfig.DBName = "default"
70 | }
71 |
72 | countryDb, err = geoip2.Open(hunterConfig.GeoLiteCountryPath)
73 | if err != nil {
74 | log.Println("open country db error:", err)
75 | } else {
76 | defer countryDb.Close()
77 | }
78 | cityDb, err = geoip2.Open(hunterConfig.GeoLiteCityPath)
79 | if err != nil {
80 | log.Println("open city db error:", err)
81 | } else {
82 | defer cityDb.Close()
83 | }
84 | asnDb, err = geoip2.Open(hunterConfig.GeoLiteASNPath)
85 | if err != nil {
86 | log.Println("open asn db error:", err)
87 | } else {
88 | defer asnDb.Close()
89 | }
90 |
91 | // create the ClickHouse client
92 | conn, err := clickhouse.Open(&clickhouse.Options{
93 | Addr: []string{hunterConfig.DBHost},
94 | Auth: clickhouse.Auth{
95 | Database: hunterConfig.DBName,
96 | Username: hunterConfig.DBUser,
97 | Password: hunterConfig.DBPassword,
98 | },
99 | Debug: false,
100 | })
101 | if err != nil {
102 | log.Fatalf("Failed to connect to ClickHouse: %v", err)
103 | }
104 | if err := conn.Exec(context.Background(), model.CreateTablehttp_session()); err != nil {
105 | log.Fatalf("Failed to create table: %v", err)
106 | }
107 | if err := conn.Exec(context.Background(), model.CreateTablesession()); err != nil {
108 | log.Fatalf("Failed to create table: %v", err)
109 | }
110 |
111 | consumer, err := mq.NewNsqConsumer(hunterConfig.MQ.Topic, "channel", hunterConfig.MQ.AuthSecret, hunterConfig.MQ.Compression, hunterConfig.MQ.CompressionLevel, hunterConfig.MQ.Tls, hunterConfig.MQ.EnableTlsVerify, hunterConfig.MQ.ClientCertPath, hunterConfig.MQ.ClientKeyPath, hunterConfig.MQ.CaCertPath)
112 | if err != nil {
113 | log.Println(err)
114 | return
115 | }
116 | handler, err := analysis.NewPotMessageHandler(1000, conn, countryDb, cityDb, asnDb)
117 |
118 | consumer.AddHandler(handler)
119 | err = consumer.ConnectToNSQD(hunterConfig.MQ.Host)
120 | if err != nil {
121 | log.Fatalf("Failed to connect to NSQ: %v", err)
122 | return
123 | }
124 | handler.Wait()
125 |
126 | }
127 |
--------------------------------------------------------------------------------
/config.toml:
--------------------------------------------------------------------------------
1 | # Service listen address
2 | host = "127.0.0.1"
3 | # Service listen port; iptables needs to forward port traffic to this port
4 | port = 80
5 | # System log path, defaults to stderr
6 | #logPath = "/var/log/system.log"
7 | # Honeypot session log path, defaults to stdout
8 | #honeyLogPath = "/var/log/honey.log"
9 | # Timeout in seconds
10 | timeOut = 60
11 |
12 | # TLS configuration
13 | [tls]
14 | certKey = "private.key"
15 | certFile = "certificate.crt"
16 |
17 | # Message queue configuration
18 | [mq]
19 | host = "127.0.0.1:1337"
20 | secret = ""
21 | topic = "hachimi"
22 | compression = false
23 | compressionLevel = 6
24 |
25 | # Forwarding rules; port 0 forwards all ports
26 | #[[forward]]
27 | #port = 80
28 | #handler = "relay_http"
29 | #config = { service = "baidu", targetHost = "www.baidu.com:443", isTls = "true" }
30 |
31 | #[[forward]]
32 | #port = 33306
33 | #handler = "relay_tcp"
34 | #config = { service = "mysql", targetAddr = "127.0.0.1:3306", sendSession = "false" }
--------------------------------------------------------------------------------
/demo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/burpheart/hachimi/bc8b94b99e773e44f60277762a821808886eb519/demo.png
--------------------------------------------------------------------------------
/demo1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/burpheart/hachimi/bc8b94b99e773e44f60277762a821808886eb519/demo1.png
--------------------------------------------------------------------------------
/docs/analysis.md:
--------------------------------------------------------------------------------
1 | # Data Analysis
2 |
3 | Redash is a data analysis tool that helps you pull data from a database, analyze it, and generate reports.
4 | ## Installation
5 | Install it by following the [setup documentation](https://redash.io/help/open-source/setup).
6 |
7 | ### Data Sources
8 | ### Analysis
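As a sketch of the kind of aggregation a Redash dashboard can be built on (the column names come from the `session` table that hunter creates; the host and credentials are whatever your ClickHouse deployment uses), a candidate query can be tried with clickhouse-client before adding it as a query in Redash:

```bash
# Top source countries by session count over the last 24 hours (sketch).
clickhouse-client --host 10.0.0.1 --query "
  SELECT country_name, count() AS sessions
  FROM session
  WHERE start_time > now() - INTERVAL 1 DAY
  GROUP BY country_name
  ORDER BY sessions DESC
  LIMIT 10"
```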
--------------------------------------------------------------------------------
/docs/config.md:
--------------------------------------------------------------------------------
1 | # Configuration File
2 |
3 | ## Example Configuration File
4 |
5 |
6 | ```toml
7 | # Service listen address
8 | host = "127.0.0.1"
9 | # Service listen port; iptables needs to forward port traffic to this port
10 | port = 80
11 | # System log path, defaults to stderr
12 | logPath = "/var/log/system.log"
13 | # Honeypot session log path, defaults to stdout
14 | honeyLogPath = "/var/log/honey.log"
15 | # Timeout in seconds
16 | timeOut = 60
17 |
18 | # TLS configuration
19 | [tls]
20 | certKey = "private.key"
21 | certFile = "certificate.crt"
22 |
23 | # Message queue configuration
24 | [mq]
25 | host = "mq.example.com:1337"
26 | secret = "1337"
27 | topic = "hachimitsu"
28 | compression = true
29 | compressionLevel = 6
30 | tls = true
31 | enableTlsVerify = true
32 | clientCertPath = "client.crt"
33 | clientKeyPath = "client.key"
34 | caCertPath = "ca.crt"
35 |
36 | # Forwarding rules; port 0 forwards all ports
37 | [[forward]]
38 | port = 80
39 | # HTTP relay
40 | handler = "relay_http"
41 | config = { service = "baidu", targetHost = "www.baidu.com:443", isTls = "true" }
42 |
43 | [[forward]]
44 | port = 33306
45 | # TCP relay
46 | handler = "relay_tcp"
47 | config = { service = "mysql", targetAddr = "127.0.0.1:3306", sendSession = "false" }
48 |
49 | ```
50 |
51 | ## Message Queue Configuration: mq
52 |
53 | When the message queue is enabled, honeypot nodes send session data to the message queue instead of writing it to a log file or standard output.
54 |
55 | | Option | Description |
56 | |------------------|-----------|
57 | | host | Message queue address |
58 | | secret | Message queue credential |
59 | | topic | Message queue topic |
60 | | compression | Enable compression |
61 | | compressionLevel | Compression level |
62 | | tls | Enable TLS |
63 | | enableTlsVerify | Enable TLS verification |
64 | | clientCertPath | Client certificate path |
65 | | clientKeyPath | Client key path |
66 | | caCertPath | CA certificate path |
67 |
68 | ## Forwarding Rule Configuration: forward
69 | ### Overview
70 | Forwarding rules relay traffic arriving on the honeypot node's listening ports to other services, while recording the inbound traffic that passes through.
71 |
72 | | Option | Description |
73 | |---------|---------------|
74 | | port | Port to forward; 0 forwards all ports |
75 | | handler | Relay handler |
76 | | config | Relay handler configuration |
77 |
78 |
79 |
80 | ### HTTP Relay Handler: relay_http
81 | The HTTP relay handler forwards HTTP requests to the target address.
82 |
83 | The output log format is the same as the standard HTTP honeypot log.
84 |
85 | | Option | Description |
86 | |------------|---------|
87 | | service | Service name |
88 | | targetHost | Target address |
89 | | isTls | Enable TLS |
90 |
91 | ### TCP Relay Handler: relay_tcp
92 | The TCP relay handler forwards TCP connections to the target address.
93 |
94 | There is no extra log output; only the inbound traffic passing through is recorded in the session log.
95 |
96 | | Option | Description |
97 | |-------------|-------------------------------------------------|
98 | | service | Service name |
99 | | targetAddr | Target address |
100 | | sendSession | When the connection is established, a newline-terminated session JSON string is sent to the target, used to pass along the original source IP and source port (see the example below) |
101 |
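A quick way to observe this locally (a sketch only: the ports come from the example rule above, and the exact JSON fields are assumed to mirror the honeypot's session log, e.g. `src_ip` and `src_port`):

```bash
# Terminal 1: stand-in relay target that prints whatever arrives. With
# sendSession = "true", the first line should be the newline-terminated
# session JSON, followed by the relayed client data.
# (flags vary between netcat variants; some need `nc -l -p 3306`)
nc -lk 127.0.0.1 3306

# Terminal 2: connect through the honeypot's forwarded port (33306 in the
# example rule above) and send some test data.
printf 'ping\r\n' | nc 127.0.0.1 33306
```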
--------------------------------------------------------------------------------
/docs/deploy.md:
--------------------------------------------------------------------------------
1 | # System Architecture
2 | ```
3 | ┌─────┐ ┌─────┐ ┌─────┐
4 | │ POT │ │ POT │ │ POT │ ...
5 | └──┬──┘ └──┬──┘ └──┬──┘
6 | │ │ logs │
7 | │ ┌────▼────┐ │
8 | ┌──────────┐ └──►│ NSQ │◄──┘
9 | │ │ └────┬────┘
10 | │ Redash │ ┌──────┴───────┐
11 | │ │ │ │
12 | └──────────┘ ┌────▼───┐ ┌────▼───┐
13 | ▲ │ hunter ├──┬──┤ hunter │ ...
14 | │ └────────┘ │ └────────┘
15 | │ │
16 | │ ┌──────▼───────┐
17 | │ │ │
18 | └────────────────┤ Clickhouse │
19 | │ │
20 | └──────────────┘
21 | POT: honeypot node
22 | NSQ: message queue server (NSQD)
23 | hunter: analysis node
24 | Clickhouse: database
25 | Redash: visual analytics platform
26 | ```
27 |
28 | # System Requirements
29 | ## Operating System
30 |
31 | The honeypot node's full functionality is only available on Linux. The other components, apart from the database, have no operating-system requirements.
32 |
33 | If the honeypot node is run on another operating system, the transparent proxy may be unavailable; in that case only single-port listening is supported.
34 |
35 | ## Components
36 |
37 | ### Honeypot Node
38 |
39 | The honeypot node uses very few resources and runs comfortably on a VPS with 1 core and 256 MB of RAM.
40 |
41 | ### Message Queue Server
42 |
43 | The message queue server uses NSQ. Size the server according to the number of producers and consumers and the consumption rate.
44 |
45 | For a honeynet with fewer than 10 nodes, a VPS with 1 core and 1 GB of RAM is the minimum.
46 |
47 | ### Analysis Node
48 |
49 | The analysis node acts as a consumer: it pulls data from the message queue server, analyzes it, and stores the results in the database.
50 |
51 | It needs more memory and CPU; size the server according to the number of nodes and the consumption rate.
52 |
53 | ### Database
54 |
55 | ClickHouse is recommended for data storage. Other databases can be used, but you will have to implement the storage logic yourself.
56 |
57 | ### Visual Analytics
58 |
59 | Visual analytics uses Redash; install it following the official Redash documentation.
60 |
61 | ### Network Requirements
62 |
63 | Nodes must accept all TCP and UDP traffic from the internet. If your node has a firewall, make sure inbound traffic is allowed for all protocols and all ports.
64 |
65 |
66 |
67 | # Node Installation
68 |
69 | ### iptables Setup
70 |
71 |
72 | If services on the server are already listening on ports you need, or you manage the honeypot node over a public port (for example SSH),
73 |
74 | adjust the iptables rules on the honeypot node to skip the ports you actually use. The tutorial below reserves the port range 65521-65535 as an example.
75 |
76 |
77 | Change the default SSH port to 65532 so that it falls outside the rule range and normal SSH connections are not affected:
78 | ```bash
79 | sudo sed -i 's/#Port 22/Port 65532/g' /etc/ssh/sshd_config
80 | ```
81 |
82 | Restart the SSH service for the change to take effect:
83 | ```bash
84 | sudo systemctl restart sshd
85 | ```
86 |
87 | Install iptables-services:
88 | ```bash
89 | sudo yum install -y iptables-services
90 | ```
91 |
92 | Set the default policies to accept all traffic:
93 | ```bash
94 | sudo iptables -P INPUT ACCEPT
95 | sudo iptables -P FORWARD ACCEPT
96 | sudo iptables -P OUTPUT ACCEPT
97 | ```
98 | Flush all existing rules:
99 | ```bash
100 | sudo iptables -F INPUT
101 | sudo iptables -F FORWARD
102 | sudo iptables -F OUTPUT
103 | sudo iptables -F
104 | sudo iptables -X
105 | sudo iptables -Z
106 | sudo iptables -t mangle -F
107 | ```
108 | Create the DIVERT chain:
109 | ```bash
110 | sudo iptables -t mangle -N DIVERT
111 | ```
112 | Set the DIVERT chain rules, used to bypass outbound traffic:
113 | ```bash
114 | sudo iptables -t mangle -A DIVERT -j MARK --set-mark 1
115 | sudo iptables -t mangle -A DIVERT -j ACCEPT
116 | sudo iptables -t mangle -I PREROUTING -p tcp -m socket -j DIVERT
117 | sudo iptables -t mangle -I PREROUTING -p udp -m socket -j DIVERT
118 | ```
119 | Add the transparent proxy (TPROXY) rules for inbound traffic.
120 | `$(ip -o -4 route show to default | awk '{print $5}')` resolves the default-route interface; if you use another interface, replace it manually with yours.
121 |
122 | `$(hostname -I | awk '{print $1}')` resolves the local IP; in other setups, replace it manually with your machine's IP.
123 |
124 | `12345` is the honeypot node's listen port.
125 |
126 | Some very old versions of iptables may not support port ranges, so check that the rules were actually added (a quick check is shown after the block below).
127 | ```bash
128 | sudo iptables -t mangle -A PREROUTING -i $(ip -o -4 route show to default | awk '{print $5}') -p tcp -d $(hostname -I | awk '{print $1}') --dport 0:12344 -j TPROXY --tproxy-mark 0x1/0x1 --on-ip $(hostname -I | awk '{print $1}') --on-port 12345
129 | sudo iptables -t mangle -A PREROUTING -i $(ip -o -4 route show to default | awk '{print $5}') -p tcp -d $(hostname -I | awk '{print $1}') --dport 12346:65520 -j TPROXY --tproxy-mark 0x1/0x1 --on-ip $(hostname -I | awk '{print $1}') --on-port 12345
130 | sudo iptables -t mangle -A PREROUTING -i $(ip -o -4 route show to default | awk '{print $5}') -p udp -d $(hostname -I | awk '{print $1}') --dport 0:12344 -j TPROXY --tproxy-mark 0x1/0x1 --on-ip $(hostname -I | awk '{print $1}') --on-port 12345
131 | sudo iptables -t mangle -A PREROUTING -i $(ip -o -4 route show to default | awk '{print $5}') -p udp -d $(hostname -I | awk '{print $1}') --dport 12346:65520 -j TPROXY --tproxy-mark 0x1/0x1 --on-ip $(hostname -I | awk '{print $1}') --on-port 12345
132 | ```
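To confirm the rules were actually added (especially on older iptables builds), list the mangle PREROUTING chain and check the TPROXY entries and their port ranges:

```bash
sudo iptables -t mangle -S PREROUTING
sudo iptables -t mangle -L PREROUTING -n --line-numbers
```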
133 | The honeypot also supports IPv6; use ip6tables to add the corresponding IPv6 rules (a sketch is given below).
134 |
135 | `$(ip -o -6 route show to default | awk '{print $5}')` resolves the IPv6 default-route interface; if you use another interface, replace it manually with yours.
136 |
137 | `$(hostname -I | awk '{print $2}')` resolves the local IPv6 address; in other setups, replace it manually with your machine's IPv6 address.
138 |
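A sketch of the equivalent IPv6 rules, mirroring the IPv4 DIVERT/TPROXY rules above with the substitutions just described (adjust the interface, address, and listen port if yours differ):

```bash
sudo ip6tables -t mangle -N DIVERT
sudo ip6tables -t mangle -A DIVERT -j MARK --set-mark 1
sudo ip6tables -t mangle -A DIVERT -j ACCEPT
sudo ip6tables -t mangle -I PREROUTING -p tcp -m socket -j DIVERT
sudo ip6tables -t mangle -I PREROUTING -p udp -m socket -j DIVERT
sudo ip6tables -t mangle -A PREROUTING -i $(ip -o -6 route show to default | awk '{print $5}') -p tcp -d $(hostname -I | awk '{print $2}') --dport 0:12344 -j TPROXY --tproxy-mark 0x1/0x1 --on-ip $(hostname -I | awk '{print $2}') --on-port 12345
sudo ip6tables -t mangle -A PREROUTING -i $(ip -o -6 route show to default | awk '{print $5}') -p tcp -d $(hostname -I | awk '{print $2}') --dport 12346:65520 -j TPROXY --tproxy-mark 0x1/0x1 --on-ip $(hostname -I | awk '{print $2}') --on-port 12345
sudo ip6tables -t mangle -A PREROUTING -i $(ip -o -6 route show to default | awk '{print $5}') -p udp -d $(hostname -I | awk '{print $2}') --dport 0:12344 -j TPROXY --tproxy-mark 0x1/0x1 --on-ip $(hostname -I | awk '{print $2}') --on-port 12345
sudo ip6tables -t mangle -A PREROUTING -i $(ip -o -6 route show to default | awk '{print $5}') -p udp -d $(hostname -I | awk '{print $2}') --dport 12346:65520 -j TPROXY --tproxy-mark 0x1/0x1 --on-ip $(hostname -I | awk '{print $2}') --on-port 12345
```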
139 | Save the rules and start iptables:
140 |
141 | ```bash
142 | sudo service iptables save
143 | sudo systemctl start iptables
144 | sudo systemctl enable iptables
145 | #ipv6
146 | sudo service ip6tables save
147 | sudo systemctl start ip6tables
148 | sudo systemctl enable ip6tables
149 | ```
150 |
151 | ### Certificate Generation
152 |
153 | TLS certificate for the honeypot node: using your own certificate is recommended, but you can also run without an external certificate, in which case a new temporary certificate is generated on every start.
154 | ```bash
155 | openssl genpkey -algorithm RSA -out private.key
156 | openssl req -new -key private.key -out certificate.csr -subj "/C=CN/ST=BeiJing/L=test/O=test/OU=test/CN=test"
157 | openssl x509 -req -days 3650 -in certificate.csr -signkey private.key -out certificate.crt
158 | ```
159 | ### Starting the Honeypot Node
160 |
161 | The honeypot node can be configured with command-line flags and a configuration file; some options can only be set in the configuration file, see the [configuration file](config.md) docs.
162 |
163 | Started without flags, the honeypot node writes system logs to standard error and honeypot session logs to standard output.
164 | ```bash
165 | ./honeypot -configPath config.toml
166 | ```
167 | # NSQ Deployment
168 | Download the latest stable NSQ release from GitHub: [releases page](https://github.com/nsqio/nsq/releases/)
169 |
170 | ## Single-Instance Deployment
171 |
172 | http-address is NSQD's web management address. It has no authentication, so bind it to a local address to avoid exposing it to the public internet.
173 | tcp-address is NSQD's TCP service address, used by producers and consumers to connect.
174 | ```bash
175 | ./nsqd -http-address 127.0.0.1:11451 -tcp-address :1337
176 | ```
177 | Overflow data that consumers cannot keep up with is temporarily stored in the current directory.
178 |
179 | Configuration reference: [NSQD configuration](https://nsq.io/components/nsqd.html)
180 | ### Authentication
181 |
182 | #### Secret-based authentication
183 |
184 | You need to run an authentication service that authenticates producers and consumers.
185 | Here we use an open-source auth service, [nsq-auth](https://github.com/zhimiaox/nsq-auth):
186 | ```bash
187 | ./nsq-auth -a 127.0.0.1:1919 --secret hachimitsu
188 | ```
189 | This starts a simple auth service with no fine-grained permissions.
190 |
191 | Then, when starting nsqd, pass the --auth-http-address flag to point it at the auth service:
192 | ```bash
193 | ./nsqd -http-address 127.0.0.1:11451 -tcp-address :1337 --auth-http-address 127.0.0.1:1919
194 | ```
195 |
196 | # Testing the Message Queue
197 |
198 | Use the nsq_tail tool that ships with NSQ to check that the message queue is working.
199 |
200 | `-nsqd-tcp-address` is the address of the `nsqd` message queue server.
201 |
202 | `-producer-opt=auth_secret,secret` (optional) is the producer auth credential; it must match the auth service.
203 |
204 | `--topic hachimi` is the message queue topic; it must match the topic configured on the honeypot nodes.
205 |
206 | ```bash
207 | nsq_tail -nsqd-tcp-address 127.0.0.1:1337 --topic hachimi -producer-opt=auth_secret,hachimitsu
208 | ```
209 |
210 | Access the honeypot; the honeypot node sends the access data to the message queue, and the nsq_tail consumer prints each message to the console line by line.
211 |
212 | Under normal conditions you should see JSON-formatted access log strings printed in the terminal.
213 |
214 | # Database Deployment
215 | See the [ClickHouse official documentation](https://clickhouse.com/docs/zh/getting-started/install/).
216 |
217 | # Analysis Node Deployment
218 | Download the GeoLite2 databases: [GeoLite2 databases](https://dev.maxmind.com/geoip/geoip2/geolite2/)
219 |
220 | hunter configuration file:
221 | ```toml
222 | # Database configuration
223 | db_host = "10.0.0.1:9000"
224 | db_name = "default"
225 | db_user = "default"
226 | db_password = "123456"
227 | geo_country = "./GeoLite2-Country.mmdb"
228 | geo_asn = "./GeoLite2-ASN.mmdb"
229 | # Message queue configuration
230 | [mq]
231 | host = "127.0.0.1:1337"
232 | secret = ""
233 | topic = "hachimi"
234 | compression = false
235 | compressionLevel = 6
236 | ```
237 | Start hunter:
238 | ```bash
239 | ./hunter -c hunter.toml
240 | ```
241 | Data pulled from the message queue is saved to the database; a quick way to confirm rows are arriving is shown below.
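A quick sanity check that rows are arriving (assuming clickhouse-client is installed and the connection settings above; add --user/--password if you changed the defaults):

```bash
clickhouse-client --host 10.0.0.1 --query "SELECT count() FROM session"
clickhouse-client --host 10.0.0.1 --query "SELECT count() FROM http_session"
```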
242 |
243 |
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module hachimi
2 |
3 | go 1.22.0
4 |
5 | toolchain go1.23.4
6 |
7 | require (
8 | github.com/ClickHouse/clickhouse-go/v2 v2.30.0
9 | github.com/expr-lang/expr v1.16.9
10 | github.com/fasthttp/router v1.5.3
11 | github.com/fsnotify/fsnotify v1.8.0
12 | github.com/google/uuid v1.6.0
13 | github.com/nsqio/go-nsq v1.1.0
14 | github.com/oschwald/geoip2-golang v1.11.0
15 | github.com/pelletier/go-toml v1.9.5
16 | github.com/savsgio/gotils v0.0.0-20240704082632-aef3928b8a38
17 | github.com/traefik/yaegi v0.16.1
18 | github.com/valyala/fasthttp v1.58.0
19 | github.com/xitongsys/parquet-go v1.6.2
20 | github.com/xitongsys/parquet-go-source v0.0.0-20200817004010-026bad9b25d0
21 | github.com/yeqown/fasthttp-reverse-proxy/v2 v2.2.3
22 | golang.org/x/crypto v0.29.0
23 | gorm.io/gorm v1.25.12
24 |
25 | )
26 |
27 | require (
28 | github.com/ClickHouse/ch-go v0.61.5 // indirect
29 | github.com/andybalholm/brotli v1.1.1 // indirect
30 | github.com/apache/arrow/go/arrow v0.0.0-20200730104253-651201b0f516 // indirect
31 | github.com/apache/thrift v0.14.2 // indirect
32 | github.com/fasthttp/websocket v1.5.7 // indirect
33 | github.com/go-faster/city v1.0.1 // indirect
34 | github.com/go-faster/errors v0.7.1 // indirect
35 | github.com/golang/snappy v0.0.3 // indirect
36 | github.com/jinzhu/inflection v1.0.0 // indirect
37 | github.com/jinzhu/now v1.1.5 // indirect
38 | github.com/klauspost/compress v1.17.11 // indirect
39 | github.com/mmcloughlin/geohash v0.10.0 // indirect
40 | github.com/oschwald/maxminddb-golang v1.13.0 // indirect
41 | github.com/paulmach/orb v0.11.1 // indirect
42 | github.com/pierrec/lz4/v4 v4.1.21 // indirect
43 | github.com/pkg/errors v0.9.1 // indirect
44 | github.com/segmentio/asm v1.2.0 // indirect
45 | github.com/shopspring/decimal v1.4.0 // indirect
46 | github.com/stretchr/testify v1.10.0 // indirect
47 | github.com/valyala/bytebufferpool v1.0.0 // indirect
48 | go.opentelemetry.io/otel v1.26.0 // indirect
49 | go.opentelemetry.io/otel/trace v1.26.0 // indirect
50 | golang.org/x/net v0.31.0 // indirect
51 | golang.org/x/sys v0.28.0 // indirect
52 | golang.org/x/term v0.26.0 // indirect
53 | golang.org/x/text v0.21.0 // indirect
54 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
55 | gopkg.in/yaml.v3 v3.0.1 // indirect
56 | )
57 |
--------------------------------------------------------------------------------
/headers.txt:
--------------------------------------------------------------------------------
1 | WWW-Authenticate: Basic realm="DVR"
2 | WWW-Authenticate: Basic realm="Broadband Router"
3 | WWW-Authenticate: Basic realm="RT-N2UyNjgx"
4 | WWW-Authenticate: Basic realm="TP-LINK Wireless N Router WR841N"
5 | WWW-Authenticate: Basic realm="iPEX Internet Cafe"
6 | WWW-Authenticate: Basic realm="MSNswitch"
7 | WWW-Authenticate: Digest realm="GoAhead", domain=":81",qop="auth", nonce="405448722b302b85aa6ef2b444ea6b5c", opaque="5ccc069c403ebaf9f0171e9517f40e41",algorithm="MD5", stale="FALSE"
8 | WWW-Authenticate: Basic realm="HomeHub"
9 | WWW-Authenticate: Digest realm="IgdAuthentication", domain="/", nonce="N2UyNjgxMjA6NjQ1MWZiOTA6IDJlNjI5NDA=", qop="auth", algorithm=MD5
10 | WWW-Authenticate: Basic realm="Tomcat Manager Application"
11 | WWW-Authenticate: Basic realm=""
12 | WWW-Authenticate: Basic realm="TP-LINK Wireless N Router IPC223(P)-6"
13 | WWW-Authenticate: Basic realm="WT-ABCyNjgx"
14 | WWW-Authenticate: Basic realm="System"
15 | WWW-Authenticate: Basic realm="Server Status"a
16 | WWW-Authenticate: Basic realm="MOBOTIX Camera User"
17 | WWW-Authenticate: Basic aHR0cHdhdGNoOmY=
18 | WWW-Authenticate: Digest realm="realm@easycwmp",qop="auth",nonce="e22f76001fa38ce36f9b69bdbd73fbfb0001c399",opaque="328458fab28345ae87ab3210a8513b14eff452a2"
19 | WWW-Authenticate: Basic realm="netcam"
20 | WWW-Authenticate: Basic realm="karaf"
21 | WWW-Authenticate: Digest realm="IgdAuthentication", domain="/", nonce="N2UyNjgxMjA6NjQ1MWZiOTA6IDJlNjI5NDA=", qop="auth", algorithm="MD5"
22 | WWW-Authenticate: Basic realm="NETGEAR DGN1000"
23 | WWW-Authenticate: Basic realm="Panasonic network device"
24 | WWW-Authenticate: Basic realm="[RTX1210]"
25 | WWW-Authenticate: Basic realm="Nagios Core"
26 | WWW-Authenticate: Basic realm="TP-LINK Wireless N Router WR840N"
27 | WWW-Authenticate: Basic realm="TP-LINK AC1200 Wireless Dual Band Gigabit Router Archer C5"
28 | WWW-Authenticate: Basic realm="Video Server"
29 | WWW-Authenticate: Digest realm="Control", domain="PDVR M800", nonce="34712299626f372c5ea0729ded8e8f37", algorithm=MD5, qop="auth"
--------------------------------------------------------------------------------
/hunter.toml:
--------------------------------------------------------------------------------
1 | # Database configuration
2 | db_host = "10.0.0.1:9000"
3 | db_name = "default"
4 | db_user = "default"
5 | db_password = "123456"
6 | #geo_country = "./GeoLite2-Country.mmdb"
7 | #geo_city = "./GeoLite2-City.mmdb"
8 | #geo_asn = "./GeoLite2-ASN.mmdb"
9 | # Message queue configuration
10 | [mq]
11 | host = "127.0.0.1:1337"
12 | secret = ""
13 | topic = "hachimi"
14 | compression = false
15 | compressionLevel = 6
--------------------------------------------------------------------------------
/pkg/analysis/handler.go:
--------------------------------------------------------------------------------
1 | package analysis
2 |
3 | import (
4 | "encoding/json"
5 | "log"
6 | "net"
7 | "sync"
8 | "sync/atomic"
9 | "time"
10 |
11 | "hachimi/pkg/analysis/model"
12 | "hachimi/pkg/types"
13 | "hachimi/pkg/utils"
14 |
15 | "github.com/ClickHouse/clickhouse-go/v2"
16 | "github.com/mmcloughlin/geohash"
17 | "github.com/nsqio/go-nsq"
18 | "github.com/oschwald/geoip2-golang"
19 | )
20 |
21 | type PotMessageHandler struct {
22 | logChan chan *types.HoneyData
23 | wg sync.WaitGroup
24 | mu sync.Mutex
25 | buffer []*types.HoneyData
26 | count int64
27 | conn clickhouse.Conn
28 | cityDb *geoip2.Reader
29 | countryDb *geoip2.Reader
30 | asnDb *geoip2.Reader
31 | maxSize int
32 | }
33 |
34 | func NewPotMessageHandler(bufferSize int, conn clickhouse.Conn, countryDb *geoip2.Reader, cityDb *geoip2.Reader, asnDb *geoip2.Reader) (*PotMessageHandler, error) {
35 | Handler := &PotMessageHandler{
36 | logChan: make(chan *types.HoneyData, 100),
37 | buffer: make([]*types.HoneyData, 0, bufferSize),
38 | maxSize: bufferSize,
39 | conn: conn,
40 | countryDb: countryDb,
41 | cityDb: cityDb,
42 | asnDb: asnDb,
43 | }
44 | Handler.wg.Add(1)
45 | go Handler.processLogs()
46 | return Handler, nil
47 | }
48 | func (h *PotMessageHandler) HandleMessage(m *nsq.Message) error {
49 | if len(m.Body) == 0 {
50 | return nil
51 | }
52 | var data types.HoneyData
53 | err := json.Unmarshal(m.Body, &data)
54 | if err != nil {
55 | log.Println(err)
56 | return err
57 | }
58 | atomic.AddInt64(&h.count, 1)
59 | h.logChan <- &data
60 | return nil
61 | }
62 |
63 | func (h *PotMessageHandler) processLogs() {
64 | defer h.wg.Done()
65 |
66 | ticker := time.NewTicker(1 * time.Second) // force a flush every second
67 | defer ticker.Stop()
68 |
69 | for {
70 | select {
71 | case log, ok := <-h.logChan:
72 | if !ok {
73 | // channel closed: flush the remaining logs
74 | h.mu.Lock()
75 | h.flush()
76 | h.mu.Unlock()
77 | return
78 | }
79 | // new log entry received: append it to the buffer
80 | h.mu.Lock()
81 | h.buffer = append(h.buffer, log)
82 | // if the buffer is full, trigger a flush
83 | if len(h.buffer) >= h.maxSize {
84 | h.flush()
85 | }
86 | h.mu.Unlock()
87 | case <-ticker.C:
88 | // timer fired: flush any buffered logs
89 | if h.count > 0 {
90 | h.mu.Lock()
91 | h.flush()
92 | h.mu.Unlock()
93 | }
94 | }
95 | }
96 | }
97 |
98 | func (h *PotMessageHandler) flush() {
99 | // group the buffered messages by type so each type can be batch-inserted
100 | var logs map[string][]interface{}
101 | logs = make(map[string][]interface{})
102 | for _, msg := range h.buffer {
103 | data := msg.Data.(map[string]interface{})
104 |
105 | switch msg.Type {
106 | case "session":
107 | // map[string]interface {}
108 | var session model.Session
109 | session.ID = data["id"].(string)
110 | session.Protocol = data["protocol"].(string)
111 | session.StartTime, _ = utils.StringToTime(data["start_time"].(string))
112 | session.EndTime, _ = utils.StringToTime(data["end_time"].(string))
113 | session.SrcIP = data["src_ip"].(string)
114 | session.SrcPort = int(data["src_port"].(float64))
115 | session.DstIP = data["dst_ip"].(string)
116 | session.DstPort = int(data["dst_port"].(float64))
117 | session.NodeName = msg.NodeName
118 | session.IsTls = data["is_tls"].(bool)
119 | //session.IsIpV6 = data["is_ipv6"].(bool)
120 | //session.IsGmTls = data["is_gm_tls"].(bool)
121 | session.IsHandled = data["is_handled"].(bool)
122 | session.IsHttp = data["is_http"].(bool)
123 | session.Data = data["data"].(string)
124 | session.Service = data["service"].(string)
125 | session.Duration = int(data["duration"].(float64))
126 |
127 | ip := net.ParseIP(session.SrcIP)
128 |
129 | if h.cityDb != nil {
130 | record, err := h.cityDb.City(ip)
131 | if err != nil {
132 | log.Println(err)
133 | } else {
134 | // Country
135 | CountryName := record.Country.Names["zh-CN"]
136 | IsoCode := record.Country.IsoCode
137 | if CountryName == "" {
138 | CountryName = record.RegisteredCountry.Names["zh-CN"]
139 | IsoCode = record.Country.IsoCode
140 | }
141 | session.CountryName = CountryName
142 | session.IsoCode = IsoCode
143 | // City
144 | session.CityName = record.City.Names["en"]
145 | session.GeoHash = geohash.Encode(record.Location.Latitude, record.Location.Longitude)
146 | }
147 | } else if h.countryDb != nil {
148 | record, err := h.countryDb.Country(ip)
149 | if err != nil {
150 | log.Println(err)
151 | } else {
152 | CountryName := record.Country.Names["zh-CN"]
153 | IsoCode := record.Country.IsoCode
154 | if CountryName == "" {
155 | CountryName = record.RegisteredCountry.Names["zh-CN"]
156 | IsoCode = record.Country.IsoCode
157 | }
158 | session.CountryName = CountryName
159 | session.IsoCode = IsoCode
160 | }
161 | }
162 |
163 | if h.asnDb != nil {
164 | record, err := h.asnDb.ASN(ip)
165 | if err != nil {
166 | log.Println(err)
167 | } else {
168 | session.AsnOrg = record.AutonomousSystemOrganization
169 | session.AsnNumber = record.AutonomousSystemNumber
170 | }
171 | }
172 |
173 | session.DataHash = utils.SHA1(session.Data)
174 | logs["session"] = append(logs["session"], session)
175 |
176 | case "http_session":
177 | var http model.HttpSession
178 | http.ID = data["id"].(string)
179 | http.SessionID = data["session_id"].(string)
180 | http.StartTime, _ = utils.StringToTime(data["start_time"].(string))
181 | http.EndTime, _ = utils.StringToTime(data["end_time"].(string))
182 | http.Header = utils.MapInterfaceToString(data["header"].(map[string]interface{}))
183 | http.UriParam = utils.MapInterfaceToString(data["uri_param"].(map[string]interface{}))
184 | http.BodyParam = utils.MapInterfaceToString(data["body_param"].(map[string]interface{}))
185 | http.SrcIP = data["src_ip"].(string)
186 | http.SrcPort = int(data["src_port"].(float64))
187 | http.DstIP = data["dst_ip"].(string)
188 | http.DstPort = int(data["dst_port"].(float64))
189 | http.NodeName = msg.NodeName
190 | http.IsTls = data["is_tls"].(bool)
191 | //session.IsIpV6 = data["is_ipv6"].(bool)
192 | //session.IsGmTls = data["is_gm_tls"].(bool)
193 | http.IsHandled = data["is_handled"].(bool)
194 | http.IsHttp = data["is_http"].(bool)
195 | http.Data = data["data"].(string)
196 | http.Method = data["method"].(string)
197 | http.Path = data["path"].(string)
198 | http.UA = data["ua"].(string)
199 | http.Host = data["host"].(string)
200 | http.RawHeader = data["raw_header"].(string)
201 | http.Body = data["body"].(string)
202 | http.Service = data["service"].(string)
203 | http.Duration = int(data["duration"].(float64))
204 |
205 | ip := net.ParseIP(http.SrcIP)
206 |
207 | if h.cityDb != nil {
208 | record, err := h.cityDb.City(ip)
209 | if err != nil {
210 | log.Println(err)
211 | } else {
212 | // Country
213 | CountryName := record.Country.Names["zh-CN"]
214 | IsoCode := record.Country.IsoCode
215 | if CountryName == "" {
216 | CountryName = record.RegisteredCountry.Names["zh-CN"]
217 | IsoCode = record.Country.IsoCode
218 | }
219 | http.CountryName = CountryName
220 | http.IsoCode = IsoCode
221 | // City
222 | http.CityName = record.City.Names["en"]
223 | http.GeoHash = geohash.Encode(record.Location.Latitude, record.Location.Longitude)
224 | }
225 | } else if h.countryDb != nil {
226 | record, err := h.countryDb.Country(ip)
227 | if err != nil {
228 | log.Println(err)
229 | } else {
230 | CountryName := record.Country.Names["zh-CN"]
231 | IsoCode := record.Country.IsoCode
232 | if CountryName == "" {
233 | CountryName = record.RegisteredCountry.Names["zh-CN"]
234 | IsoCode = record.Country.IsoCode
235 | }
236 | http.CountryName = CountryName
237 | http.IsoCode = IsoCode
238 | }
239 | }
240 |
241 | if h.asnDb != nil {
242 | record, err := h.asnDb.ASN(ip)
243 | if err != nil {
244 | log.Println(err)
245 | } else {
246 | http.AsnOrg = record.AutonomousSystemOrganization
247 | http.AsnNumber = record.AutonomousSystemNumber
248 | }
249 | }
250 |
251 | http.BodyHash = utils.SHA1(http.Body)
252 | http.HeaderHash = utils.SHA1(http.RawHeader)
253 | http.PathHash = utils.SHA1(http.Path)
254 | http.DataHash = utils.SHA1(http.Data)
255 | logs["http"] = append(logs["http"], http)
256 |
257 | }
258 | }
259 | for key, value := range logs {
260 | if len(value) == 0 {
261 | continue
262 | }
263 | switch key {
264 | case "session":
265 | // get the column names of the struct fields via reflection
266 | if len(value) == 0 {
267 | continue
268 | }
269 | var sessions []model.Session
270 | for _, v := range value {
271 | sessions = append(sessions, v.(model.Session))
272 | }
273 |
274 | // perform the batch insert
275 | if err := model.InsertSession(h.conn, sessions); err != nil {
276 | log.Fatalf("failed to send batch: %v", err)
277 | }
278 | case "http":
279 | if len(value) == 0 {
280 | continue
281 | }
282 | var httpsessions []model.HttpSession
283 | for _, v := range value {
284 | httpsessions = append(httpsessions, v.(model.HttpSession))
285 | }
286 | if err := model.InsertHttpSession(h.conn, httpsessions); err != nil {
287 | log.Fatalf("failed to send batch: %v", err)
288 | }
289 |
290 | }
291 | }
292 | h.buffer = h.buffer[:0] // clear the buffer once all batches have been written
293 | }
294 | func (h *PotMessageHandler) Close() {
295 | close(h.logChan)
296 | h.Wait()
297 | }
298 | func (h *PotMessageHandler) Wait() {
299 | h.wg.Wait()
300 | }
301 |
--------------------------------------------------------------------------------
/pkg/analysis/model/http.go:
--------------------------------------------------------------------------------
1 | package model
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "time"
7 |
8 | "github.com/ClickHouse/clickhouse-go/v2"
9 | )
10 |
11 | //TODO JA3
12 |
13 | type HttpSession struct {
14 | ID string `ch_name:"id" ch_type:"String"`
15 | SrcIP string `ch_name:"src_ip" ch_type:"String"` //TODO IPV6
16 | SrcPort int `ch_name:"src_port" ch_type:"UInt16"`
17 | DstIP string `ch_name:"dst_ip" ch_type:"String"` //TODO IPV6
18 | DstPort int `ch_name:"dst_port" ch_type:"UInt16"`
19 | NodeName string `ch_name:"node_name" ch_type:"String"`
20 | IsIpV6 bool `ch_name:"is_ipv6" ch_type:"UInt8"`
21 | IsTls bool `ch_name:"is_tls" ch_type:"UInt8"`
22 | IsGmTls bool `ch_name:"is_gm_tls" ch_type:"UInt8"`
23 | IsHttp bool `ch_name:"is_http" ch_type:"UInt8"`
24 | IsHandled bool `ch_name:"is_handled" ch_type:"UInt8"`
25 | SessionID string `ch_name:"session_id" ch_type:"String"`
26 | Header map[string]string `ch_name:"header" ch_type:"Map(String, String)"`
27 | UriParam map[string]string `ch_name:"uri_param" ch_type:"Map(String, String)"`
28 | BodyParam map[string]string `ch_name:"body_param" ch_type:"Map(String, String)"`
29 | Method string `ch_name:"method" ch_type:"String"`
30 | Path string `ch_name:"path" ch_type:"String"`
31 | UA string `ch_name:"user_agent" ch_type:"String"`
32 | Host string `ch_name:"host" ch_type:"String"`
33 | RawHeader string `ch_name:"raw_header" ch_type:"String"`
34 | Body string `ch_name:"body" ch_type:"String"`
35 | Data string `ch_name:"data" ch_type:"String"`
36 | StartTime time.Time `ch_name:"start_time" ch_type:"DateTime('UTC')" ch_order:"true"`
37 | EndTime time.Time `ch_name:"end_time" ch_type:"DateTime('UTC')"`
38 | Service string `ch_name:"service" ch_type:"String"`
39 | // elapsed time in ms
40 | Duration int `ch_name:"duration" ch_type:"UInt32"`
41 | /* ip info */
42 | IsoCode string `ch_name:"iso_code" ch_type:"String"`
43 | CountryName string `ch_name:"country_name" ch_type:"String"`
44 | CityName string `ch_name:"city_name" ch_type:"String"`
45 | GeoHash string `ch_name:"geo_hash" ch_type:"FixedString(12)"`
46 | AsnNumber uint `ch_name:"asn_number" ch_type:"UInt32"`
47 | AsnOrg string `ch_name:"asn_org" ch_type:"String"`
48 | /* hash */
49 | BodyHash string `ch_name:"body_hash" ch_type:"String"`
50 | HeaderHash string `ch_name:"header_hash" ch_type:"String"`
51 | PathHash string `ch_name:"path_hash" ch_type:"String"`
52 | DataHash string `ch_name:"data_hash" ch_type:"String"`
53 | }
54 |
55 | // Code generated by gen_clickhouse.go DO NOT EDIT.
56 |
57 | func CreateTablehttp_session() string {
58 | query := `CREATE TABLE IF NOT EXISTS http_session (
59 | id String,
60 | src_ip String,
61 | src_port UInt16,
62 | dst_ip String,
63 | dst_port UInt16,
64 | node_name String,
65 | is_ipv6 UInt8,
66 | is_tls UInt8,
67 | is_gm_tls UInt8,
68 | is_http UInt8,
69 | is_handled UInt8,
70 | session_id String,
71 | header Map(String, String),
72 | uri_param Map(String, String),
73 | body_param Map(String, String),
74 | method String,
75 | path String,
76 | user_agent String,
77 | host String,
78 | raw_header String,
79 | body String,
80 | data String,
81 | start_time DateTime('UTC'),
82 | end_time DateTime('UTC'),
83 | service String,
84 | duration UInt32,
85 | iso_code String,
86 | country_name String,
87 | city_name String,
88 | geo_hash FixedString(12),
89 | asn_number UInt32,
90 | asn_org String,
91 | body_hash String,
92 | header_hash String,
93 | path_hash String,
94 | data_hash String
95 | ) ENGINE = MergeTree() ORDER BY start_time`
96 |
97 | return query
98 | }
99 |
100 | func InsertHttpSession(conn clickhouse.Conn, HttpSessions []HttpSession) error {
101 | batch, err := conn.PrepareBatch(context.Background(), "INSERT INTO http_session (id, src_ip, src_port, dst_ip, dst_port, node_name, is_ipv6, is_tls, is_gm_tls, is_http, is_handled, session_id, header, uri_param, body_param, method, path, user_agent, host, raw_header, body, data, start_time, end_time, service, duration, iso_code, country_name, city_name, geo_hash, asn_number, asn_org, body_hash, header_hash, path_hash, data_hash)")
102 | if err != nil {
103 | return fmt.Errorf("failed to prepare batch: %w", err)
104 | }
105 |
106 | for _, httpsession := range HttpSessions {
107 | if err := batch.Append(httpsession.ID, httpsession.SrcIP, httpsession.SrcPort, httpsession.DstIP, httpsession.DstPort, httpsession.NodeName, httpsession.IsIpV6, httpsession.IsTls, httpsession.IsGmTls, httpsession.IsHttp, httpsession.IsHandled, httpsession.SessionID, httpsession.Header, httpsession.UriParam, httpsession.BodyParam, httpsession.Method, httpsession.Path, httpsession.UA, httpsession.Host, httpsession.RawHeader, httpsession.Body, httpsession.Data, httpsession.StartTime, httpsession.EndTime, httpsession.Service, httpsession.Duration, httpsession.IsoCode, httpsession.CountryName, httpsession.CityName, httpsession.GeoHash, httpsession.AsnNumber, httpsession.AsnOrg, httpsession.BodyHash, httpsession.HeaderHash, httpsession.PathHash, httpsession.DataHash); err != nil {
108 | return fmt.Errorf("failed to append data: %w", err)
109 | }
110 | }
111 |
112 | if err := batch.Send(); err != nil {
113 | return fmt.Errorf("failed to send batch: %w", err)
114 | }
115 | return nil
116 | }
117 |
118 | // End of generated code
119 |
--------------------------------------------------------------------------------
/pkg/analysis/model/session.go:
--------------------------------------------------------------------------------
1 | package model
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "time"
7 |
8 | "github.com/ClickHouse/clickhouse-go/v2"
9 | )
10 |
11 | type Session struct {
12 | ID string `ch_name:"id" ch_type:"String"`
13 | SrcIP string `ch_name:"src_ip" ch_type:"String"` //TODO IPV6
14 | SrcPort int `ch_name:"src_port" ch_type:"UInt16"`
15 | DstIP string `ch_name:"dst_ip" ch_type:"String"` //TODO IPV6
16 | DstPort int `ch_name:"dst_port" ch_type:"UInt16"`
17 | NodeName string `ch_name:"node_name" ch_type:"String"`
18 | IsIpV6 bool `ch_name:"is_ipv6" ch_type:"UInt8"`
19 | IsTls bool `ch_name:"is_tls" ch_type:"UInt8"`
20 | IsGmTls bool `ch_name:"is_gm_tls" ch_type:"UInt8"`
21 | IsHttp bool `ch_name:"is_http" ch_type:"UInt8"`
22 | IsHandled bool `ch_name:"is_handled" ch_type:"UInt8"`
23 | Protocol string `ch_name:"protocol" ch_type:"String"`
24 | Data string `ch_name:"data" ch_type:"String"`
25 | Service string `ch_name:"service" ch_type:"String"`
26 | StartTime time.Time `ch_name:"start_time" ch_type:"DateTime('UTC')" ch_order:"true"`
27 | EndTime time.Time `ch_name:"end_time" ch_type:"DateTime('UTC')"`
28 | // elapsed time in ms
29 | Duration int `ch_name:"duration" ch_type:"UInt32"`
30 | /* ip info */
31 | IsoCode string `ch_name:"iso_code" ch_type:"String"`
32 | CountryName string `ch_name:"country_name" ch_type:"String"`
33 | CityName string `ch_name:"city_name" ch_type:"String"`
34 | GeoHash string `ch_name:"geo_hash" ch_type:"FixedString(12)"`
35 | AsnNumber uint `ch_name:"asn_number" ch_type:"UInt32"`
36 | AsnOrg string `ch_name:"asn_org" ch_type:"String"`
37 | /* hash */
38 | DataHash string `ch_name:"data_hash" ch_type:"String"`
39 | }
40 |
41 | // Code generated by gen_clickhouse.go DO NOT EDIT.
42 |
43 | func CreateTablesession() string {
44 | query := `CREATE TABLE IF NOT EXISTS session (
45 | id String,
46 | src_ip String,
47 | src_port UInt16,
48 | dst_ip String,
49 | dst_port UInt16,
50 | node_name String,
51 | is_ipv6 UInt8,
52 | is_tls UInt8,
53 | is_gm_tls UInt8,
54 | is_http UInt8,
55 | is_handled UInt8,
56 | protocol String,
57 | data String,
58 | service String,
59 | start_time DateTime('UTC'),
60 | end_time DateTime('UTC'),
61 | duration UInt32,
62 | iso_code String,
63 | country_name String,
64 | city_name String,
65 | geo_hash FixedString(12),
66 | asn_number UInt32,
67 | asn_org String,
68 | data_hash String
69 | ) ENGINE = MergeTree() ORDER BY start_time`
70 |
71 | return query
72 | }
73 |
74 | func InsertSession(conn clickhouse.Conn, Sessions []Session) error {
75 | batch, err := conn.PrepareBatch(context.Background(), "INSERT INTO session (id, src_ip, src_port, dst_ip, dst_port, node_name, is_ipv6, is_tls, is_gm_tls, is_http, is_handled, protocol, data, service, start_time, end_time, duration, iso_code, country_name, city_name, geo_hash, asn_number, asn_org, data_hash)")
76 | if err != nil {
77 | return fmt.Errorf("failed to prepare batch: %w", err)
78 | }
79 |
80 | for _, session := range Sessions {
81 | if err := batch.Append(session.ID, session.SrcIP, session.SrcPort, session.DstIP, session.DstPort, session.NodeName, session.IsIpV6, session.IsTls, session.IsGmTls, session.IsHttp, session.IsHandled, session.Protocol, session.Data, session.Service, session.StartTime, session.EndTime, session.Duration, session.IsoCode, session.CountryName, session.CityName, session.GeoHash, session.AsnNumber, session.AsnOrg, session.DataHash); err != nil {
82 | return fmt.Errorf("failed to append data: %w", err)
83 | }
84 | }
85 |
86 | if err := batch.Send(); err != nil {
87 | return fmt.Errorf("failed to send batch: %w", err)
88 | }
89 | return nil
90 | }
91 |
92 | // End of generated code
93 |
--------------------------------------------------------------------------------
/pkg/config/config.go:
--------------------------------------------------------------------------------
1 | package config
2 |
3 | import (
4 | "crypto/rand"
5 | "crypto/rsa"
6 | "crypto/tls"
7 | "github.com/pelletier/go-toml"
8 | "gorm.io/gorm"
9 | "hachimi/pkg/logger"
10 | "hachimi/pkg/mq"
11 | "log"
12 | "os"
13 | )
14 |
15 | // Global database instance
16 | var (
17 | DB *gorm.DB
18 | TlsConfig *tls.Config
19 | )
20 |
21 | type PotConfig struct {
22 | // Host is the address the service listens on
23 | Host string `toml:"host"`
24 | // Port is the port the service listens on
25 | Port int `toml:"port"`
26 | // ForwardingRules are the traffic forwarding rules
27 | ForwardingRules *[]ForwardingRule `toml:"forward"`
28 | // TLS is the TLS configuration; by default a throwaway certificate with a hard-coded subject is generated for temporary use
29 | TLS *TLSConfig `toml:"tls"`
30 | // LogPath is the system log path; defaults to stderr
31 | LogPath string `toml:"logPath"`
32 | // HoneyLogPath is the honeypot session log path; defaults to stdout. If MQ is enabled, session logs are sent to NSQ and not written to a file
33 | HoneyLogPath string `toml:"honeyLogPath"`
34 | // TimeOut is the session timeout in seconds; defaults to 60s
35 | TimeOut int `toml:"timeOut"`
36 | // LimitSize is the per-session log size limit (also applied to HTTP); when unset the code falls back to 5MB (see GetLimitSize)
37 | LimitSize int64 `toml:"limitSize"`
38 | // NodeName is the node name; defaults to the hostname
39 | NodeName string `toml:"nodeName"`
40 | // IpMasking enables IP masking; defaults to false. When enabled, public node IPs that may appear in captured data are replaced with 10.0.0.1 where possible
41 | IpMasking bool `toml:"ipMasking"` //TODO
42 | // NodeIP is the node's public IP address, used for masking
43 | NodeIP string `toml:"nodeIP"`
44 | // NodeIPV6 is the node's public IPv6 address, used for masking
45 | NodeIPV6 string `toml:"nodeIPV6"`
46 | // MQ is the message queue configuration
47 | MQ *MQConfig `toml:"mq"`
48 | // Debug enables debug mode, which prints error logs
49 | Debug bool `toml:"debug"`
50 | // MaxSession is the maximum number of sessions a single source IP may establish within a 10-minute window
51 | MaxSession int `toml:"maxSession"`
52 | }
53 |
54 | var potConfig *PotConfig
55 |
56 | func GetPotConfig() *PotConfig {
57 | if potConfig == nil {
58 | log.Fatalln("PotConfig is not loaded")
59 | }
60 | return potConfig
61 | }
62 |
63 | func GetLimitSize() int64 {
64 | if potConfig == nil || potConfig.LimitSize == 0 {
65 | return 1024 * 1024 * 5 // 5MB
66 | }
67 | return potConfig.LimitSize
68 | }
69 |
70 | // Logger is the global session log handler
71 | var Logger logger.Logger
72 |
73 | var SshPrivateKey *rsa.PrivateKey
74 |
75 | func init() {
76 | potConfig = &PotConfig{
77 | TimeOut: 60,
78 | }
79 | TlsConfig = &tls.Config{
80 | InsecureSkipVerify: true,
81 | //CipherSuites: AllCiphers,
82 | Certificates: []tls.Certificate{genCert()},
83 | //MaxVersion: tls.VersionTLS12,
84 | MinVersion: tls.VersionSSL30,
85 | }
86 | SshPrivateKey, _ = rsa.GenerateKey(rand.Reader, 2048)
87 | jsonlLogger := logger.NewJSONLLogger(os.Stdout, 100, GetNodeName())
88 | Logger = jsonlLogger
89 | }
90 |
91 | func LoadConfig(filePath string) error {
92 | file, err := os.Open(filePath)
93 | if err != nil {
94 | return err
95 | }
96 | defer file.Close()
97 |
98 | var config PotConfig
99 | decoder := toml.NewDecoder(file)
100 | err = decoder.Decode(&config)
101 | if err != nil {
102 | return err
103 | }
104 | if config.LogPath != "" {
105 | logFile, err := os.OpenFile(config.LogPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
106 | if err != nil {
107 | log.Fatalf("Failed to open log file: %s\n", err)
108 | }
109 | // Do not close logFile here: log.SetOutput keeps writing to it for the lifetime of the process
110 | // Redirect the standard logger output
111 | log.Println("Log will be written to", config.LogPath)
112 | log.SetOutput(logFile)
113 | }
114 | // Set up the global session log handler
115 | if config.MQ == nil {
116 | if config.HoneyLogPath != "" {
117 | err := SetLogFile(config.HoneyLogPath)
118 | if err != nil {
119 | return err
120 | }
121 | }
122 | } else {
123 | producer, err := mq.NewNsqProducer(config.MQ.Host, config.MQ.AuthSecret, config.MQ.Compression, config.MQ.CompressionLevel, config.MQ.Tls, config.MQ.EnableTlsVerify, config.MQ.ClientCertPath, config.MQ.ClientKeyPath, config.MQ.CaCertPath)
124 | if err != nil {
125 | return err
126 | }
127 | Logger, err = logger.NewNSQLogger(producer, config.MQ.Topic, 10, GetNodeName())
128 | if err != nil {
129 | return err
130 | }
131 | }
132 | if config.TLS != nil {
133 | if config.TLS.CertFile != "" && config.TLS.CertKey != "" {
134 | cert, err := LoadCert(config.TLS.CertFile, config.TLS.CertKey)
135 | if err != nil {
136 | log.Fatalf("Failed to load certificate: %s\n", err)
137 | }
138 | TlsConfig.Certificates = []tls.Certificate{cert}
139 | } else {
140 | log.Println("TLS config is not complete, using default TLS config")
141 | }
142 | }
143 | if config.TimeOut == 0 {
144 | config.TimeOut = 60
145 | }
146 | if config.ForwardingRules != nil {
147 | err := validateForwardingRule(config.ForwardingRules)
148 | if err != nil {
149 | return err
150 | }
151 | }
152 | potConfig = &config
153 | return nil
154 | }
155 | func SetLogFile(path string) error {
156 | file, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
157 | if err != nil {
158 | return err
159 | }
160 | honeyLogger := logger.NewJSONLLogger(file, 100, GetNodeName())
161 | Logger = honeyLogger
162 | return nil
163 | }
164 |
165 | func LoadConfigFromString(data string) (*PotConfig, error) {
166 | var config PotConfig
167 | err := toml.Unmarshal([]byte(data), &config)
168 | if err != nil {
169 | return nil, err
170 | }
171 | return &config, nil
172 | }
173 | func GetNodeName() string {
174 | if potConfig.NodeName == "" {
175 | hostname, err := os.Hostname()
176 | if err != nil {
177 | return "unknown"
178 | }
179 | potConfig.NodeName = hostname
180 | return hostname
181 | }
182 | return potConfig.NodeName
183 | }
184 | func GetNodeIP() string {
185 | if potConfig.NodeIP == "" {
186 | return ""
187 | }
188 | return potConfig.NodeIP
189 | }
190 |
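For reference, a minimal sketch of parsing a configuration with LoadConfigFromString; the keys follow the toml tags on PotConfig above and the values are purely illustrative.

package main

import (
	"log"

	"hachimi/pkg/config"
)

func main() {
	cfg, err := config.LoadConfigFromString(`
host = "0.0.0.0"
port = 16789
timeOut = 60
nodeName = "demo-node"
debug = true
`)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("pot would listen on %s:%d (node %s)", cfg.Host, cfg.Port, cfg.NodeName)
}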
--------------------------------------------------------------------------------
/pkg/config/distributor.go:
--------------------------------------------------------------------------------
1 | package config
2 |
3 | import "errors"
4 |
5 | type ForwardingRule struct {
6 | // Port is the port to match; 0 matches all ports
7 | Port int `toml:"port"`
8 | // Handler is the handler name; currently relay_http and relay_tcp are supported (empty means only record traffic)
9 | Handler string `toml:"handler"`
10 | // Config holds the handler-specific configuration; currently only relay_http and relay_tcp take one
11 | Config map[string]string `toml:"config"`
12 | }
13 |
14 | /*
15 | relay_tcp configuration:
16 | service      service name
17 | targetAddr   upstream host:port
18 | sendSession  whether to send session info; when enabled, one line with the serialized session (carrying the real source IP, etc.) is sent right after the connection is established, and the upstream must handle it itself
19 |
20 | relay_http configuration:
21 | service      service name
22 | targetHost   upstream host:port
23 | isTls        whether to use TLS towards the upstream
24 | The real client IP is passed via the X-Forwarded-For header.
25 | In addition, relay_http deeply parses HTTP requests and responses, while relay_tcp only records the raw data in the session.
26 | */
27 | func validateForwardingRule(rules *[]ForwardingRule) error {
28 | for _, rule := range *rules {
29 | if rule.Handler == "relay_http" {
30 | if rule.Config["targetHost"] == "" {
31 | return errors.New("relay_http rule targetHost is empty")
32 | }
33 | } else if rule.Handler == "relay_tcp" {
34 | if rule.Config["targetAddr"] == "" {
35 | return errors.New("relay_tcp rule targetAddr is empty")
36 | }
37 | } else {
38 | return errors.New("unknown handler " + rule.Handler)
39 | }
40 | }
41 | return nil
42 |
43 | }
44 |
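A sketch of how the rules documented above could be expressed in code; the ports, addresses and service names are placeholders.

package main

import (
	"fmt"

	"hachimi/pkg/config"
)

func main() {
	rules := []config.ForwardingRule{
		{
			// Relay HTTP on port 8080 to a local upstream and parse it deeply.
			Port:    8080,
			Handler: "relay_http",
			Config: map[string]string{
				"service":    "web",
				"targetHost": "127.0.0.1:8081",
				"isTls":      "false",
			},
		},
		{
			// Relay raw TCP on port 2323 and prepend one line of session JSON.
			Port:    2323,
			Handler: "relay_tcp",
			Config: map[string]string{
				"service":     "telnet",
				"targetAddr":  "127.0.0.1:2424",
				"sendSession": "true",
			},
		},
	}
	fmt.Printf("%+v\n", rules) // in practice these arrive via PotConfig.ForwardingRules
}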
--------------------------------------------------------------------------------
/pkg/config/mq.go:
--------------------------------------------------------------------------------
1 | package config
2 |
3 | // MQConfig is the NSQ message queue configuration
4 | type MQConfig struct {
5 | // Host is the NSQ server address to connect to
6 | Host string `toml:"host"`
7 | // AuthSecret is the authentication secret
8 | AuthSecret string `toml:"secret"`
9 | // Topic is the NSQ topic session logs are published to
10 | Topic string `toml:"topic"`
11 | Compression bool `toml:"compression"`
12 | CompressionLevel int `toml:"compressionLevel"`
13 | Tls bool `toml:"tls"`
14 | EnableTlsVerify bool `toml:"enableTlsVerify"`
15 | ClientCertPath string `toml:"clientCertPath"`
16 | ClientKeyPath string `toml:"clientKeyPath"`
17 | CaCertPath string `toml:"caCertPath"`
18 | }
19 |
--------------------------------------------------------------------------------
/pkg/config/tls.go:
--------------------------------------------------------------------------------
1 | package config
2 |
3 | import (
4 | "crypto/rand"
5 | "crypto/rsa"
6 | "crypto/tls"
7 | "crypto/x509"
8 | "crypto/x509/pkix"
9 | "log"
10 | "math/big"
11 | "time"
12 | )
13 |
14 | // TLSConfig is the TLS configuration
15 | type TLSConfig struct {
16 | // CertKey is the certificate private key file path
17 | CertKey string `toml:"certKey"`
18 | // CertFile is the certificate file path
19 | CertFile string `toml:"certFile"`
20 | // TODO automatic certificate generation options
21 | }
22 |
23 | func LoadCert(certFile, keyFile string) (tls.Certificate, error) {
24 | return tls.LoadX509KeyPair(certFile, keyFile)
25 | }
26 | func genCert() tls.Certificate {
27 | // Generate a self-signed TLS certificate
28 | priv, err := rsa.GenerateKey(rand.Reader, 2048)
29 | if err != nil {
30 | log.Fatalf("Failed to generate private key: %s\n", err)
31 | }
32 | notBefore := time.Now()
33 | notAfter := notBefore.Add(365 * 24 * time.Hour)
34 | serialNumber, err := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 128))
35 | if err != nil {
36 | log.Fatalf("Failed to generate serial number: %s\n", err)
37 | }
38 | template := x509.Certificate{
39 | SerialNumber: serialNumber,
40 | Subject: pkix.Name{
41 | Organization: []string{"Hachimi"},
42 | },
43 | NotBefore: notBefore,
44 | NotAfter: notAfter,
45 | }
46 |
47 | cer, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv)
48 | if err != nil {
49 | log.Fatalf("Failed to create certificate: %s\n", err)
50 | }
51 | return tls.Certificate{
52 | Certificate: [][]byte{cer},
53 | PrivateKey: priv,
54 | }
55 | }
56 |
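A small sketch, written as a test inside package config since genCert is unexported, that inspects the throwaway certificate generated above:

package config

import (
	"crypto/x509"
	"testing"
)

func TestGenCertSketch(t *testing.T) {
	cert := genCert()
	leaf, err := x509.ParseCertificate(cert.Certificate[0])
	if err != nil {
		t.Fatal(err)
	}
	// The subject organization and the one-year validity come from the template above.
	if len(leaf.Subject.Organization) == 0 || leaf.Subject.Organization[0] != "Hachimi" {
		t.Fatalf("unexpected subject: %v", leaf.Subject)
	}
	t.Logf("certificate valid from %s to %s", leaf.NotBefore, leaf.NotAfter)
}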
--------------------------------------------------------------------------------
/pkg/emulation/http/http.go:
--------------------------------------------------------------------------------
1 | package http
2 |
3 | import (
4 | "github.com/fsnotify/fsnotify"
5 | "github.com/google/uuid"
6 | "github.com/traefik/yaegi/interp"
7 | "github.com/traefik/yaegi/stdlib"
8 | "github.com/valyala/fasthttp"
9 | "hachimi/pkg/config"
10 | "hachimi/pkg/plugin"
11 | "hachimi/pkg/plugin/symbols"
12 | "hachimi/pkg/types"
13 | "hachimi/pkg/utils"
14 | "log"
15 | "net"
16 |
17 | "os"
18 | "sync"
19 | "time"
20 | )
21 |
22 | var serverPool sync.Pool
23 |
24 | func HandleHttp(conn net.Conn, session *types.Session) {
25 | httpLog := &types.Http{Session: *session}
26 | httpLog.StartTime = session.StartTime
27 | httpLog.ID = uuid.New().String()
28 | httpLog.SessionID = session.ID
29 | httpLog.Header = make(map[string]string)
30 | httpLog.BodyParam = make(map[string]string)
31 | httpLog.UriParam = make(map[string]string)
32 | httpLog.IsHandled = true
33 | err := ServeHttp(conn, func(fasthttpCtx *fasthttp.RequestCtx) {
34 | // pass the ctx through to the request handler
35 | RequestHandlerFunc(httpLog, fasthttpCtx)
36 | })
37 | httpLog.EndTime = time.Now()
38 | httpLog.Duration = int(httpLog.EndTime.Sub(httpLog.StartTime).Milliseconds())
39 | if err != nil {
40 | httpLog.IsHandled = false
41 | utils.ReadAll(conn, 1024)
42 | }
43 | config.Logger.Log(httpLog)
44 | }
45 |
46 | func ServeHttp(c net.Conn, handler fasthttp.RequestHandler) error {
47 | v := serverPool.Get()
48 | if v == nil {
49 | v = &fasthttp.Server{}
50 | }
51 | s := v.(*fasthttp.Server)
52 | s.NoDefaultServerHeader = true
53 | s.NoDefaultContentType = true
54 | s.ReadBufferSize = 1024 * 1024 * 5
55 | s.DisableHeaderNamesNormalizing = true
56 | s.DisableKeepalive = false
57 | s.Handler = handler
58 | err := s.ServeConn(c)
59 | s.KeepHijackedConns = true
60 | s.Handler = nil
61 | serverPool.Put(v)
62 | return err
63 | }
64 |
65 | var RequestHandlerFunc func(*types.Http, *fasthttp.RequestCtx)
66 |
67 | func init() {
68 | scriptFileName := "./httpserver.go"
69 | watcher, err := fsnotify.NewWatcher()
70 | if err != nil {
71 | panic(err)
72 | return
73 | }
74 | //defer watcher.Close()
75 | err = watcher.Add(scriptFileName)
76 | if err != nil {
77 | RequestHandlerFunc = plugin.RequestHandler
78 | return
79 | }
80 | loadScript(scriptFileName)
81 |
82 | if RequestHandlerFunc == nil {
83 | panic("requestHandlerFunc == nil")
84 | }
85 |
86 | go func() {
87 | for {
88 | select {
89 | case event, ok := <-watcher.Events:
90 | if !ok {
91 | return
92 | }
93 | log.Printf("Event: %s\n", event)
94 | if event.Op&fsnotify.Write == fsnotify.Write {
95 | // the file was written to; reload the script
96 | log.Println("Reloading script...")
97 | time.Sleep(1 * time.Second)
98 | loadScript(scriptFileName)
99 | }
100 | case err, ok := <-watcher.Errors:
101 | if !ok {
102 | return
103 | }
104 | log.Println("Error watching file:", err)
105 | }
106 | }
107 | }()
108 |
109 | }
110 | func loadScript(fileName string) {
111 | i := interp.New(interp.Options{})
112 |
113 | i.Use(stdlib.Symbols)
114 | i.Use(symbols.Symbols)
115 | // read the script content from the file
116 | scriptContent, err := os.ReadFile(fileName)
117 | if err != nil {
118 | log.Println("Error reading script file:", err)
119 | return
120 | }
121 | _, err = i.Eval(string(scriptContent))
122 | if err != nil {
123 | log.Println("Error evaluating script:", err)
124 | return
125 | }
126 |
127 | // look up the RequestHandler function
128 | requestHandlerValue, err := i.Eval("plugin.RequestHandler")
129 | if err != nil {
130 | log.Println("Error getting requestHandler:", err)
131 | return
132 | }
133 |
134 | // convert the value to a function
135 | ok := false
136 | lrequestHandlerFunc, ok := requestHandlerValue.Interface().(func(*types.Http, *fasthttp.RequestCtx))
137 | if !ok {
138 | log.Println("Cannot convert value to function")
139 | return
140 | }
141 |
142 | // swap in the new handler
143 | RequestHandlerFunc = lrequestHandlerFunc
144 | log.Println("Script reloaded successfully")
145 | }
146 |
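For context, loadScript above evaluates ./httpserver.go with yaegi and expects it to define plugin.RequestHandler with the asserted signature. A minimal sketch of such a script (the response values are placeholders; a real deployment would fill in more of the httpLog fields):

package plugin

import (
	"github.com/valyala/fasthttp"

	"hachimi/pkg/types"
)

// RequestHandler is looked up by loadScript via i.Eval("plugin.RequestHandler").
func RequestHandler(httpLog *types.Http, ctx *fasthttp.RequestCtx) {
	httpLog.Method = string(ctx.Method())
	httpLog.Path = string(ctx.URI().RequestURI())
	ctx.Response.Header.Set("Server", "nginx") // fake banner, placeholder value
	ctx.SetStatusCode(fasthttp.StatusOK)
	ctx.SetBodyString("ok")
}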
--------------------------------------------------------------------------------
/pkg/emulation/redis/handle.go:
--------------------------------------------------------------------------------
1 | package redis
2 |
3 | import (
4 | "bufio"
5 | "fmt"
6 | "github.com/google/uuid"
7 | "hachimi/pkg/config"
8 | "hachimi/pkg/types"
9 | "hachimi/pkg/utils"
10 | "io"
11 | "io/ioutil"
12 | "net"
13 | "strconv"
14 | "strings"
15 | "sync"
16 | "time"
17 | )
18 |
19 | func HandleRedis(conn net.Conn, session *types.Session) bool {
20 | c := newConn(conn, config.GetPotConfig().TimeOut)
21 | list := map[string]bool{"acl": true,
22 | "append": true,
23 | "asking": true,
24 | "auth": true,
25 | "bf.add": true,
26 | "bf.card": true,
27 | "bf.exists": true,
28 | "bf.info": true,
29 | "bf.insert": true,
30 | "bf.loadchunk": true,
31 | "bf.madd": true,
32 | "bf.mexists": true,
33 | "bf.reserve": true,
34 | "bf.scandump": true,
35 | "bgrewriteaof": true,
36 | "bgsave": true,
37 | "bitcount": true,
38 | "bitfield": true,
39 | "bitfield_ro": true,
40 | "bitop": true,
41 | "bitpos": true,
42 | "blmove": true,
43 | "blmpop": true,
44 | "blpop": true,
45 | "brpop": true,
46 | "brpoplpush": true,
47 | "bzmpop": true,
48 | "bzpopmax": true,
49 | "bzpopmin": true,
50 | "cf.add": true,
51 | "cf.addnx": true,
52 | "cf.count": true,
53 | "cf.del": true,
54 | "cf.exists": true,
55 | "cf.info": true,
56 | "cf.insert": true,
57 | "cf.insertnx": true,
58 | "cf.loadchunk": true,
59 | "cf.mexists": true,
60 | "cf.reserve": true,
61 | "cf.scandump": true,
62 | "client": true,
63 | "cluster": true,
64 | "cms.incrby": true,
65 | "cms.info": true,
66 | "cms.initbydim": true,
67 | "cms.initbyprob": true,
68 | "cms.merge": true,
69 | "cms.query": true,
70 | "command": true,
71 | "config": true,
72 | "copy": true,
73 | "dbsize": true,
74 | "decr": true,
75 | "decrby": true,
76 | "del": true,
77 | "discard": true,
78 | "dump": true,
79 | "echo": true,
80 | "eval": true,
81 | "eval_ro": true,
82 | "evalsha": true,
83 | "evalsha_ro": true,
84 | "exec": true,
85 | "exists": true,
86 | "expire": true,
87 | "expireat": true,
88 | "expiretime": true,
89 | "failover": true,
90 | "fcall": true,
91 | "fcall_ro": true,
92 | "flushall": true,
93 | "flushdb": true,
94 | "ft._list": true,
95 | "ft.aggregate": true,
96 | "ft.aliasadd": true,
97 | "ft.aliasdel": true,
98 | "ft.aliasupdate": true,
99 | "ft.alter": true,
100 | "ft.config": true,
101 | "ft.create": true,
102 | "ft.cursor": true,
103 | "ft.dictadd": true,
104 | "ft.dictdel": true,
105 | "ft.dictdump": true,
106 | "ft.dropindex": true,
107 | "ft.explain": true,
108 | "ft.explaincli": true,
109 | "ft.info": true,
110 | "ft.profile": true,
111 | "ft.search": true,
112 | "ft.spellcheck": true,
113 | "ft.sugadd": true,
114 | "ft.sugdel": true,
115 | "ft.sugget": true,
116 | "ft.suglen": true,
117 | "ft.syndump": true,
118 | "ft.synupdate": true,
119 | "ft.tagvals": true,
120 | "function": true,
121 | "geoadd": true,
122 | "geodist": true,
123 | "geohash": true,
124 | "geopos": true,
125 | "georadius": true,
126 | "georadius_ro": true,
127 | "georadiusbymember": true,
128 | "georadiusbymember_ro": true,
129 | "geosearch": true,
130 | "geosearchstore": true,
131 | "get": true,
132 | "getbit": true,
133 | "getdel": true,
134 | "getex": true,
135 | "getrange": true,
136 | "getset": true,
137 | "hdel": true,
138 | "hello": true,
139 | "hexists": true,
140 | "hget": true,
141 | "hgetall": true,
142 | "hincrby": true,
143 | "hincrbyfloat": true,
144 | "hkeys": true,
145 | "hlen": true,
146 | "hmget": true,
147 | "hmset": true,
148 | "hrandfield": true,
149 | "hscan": true,
150 | "hset": true,
151 | "hsetnx": true,
152 | "hstrlen": true,
153 | "hvals": true,
154 | "incr": true,
155 | "incrby": true,
156 | "incrbyfloat": true,
157 | "info": true,
158 | "json.arrappend": true,
159 | "json.arrindex": true,
160 | "json.arrinsert": true,
161 | "json.arrlen": true,
162 | "json.arrpop": true,
163 | "json.arrtrim": true,
164 | "json.clear": true,
165 | "json.debug": true,
166 | "json.del": true,
167 | "json.forget": true,
168 | "json.get": true,
169 | "json.merge": true,
170 | "json.mget": true,
171 | "json.mset": true,
172 | "json.numincrby": true,
173 | "json.nummultby": true,
174 | "json.objkeys": true,
175 | "json.objlen": true,
176 | "json.resp": true,
177 | "json.set": true,
178 | "json.strappend": true,
179 | "json.strlen": true,
180 | "json.toggle": true,
181 | "json.type": true,
182 | "keys": true,
183 | "lastsave": true,
184 | "latency": true,
185 | "lcs": true,
186 | "lindex": true,
187 | "linsert": true,
188 | "llen": true,
189 | "lmove": true,
190 | "lmpop": true,
191 | "lolwut": true,
192 | "lpop": true,
193 | "lpos": true,
194 | "lpush": true,
195 | "lpushx": true,
196 | "lrange": true,
197 | "lrem": true,
198 | "lset": true,
199 | "ltrim": true,
200 | "memory": true,
201 | "mget": true,
202 | "migrate": true,
203 | "module": true,
204 | "monitor": true,
205 | "move": true,
206 | "mset": true,
207 | "msetnx": true,
208 | "multi": true,
209 | "object": true,
210 | "persist": true,
211 | "pexpire": true,
212 | "pexpireat": true,
213 | "pexpiretime": true,
214 | "pfadd": true,
215 | "pfcount": true,
216 | "pfdebug": true,
217 | "pfmerge": true,
218 | "pfselftest": true,
219 | "ping": true,
220 | "psetex": true,
221 | "psubscribe": true,
222 | "psync": true,
223 | "pttl": true,
224 | "publish": true,
225 | "pubsub": true,
226 | "punsubscribe": true,
227 | "quit": true,
228 | "randomkey": true,
229 | "readonly": true,
230 | "readwrite": true,
231 | "rename": true,
232 | "renamenx": true,
233 | "replconf": true,
234 | "replicaof": true,
235 | "reset": true,
236 | "restore": true,
237 | "restore-asking": true,
238 | "role": true,
239 | "rpop": true,
240 | "rpoplpush": true,
241 | "rpush": true,
242 | "rpushx": true,
243 | "sadd": true,
244 | "save": true,
245 | "scan": true,
246 | "scard": true,
247 | "script": true,
248 | "sdiff": true,
249 | "sdiffstore": true,
250 | "select": true,
251 | "set": true,
252 | "setbit": true,
253 | "setex": true,
254 | "setnx": true,
255 | "setrange": true,
256 | "shutdown": true,
257 | "sinter": true,
258 | "sintercard": true,
259 | "sinterstore": true,
260 | "sismember": true,
261 | "slaveof": true,
262 | "slowlog": true,
263 | "smembers": true,
264 | "smismember": true,
265 | "smove": true,
266 | "sort": true,
267 | "sort_ro": true,
268 | "spop": true,
269 | "spublish": true,
270 | "srandmember": true,
271 | "srem": true,
272 | "sscan": true,
273 | "ssubscribe": true,
274 | "strlen": true,
275 | "subscribe": true,
276 | "substr": true,
277 | "sunion": true,
278 | "sunionstore": true,
279 | "sunsubscribe": true,
280 | "swapdb": true,
281 | "sync": true,
282 | "tdigest.add": true,
283 | "tdigest.byrank": true,
284 | "tdigest.byrevrank": true,
285 | "tdigest.cdf": true,
286 | "tdigest.create": true,
287 | "tdigest.info": true,
288 | "tdigest.max": true,
289 | "tdigest.merge": true,
290 | "tdigest.min": true,
291 | "tdigest.quantile": true,
292 | "tdigest.rank": true,
293 | "tdigest.reset": true,
294 | "tdigest.revrank": true,
295 | "tdigest.trimmed_mean": true,
296 | "tfcall": true,
297 | "tfcallasync": true,
298 | "tfunction": true,
299 | "time": true,
300 | "topk.add": true,
301 | "topk.count": true,
302 | "topk.incrby": true,
303 | "topk.info": true,
304 | "topk.list": true,
305 | "topk.query": true,
306 | "topk.reserve": true,
307 | "touch": true,
308 | "ts.add": true,
309 | "ts.alter": true,
310 | "ts.create": true,
311 | "ts.createrule": true,
312 | "ts.decrby": true,
313 | "ts.del": true,
314 | "ts.deleterule": true,
315 | "ts.get": true,
316 | "ts.incrby": true,
317 | "ts.info": true,
318 | "ts.madd": true,
319 | "ts.mget": true,
320 | "ts.mrange": true,
321 | "ts.mrevrange": true,
322 | "ts.queryindex": true,
323 | "ts.range": true,
324 | "ts.revrange": true,
325 | "ttl": true,
326 | "type": true,
327 | "unlink": true,
328 | "unsubscribe": true,
329 | "unwatch": true,
330 | "wait": true,
331 | "waitaof": true,
332 | "watch": true,
333 | "xack": true,
334 | "xadd": true,
335 | "xautoclaim": true,
336 | "xclaim": true,
337 | "xdel": true,
338 | "xgroup": true,
339 | "xinfo": true,
340 | "xlen": true,
341 | "xpending": true,
342 | "xrange": true,
343 | "xread": true,
344 | "xreadgroup": true,
345 | "xrevrange": true,
346 | "xsetid": true,
347 | "xtrim": true,
348 | "zadd": true,
349 | "zcard": true,
350 | "zcount": true,
351 | "zdiff": true,
352 | "zdiffstore": true,
353 | "zincrby": true,
354 | "zinter": true,
355 | "zintercard": true,
356 | "zinterstore": true,
357 | "zlexcount": true,
358 | "zmpop": true,
359 | "zmscore": true,
360 | "zpopmax": true,
361 | "zpopmin": true,
362 | "zrandmember": true,
363 | "zrange": true,
364 | "zrangebylex": true,
365 | "zrangebyscore": true,
366 | "zrangestore": true,
367 | "zrank": true,
368 | "zrem": true,
369 | "zremrangebylex": true,
370 | "zremrangebyrank": true,
371 | "zremrangebyscore": true,
372 | "zrevrange": true,
373 | "zrevrangebylex": true,
374 | "zrevrangebyscore": true,
375 | "zrevrank": true,
376 | "zscan": true,
377 | "zscore": true,
378 | "zunion": true}
379 | sess := &types.RedisSession{}
380 | sess.ID = uuid.New().String()
381 | sess.SessionID = session.ID
382 | sess.Service = "redis"
383 | sess.StartTime = time.Now()
384 | defer func() {
385 | sess.EndTime = time.Now()
386 | sess.Duration = int(sess.EndTime.Sub(sess.StartTime).Milliseconds())
387 | config.Logger.Log(sess)
388 | }()
389 | for {
390 | request, err := parseRequest(c)
391 | if err != nil {
392 | sess.Error = true
393 | if err == io.EOF {
394 | return true
395 | } else {
396 | if len(sess.Data) == 0 {
397 | return false // fall back to non-Redis handling
398 | }
399 | }
400 | return true
401 |
402 | } else if strings.ToLower(request.Name) == "auth" {
403 | if len(request.Args) == 1 {
404 | sess.PassWord = string(request.Args[0])
405 | } else if len(request.Args) == 2 {
406 | sess.User = string(request.Args[0])
407 | sess.PassWord = string(request.Args[1])
408 | }
409 |
410 | conn.Write([]byte("+OK\r\n"))
411 | } else if strings.ToLower(request.Name) == "info" {
412 | if len(request.Args) > 0 {
413 | if strings.ToLower(string(request.Args[0])) == "server" {
414 | conn.Write([]byte("$439\r\n# Server\r\nredis_version:4.0.8\r\nredis_git_sha1:00000000\r\nredis_git_dirty:0\r\nredis_build_id:c2238b38b1edb0e2\r\nredis_mode:standalone\r\nos:Linux 3.10.0-1024.1.2.el7.x86_64 x86_64\r\narch_bits:64\r\nmultiplexing_api:epoll\r\ngcc_version:4.8.5\r\nprocess_id:3772\r\nrun_id:0e61abd297771de3fe812a3c21027732ac9f41fe\r\ntcp_port:6379\r\nuptime_in_seconds:25926381\r\nuptime_in_days:300\r\nhz:10\r\nlru_clock:13732392\r\nconfig_file:/usr/local/redis-local/etc/redis.conf\r\n\r\n"))
415 | continue
416 | }
417 | }
418 |
419 | conn.Write([]byte("$2012\r\n# Server\r\nredis_version:4.0.8\r\nredis_git_sha1:00000000\r\nredis_git_dirty:0\r\nredis_build_id:ca4ed916473088db\r\nredis_mode:standalone\r\nos:Linux 3.10.0-1024.1.2.el7.x86_64 x86_64\r\narch_bits:64\r\nmultiplexing_api:epoll\r\ngcc_version:4.8.5\r\nprocess_id:3772\r\nrun_id:0e61abd297771de3fe812a3c21027732ac9f41fe\r\ntcp_port:6379\r\nuptime_in_seconds:25926381\r\nuptime_in_days:300\r\nhz:10\r\nlru_clock:13732392\r\nconfig_file:/usr/local/redis-local/etc/redis.conf\r\n\r\n# Clients\r\nconnected_clients:208\r\nclient_longest_output_list:0\r\nclient_biggest_input_buf:517\r\nblocked_clients:0\r\n\r\n# Memory\r\nused_memory:5151720\r\nused_memory_human:4.91M\r\nused_memory_rss:6885376\r\nused_memory_peak:5214456\r\nused_memory_peak_human:4.97M\r\nused_memory_lua:61440\r\nmem_fragmentation_ratio:1.34\r\nmem_allocator:jemalloc-3.6.0\r\n\r\n# Persistence\r\nloading:0\r\nrdb_changes_since_last_save:0\r\nrdb_bgsave_in_progress:0\r\nrdb_last_save_time:1704104232\r\nrdb_last_bgsave_status:ok\r\nrdb_last_bgsave_time_sec:0\r\nrdb_current_bgsave_time_sec:-1\r\naof_enabled:0\r\naof_rewrite_in_progress:0\r\naof_rewrite_scheduled:0\r\naof_last_rewrite_time_sec:-1\r\naof_current_rewrite_time_sec:-1\r\naof_last_bgrewrite_status:ok\r\naof_last_write_status:ok\r\n\r\n# Stats\r\ntotal_connections_received:509000\r\ntotal_commands_processed:616946\r\ninstantaneous_ops_per_sec:0\r\ntotal_net_input_bytes:20893857\r\ntotal_net_output_bytes:39299490\r\ninstantaneous_input_kbps:0.00\r\ninstantaneous_output_kbps:0.00\r\nrejected_connections:0\r\nsync_full:0\r\nsync_partial_ok:0\r\nsync_partial_err:0\r\nexpired_keys:45\r\nevicted_keys:0\r\nkeyspace_hits:49026\r\nkeyspace_misses:308\r\npubsub_channels:0\r\npubsub_patterns:0\r\nlatest_fork_usec:300\r\nmigrate_cached_sockets:0\r\n\r\n# Replication\r\nrole:master\r\nconnected_slaves:0\r\nmaster_repl_offset:0\r\nrepl_backlog_active:0\r\nrepl_backlog_size:1048576\r\nrepl_backlog_first_byte_offset:0\r\nrepl_backlog_histlen:0\r\n\r\n# CPU\r\nused_cpu_sys:15257.43\r\nused_cpu_user:10518.80\r\nused_cpu_sys_children:279.62\r\nused_cpu_user_children:31.15\r\n\r\n# Cluster\r\ncluster_enabled:0\r\n\r\n# Keyspace\r\ndb0:keys=1,expires=0,avg_ttl=0\r\n\r\n"))
420 |
421 | } else if strings.ToLower(request.Name) == "ping" {
422 | if len(request.Args) > 0 {
423 | conn.Write([]byte("$" + strconv.Itoa(len(request.Args[0])) + "\r\n")) // bulk string echo of the PING argument
424 | conn.Write(request.Args[0])
425 | conn.Write([]byte("\r\n"))
426 | } else {
427 | conn.Write([]byte("+PONG\r\n"))
428 | }
429 | } else if list[strings.ToLower(request.Name)] {
430 | conn.Write([]byte("+OK\r\n"))
431 | } else {
432 | conn.Write([]byte("-ERR unknown command '" + request.Name + "'\r\n")) // return the stock error so the fingerprint is recorded by scanners and search engines
433 | //if len(sess.Data) == 0 {
434 | // return false // fall back to non-Redis handling
435 | //}
436 | }
437 | var arg string
438 | arg = request.Name + " "
439 | for _, v := range request.Args {
440 | arg += utils.EscapeBytes(v) + " "
441 | }
442 | sess.Data += arg + "\n"
443 |
444 | //if _, err = reply.WriteTo(c.w); err != nil {
445 | // return err
446 | //}
447 | /*
448 | if c.timeout > 0 {
449 | deadline := time.Now().Add(c.timeout)
450 | if err := c.nc.SetWriteDeadline(deadline); err != nil {
451 | return nil
452 | }
453 | }
454 | */
455 |
456 | }
457 | return true
458 | }
459 |
460 | type Request struct {
461 | Name string
462 | Args [][]byte
463 | Host string
464 | ClientChan chan struct{}
465 | }
466 |
467 | type Conn struct {
468 | r *bufio.Reader
469 | w *bufio.Writer
470 |
471 | wLock sync.Mutex
472 |
473 | db uint32
474 | nc net.Conn
475 |
476 | // summary for this connection
477 | summ string
478 | timeout time.Duration
479 |
480 | authenticated bool
481 |
482 | // whether sync from master or not
483 | isSyncing bool
484 | }
485 |
486 | func newConn(nc net.Conn, timeout int) *Conn {
487 | c := &Conn{
488 | nc: nc,
489 | }
490 |
491 | c.r = bufio.NewReader(nc)
492 | c.w = bufio.NewWriter(nc)
493 | //c.summ = fmt.Sprintf("local%s-remote%s", nc.LocalAddr(), nc.RemoteAddr())
494 | c.timeout = time.Duration(timeout) * time.Second
495 | c.authenticated = false
496 | c.isSyncing = false
497 | //log.Info("connection established:", c.summ)
498 |
499 | return c
500 | }
501 |
502 | func (c *Conn) Close() {
503 | c.nc.Close()
504 | c = nil
505 | }
506 |
507 | func parseRequest(c *Conn) (*Request, error) {
508 | r := c.r
509 | // first line of redis request should be:
510 | // *CRLF
511 | line, err := r.ReadString('\n')
512 | if err != nil {
513 | return nil, err
514 | }
515 | // note that this line also protects us from negative integers
516 | var argsCount int
517 |
518 | // Multiline request:
519 | if line[0] == '*' {
520 | if _, err := fmt.Sscanf(line, "*%d\r", &argsCount); err != nil {
521 | return nil, malformed("*", line)
522 | }
523 | // All next lines are pairs of:
524 | //$ CR LF
525 | // CR LF
526 | // first argument is a command name, so just convert
527 | firstArg, err := readArgument(r)
528 | if err != nil {
529 | return nil, err
530 | }
531 |
532 | args := make([][]byte, argsCount-1)
533 | for i := 0; i < argsCount-1; i += 1 {
534 | if args[i], err = readArgument(r); err != nil {
535 | return nil, err
536 | }
537 | }
538 |
539 | return &Request{
540 | Name: strings.ToLower(string(firstArg)),
541 | Args: args,
542 | }, nil
543 | }
544 |
545 | // Inline request:
546 | fields := strings.Split(strings.Trim(line, "\r\n"), " ")
547 |
548 | var args [][]byte
549 | if len(fields) > 1 {
550 | for _, arg := range fields[1:] {
551 | args = append(args, []byte(arg))
552 | }
553 | }
554 | return &Request{
555 | Name: strings.ToLower(string(fields[0])),
556 | Args: args,
557 | }, nil
558 |
559 | }
560 |
561 | func readArgument(r *bufio.Reader) ([]byte, error) {
562 |
563 | line, err := r.ReadString('\n')
564 | if err != nil {
565 | return nil, malformed("$", line)
566 | }
567 | var argSize int
568 | if _, err := fmt.Sscanf(line, "$%d\r", &argSize); err != nil {
569 | return nil, malformed("$", line)
570 | }
571 |
572 | // I think int is safe here as the max length of request
573 | // should be less than the max int value?
574 | data, err := ioutil.ReadAll(io.LimitReader(r, int64(argSize)))
575 | if err != nil {
576 | return nil, err
577 | }
578 |
579 | if len(data) != argSize {
580 | return nil, malformedLength(argSize, len(data))
581 | }
582 |
583 | // Now check for trailing CR
584 | if b, err := r.ReadByte(); err != nil || b != '\r' {
585 | return nil, malformedMissingCRLF()
586 | }
587 |
588 | // And LF
589 | if b, err := r.ReadByte(); err != nil || b != '\n' {
590 | return nil, malformedMissingCRLF()
591 | }
592 |
593 | return data, nil
594 | }
595 |
596 | func malformed(expected string, got string) error {
597 | return fmt.Errorf("Mailformed request:'%s does not match %s\\r\\n'", got, expected)
598 | }
599 |
600 | func malformedLength(expected int, got int) error {
601 | return fmt.Errorf(
602 | "Mailformed request: argument length '%d does not match %d\\r\\n'",
603 | got, expected)
604 | }
605 |
606 | func malformedMissingCRLF() error {
607 | return fmt.Errorf("Mailformed request: line should end with \\r\\n")
608 | }
609 |
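A small sketch, written as a test inside this package, showing the RESP multi-bulk framing that parseRequest above decodes:

package redis

import (
	"net"
	"testing"
)

func TestParseRequestSketch(t *testing.T) {
	client, server := net.Pipe()
	defer client.Close()
	defer server.Close()

	go func() {
		// RESP multi-bulk encoding of: AUTH secret
		client.Write([]byte("*2\r\n$4\r\nAUTH\r\n$6\r\nsecret\r\n"))
	}()

	req, err := parseRequest(newConn(server, 5))
	if err != nil {
		t.Fatal(err)
	}
	if req.Name != "auth" || string(req.Args[0]) != "secret" {
		t.Fatalf("unexpected request: %+v", req)
	}
}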
--------------------------------------------------------------------------------
/pkg/emulation/redis/replay.go:
--------------------------------------------------------------------------------
1 | // Copyright 2013 Docker, Inc.
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // See the License for the specific language governing permissions and
12 | // limitations under the License.
13 | //
14 | // The following only applies to changes made to this file as part of ELEME development.
15 | //
16 | // Portions Copyright (c) 2019 ELEME, Inc.
17 | //
18 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
19 | // in compliance with the License. You may obtain a copy of the License at
20 | //
21 | // http://www.apache.org/licenses/LICENSE-2.0
22 | //
23 | // Unless required by applicable law or agreed to in writing, software distributed under the License
24 | // is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
25 | // or implied. See the License for the specific language governing permissions and limitations
26 | // under the License.
27 |
28 | package redis
29 |
30 | import (
31 | "bytes"
32 | "errors"
33 | "io"
34 | "strconv"
35 | )
36 |
37 | type ReplyWriter io.WriterTo
38 |
39 | type StatusReply struct {
40 | code string
41 | }
42 |
43 | func (r *StatusReply) WriteTo(w io.Writer) (int64, error) {
44 | n, err := w.Write([]byte("+" + r.code + "\r\n"))
45 | return int64(n), err
46 | }
47 |
48 | type IntegerReply struct {
49 | number int
50 | }
51 |
52 | func (r *IntegerReply) WriteTo(w io.Writer) (int64, error) {
53 | n, err := w.Write([]byte(":" + strconv.Itoa(r.number) + "\r\n"))
54 | return int64(n), err
55 | }
56 |
57 | type Integer64Reply struct {
58 | number int64
59 | }
60 |
61 | func (r *Integer64Reply) WriteTo(w io.Writer) (int64, error) {
62 | n, err := w.Write([]byte(":" + strconv.FormatInt(r.number, 10) + "\r\n"))
63 | return int64(n), err
64 | }
65 |
66 | type BulkReply struct {
67 | value []byte
68 | }
69 |
70 | func writeBytes(value interface{}, w io.Writer) (int64, error) {
71 | //it's a NullBulkReply
72 | if value == nil {
73 | n, err := w.Write([]byte("$-1\r\n"))
74 | return int64(n), err
75 | }
76 | switch v := value.(type) {
77 | case string:
78 | if len(v) == 0 {
79 | n, err := w.Write([]byte("$-1\r\n"))
80 | return int64(n), err
81 | }
82 | wrote, err := w.Write([]byte("$" + strconv.Itoa(len(v)) + "\r\n"))
83 | if err != nil {
84 | return int64(wrote), err
85 | }
86 | wroteBytes, err := w.Write([]byte(v))
87 | if err != nil {
88 | return int64(wrote + wroteBytes), err
89 | }
90 | wroteCrLf, err := w.Write([]byte("\r\n"))
91 | return int64(wrote + wroteBytes + wroteCrLf), err
92 | case []byte:
93 | if v == nil {
94 | n, err := w.Write([]byte("$-1\r\n"))
95 | return int64(n), err
96 | }
97 | wrote, err := w.Write([]byte("$" + strconv.Itoa(len(v)) + "\r\n"))
98 | if err != nil {
99 | return int64(wrote), err
100 | }
101 | wroteBytes, err := w.Write(v)
102 | if err != nil {
103 | return int64(wrote + wroteBytes), err
104 | }
105 | wroteCrLf, err := w.Write([]byte("\r\n"))
106 | return int64(wrote + wroteBytes + wroteCrLf), err
107 | case int:
108 | wrote, err := w.Write([]byte(":" + strconv.Itoa(v) + "\r\n"))
109 | if err != nil {
110 | return int64(wrote), err
111 | }
112 | return int64(wrote), err
113 | }
114 |
115 | //Debugf("Invalid type sent to writeBytes: %v", reflect.TypeOf(value).Name())
116 | return 0, errors.New("Invalid type sent to writeBytes")
117 | }
118 |
119 | func (r *BulkReply) WriteTo(w io.Writer) (int64, error) {
120 | return writeBytes(r.value, w)
121 | }
122 |
123 | type MonitorReply struct {
124 | c <-chan string
125 | }
126 |
127 | func (r *MonitorReply) WriteTo(w io.Writer) (int64, error) {
128 | statusReply := &StatusReply{}
129 | totalBytes := int64(0)
130 | for line := range r.c {
131 | statusReply.code = line
132 | if n, err := statusReply.WriteTo(w); err != nil {
133 | totalBytes += n
134 | return int64(totalBytes), err
135 | } else {
136 | totalBytes += n
137 | }
138 | }
139 | return totalBytes, nil
140 | }
141 |
142 | // for nil reply in multi bulk just set []byte as nil
143 | type MultiBulkReply struct {
144 | values []interface{}
145 | }
146 |
147 | func MultiBulkFromMap(m map[string]interface{}) *MultiBulkReply {
148 | values := make([]interface{}, len(m)*2)
149 | i := 0
150 | for key, val := range m {
151 | values[i] = []byte(key)
152 | values[i+1] = val
153 | i += 2
154 | }
155 | return &MultiBulkReply{values: values}
156 | }
157 |
158 | func writeMultiBytes(values []interface{}, w io.Writer) (int64, error) {
159 | if values == nil {
160 | return 0, errors.New("Nil in multi bulk replies are not ok")
161 | }
162 | wrote, err := w.Write([]byte("*" + strconv.Itoa(len(values)) + "\r\n"))
163 | if err != nil {
164 | return int64(wrote), err
165 | }
166 | wrote64 := int64(wrote)
167 | for _, v := range values {
168 | wroteBytes, err := writeBytes(v, w)
169 | if err != nil {
170 | return wrote64 + wroteBytes, err
171 | }
172 | wrote64 += wroteBytes
173 | }
174 | return wrote64, err
175 | }
176 |
177 | func (r *MultiBulkReply) WriteTo(w io.Writer) (int64, error) {
178 | return writeMultiBytes(r.values, w)
179 | }
180 |
181 | func ReplyToString(r ReplyWriter) (string, error) {
182 | var b bytes.Buffer
183 |
184 | _, err := r.WriteTo(&b)
185 | if err != nil {
186 | return "ERROR!", err
187 | }
188 | return b.String(), nil
189 | }
190 |
191 | type MultiChannelWriter struct {
192 | Chans []*ChannelWriter
193 | }
194 |
195 | func (c *MultiChannelWriter) WriteTo(w io.Writer) (n int64, err error) {
196 | chans := make(chan struct{}, len(c.Chans))
197 | for _, elem := range c.Chans {
198 | go func(elem io.WriterTo) {
199 | defer func() { chans <- struct{}{} }()
200 | if n2, err2 := elem.WriteTo(w); err2 != nil {
201 | n += n2
202 | err = err2
203 | return
204 | } else {
205 | n += n2
206 | }
207 | }(elem)
208 | }
209 | for i := 0; i < len(c.Chans); i++ {
210 | <-chans
211 | }
212 | return n, err
213 | }
214 |
215 | type ChannelWriter struct {
216 | FirstReply []interface{}
217 | Channel chan []interface{}
218 | clientChan chan struct{}
219 | }
220 |
221 | func (c *ChannelWriter) WriteTo(w io.Writer) (int64, error) {
222 | totalBytes, err := writeMultiBytes(c.FirstReply, w)
223 | if err != nil {
224 | return totalBytes, err
225 | }
226 |
227 | for {
228 | select {
229 | case <-c.clientChan:
230 | return totalBytes, err
231 | case reply := <-c.Channel:
232 | if reply == nil {
233 | return totalBytes, nil
234 | } else {
235 | wroteBytes, err := writeMultiBytes(reply, w)
236 | // FIXME: obvious overflow here,
237 | // Just ignore? Who cares?
238 | totalBytes += wroteBytes
239 | if err != nil {
240 | return totalBytes, err
241 | }
242 | }
243 | }
244 | }
245 | return totalBytes, nil
246 | }
247 |
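A tiny usage sketch for the reply writers above, rendering a multi-bulk reply to its RESP wire form via ReplyToString:

package redis

import "testing"

func TestReplyToStringSketch(t *testing.T) {
	reply := MultiBulkFromMap(map[string]interface{}{"role": "master"})
	wire, err := ReplyToString(reply)
	if err != nil {
		t.Fatal(err)
	}
	if wire != "*2\r\n$4\r\nrole\r\n$6\r\nmaster\r\n" {
		t.Fatalf("unexpected wire form: %q", wire)
	}
}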
--------------------------------------------------------------------------------
/pkg/emulation/relay/http.go:
--------------------------------------------------------------------------------
1 | package relay
2 |
3 | /*
4 | HTTP Relay
5 | Implements an HTTP man-in-the-middle relay with deep request/response analysis
6 | */
7 |
8 | import (
9 | "crypto/tls"
10 | "github.com/google/uuid"
11 | "github.com/valyala/fasthttp"
12 | proxy "github.com/yeqown/fasthttp-reverse-proxy/v2"
13 | "hachimi/pkg/config"
14 | "hachimi/pkg/types"
15 | "hachimi/pkg/utils"
16 | "io"
17 | "net"
18 | "sync"
19 | "time"
20 | )
21 |
22 | var serverPool sync.Pool
23 |
24 | func HandleHttpRelay(src net.Conn, session *types.Session, configMap map[string]string) bool {
25 | targetHost := configMap["targetHost"] // upstream address
26 | service := configMap["service"] // service name
27 | if service == "" {
28 | service = "http_relay"
29 | }
30 | isTls := configMap["isTls"] // whether the upstream uses TLS
31 |
32 | var tlsConfig *tls.Config
33 |
34 | if isTls == "true" {
35 | host, _, _ := net.SplitHostPort(targetHost)
36 | //panic("tls not supported")
37 | tlsConfig = &tls.Config{
38 | ServerName: host,
39 | InsecureSkipVerify: true,
40 | MaxVersion: tls.VersionTLS13,
41 | MinVersion: tls.VersionSSL30,
42 | }
43 | }
44 |
45 | httpLog := &types.Http{Session: *session}
46 | httpLog.StartTime = session.StartTime
47 | httpLog.ID = uuid.New().String()
48 | httpLog.SessionID = session.ID
49 | httpLog.Header = make(map[string]string)
50 | httpLog.BodyParam = make(map[string]string)
51 | httpLog.UriParam = make(map[string]string)
52 | httpLog.IsHandled = true
53 | httpLog.Service = service
54 | proxyServer, _ := proxy.NewReverseProxyWith(proxy.WithAddress(targetHost), proxy.WithTLSConfig(tlsConfig), proxy.WithTimeout(time.Duration(config.GetPotConfig().TimeOut)*time.Second))
55 | v := serverPool.Get()
56 | if v == nil {
57 | v = &fasthttp.Server{}
58 | }
59 | s := v.(*fasthttp.Server)
60 | s.NoDefaultServerHeader = true
61 | s.NoDefaultContentType = true
62 | s.ReadBufferSize = 1024 * 1024 * 5
63 | s.DisableHeaderNamesNormalizing = true
64 | s.DisableKeepalive = false
65 | httpLog.Service = service
66 | s.Handler = func(fasthttpCtx *fasthttp.RequestCtx) {
67 | // pass the ctx through to the request handler
68 | RequestHandler(httpLog, fasthttpCtx, proxyServer.ServeHTTP, configMap)
69 | }
70 | serverPool.Put(v)
71 | err := s.ServeConn(src)
72 | if err != nil {
73 | httpLog.IsHandled = false
74 | io.ReadAll(src) // keep reading on error so the rest of the traffic is still consumed
75 | }
76 | return true
77 | }
78 |
79 | func RequestHandler(plog *types.Http, ctx *fasthttp.RequestCtx, next func(*fasthttp.RequestCtx), configMap map[string]string) {
80 | ctx.SetConnectionClose()
81 | ctx.Request.Header.VisitAll(func(key, value []byte) {
82 | plog.Header[string(key)] = string(value)
83 | })
84 | if string(ctx.Request.Header.Cookie("rememberMe")) != "" {
85 | ctx.Response.Header.Set("Set-Cookie", "rememberMe=deleteMe; Path=/; Max-Age=0;")
86 | }
87 |
88 | ctx.URI().DisablePathNormalizing = true
89 | plog.Path = string(ctx.URI().RequestURI())
90 | Hash := string(ctx.URI().Hash())
91 | if Hash != "" {
92 | plog.Path += "#" + Hash
93 |
94 | }
95 | isTls := configMap["isTls"]
96 | if isTls == "true" {
97 | // HTTP relaying to an HTTPS upstream
98 | // https://github.com/valyala/fasthttp/blob/ce283fb97c2e0c4801e68fd6c362a81a8a5c74b5/client.go#L1432C26-L1432C33
99 | // https://github.com/valyala/fasthttp/issues/841
100 | ctx.URI().SetScheme("https")
101 | }
102 |
103 | plog.Method = string(ctx.Method())
104 | plog.Host = string(ctx.Host())
105 | plog.UA = string(ctx.UserAgent())
106 | ctx.QueryArgs().VisitAll(func(key, value []byte) {
107 | plog.UriParam[string(key)] = string(value)
108 | })
109 | ctx.PostArgs().VisitAll(func(key, value []byte) {
110 | plog.BodyParam[string(key)] = string(value)
111 | })
112 | plog.Body = utils.EscapeBytes(ctx.Request.Body())
113 | plog.RawHeader = string(ctx.Request.Header.Header())
114 | next(ctx)
115 | plog.EndTime = time.Now()
116 | plog.Duration = int(plog.EndTime.Sub(plog.StartTime).Milliseconds())
117 | config.Logger.Log(plog)
118 | }
119 |
--------------------------------------------------------------------------------
/pkg/emulation/relay/tcp.go:
--------------------------------------------------------------------------------
1 | package relay
2 |
3 | import (
4 | "encoding/json"
5 | "hachimi/pkg/types"
6 | "hachimi/pkg/utils"
7 | "io"
8 | "log"
9 | "net"
10 | )
11 |
12 | func HandleTCPRelay(src net.Conn, session *types.Session, config map[string]string) bool {
13 | targetAddr := config["targetAddr"] // upstream address
14 | sendSession := config["sendSession"] // whether to send the session info first
15 | dst, err := net.Dial("tcp", targetAddr)
16 | if err != nil {
17 | log.Printf("Failed to connect to target server: %v", err)
18 | return false
19 | }
20 | defer dst.Close()
21 | // bridge the two connections
22 | if sendSession == "true" {
23 | sess, _ := utils.ToMap(session)
24 | jsonData, _ := json.Marshal(sess)
25 | dst.Write(jsonData)
26 | dst.Write([]byte("\n"))
27 | }
28 | go func() {
29 | io.Copy(src, dst)
30 | }()
31 | io.Copy(dst, src)
32 |
33 | return true
34 | }
35 |
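A sketch of a hypothetical upstream for relay_tcp with sendSession = "true": it consumes the single JSON line written above before handling the relayed raw traffic. The listen address and the logging are illustrative, and no particular field names in the session JSON are assumed.

package main

import (
	"bufio"
	"encoding/json"
	"io"
	"log"
	"net"
)

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:2424")
	if err != nil {
		log.Fatal(err)
	}
	for {
		conn, err := ln.Accept()
		if err != nil {
			continue
		}
		go func(c net.Conn) {
			defer c.Close()
			r := bufio.NewReader(c)
			line, err := r.ReadBytes('\n') // first line: serialized session metadata
			if err != nil {
				return
			}
			var meta map[string]interface{}
			if err := json.Unmarshal(line, &meta); err == nil {
				log.Printf("relayed session metadata: %v", meta)
			}
			io.Copy(io.Discard, r) // everything after the first line is the relayed raw stream
		}(conn)
	}
}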
--------------------------------------------------------------------------------
/pkg/emulation/relay/udp.go:
--------------------------------------------------------------------------------
1 | package relay
2 |
3 | import (
4 | "hachimi/pkg/types"
5 | "net"
6 | "time"
7 | )
8 |
9 | func HandleUDPRelay(src net.Conn, session *types.Session, buf []byte, config map[string]string) []byte {
10 | targetAddr := config["targetAddr"] // upstream address
11 | dst, err := net.Dial("udp", targetAddr)
12 | if err != nil {
13 | return nil
14 | }
15 | defer dst.Close()
16 | dst.Write(buf)
17 | // read the reply, waiting up to 2s
18 | dst.SetReadDeadline(time.Now().Add(time.Second * 2))
19 | var buffer = make([]byte, 1024)
20 | n, err := dst.Read(buffer)
21 | if err != nil || n == 0 {
22 | return nil
23 | }
24 | return buffer[:n]
25 |
26 | }
27 |
--------------------------------------------------------------------------------
/pkg/emulation/rsync/rsyncd.go:
--------------------------------------------------------------------------------
1 | package rsync
2 |
3 | import (
4 | "bufio"
5 | "fmt"
6 | "hachimi/pkg/types"
7 | "io"
8 | "net"
9 | "strings"
10 | )
11 |
12 | // HandleRsync emulates rsync protocol version 27; currently it only handles listing modules
13 | func HandleRsync(conn net.Conn, session *types.Session) {
14 | //status := 0
15 | session.Service = "rsync"
16 | // 1. Handshake: send the server protocol version
17 | _, err := fmt.Fprintf(conn, "@RSYNCD: %d\n", 27)
18 | if err != nil {
19 | return
20 | }
21 | rd := bufio.NewReader(conn)
22 | // 2. Read the client protocol version
23 | clientGreeting, err := rd.ReadString('\n')
24 | if err != nil {
25 | return
26 | }
27 | if !strings.HasPrefix(clientGreeting, "@RSYNCD: ") {
28 | return
29 | }
30 |
31 | for {
32 | // 3. Read the module name requested by the client
33 | requestedModule, err := rd.ReadString('\n')
34 | if err != nil {
35 | return
36 | }
37 | if strings.HasPrefix(requestedModule, "@RSYNCD: EXIT") {
38 | return
39 | }
40 | requestedModule = strings.TrimSpace(requestedModule)
41 | // If the requested module is empty or a list request, return the module list
42 | if requestedModule == "" || requestedModule == "#list" {
43 | _, err := io.WriteString(conn, formatModuleList())
44 | if err != nil {
45 | return
46 | }
47 | _, err = io.WriteString(conn, "@RSYNCD: EXIT\n")
48 | if err != nil {
49 | return
50 | }
51 |
52 | return
53 |
54 | }
55 | _, _ = io.WriteString(conn, "@RSYNCD: OK\n")
56 | //if requestedModule in modules
57 | if strings.HasSuffix(requestedModule, "//") {
58 | //status = 1
59 | sendFileEntry(&Conn{Writer: conn}, &file{name: "test.txt"}, nil, 0)
60 |
61 | }
62 |
63 | }
64 |
65 | }
66 |
67 | var modules = []string{"wwwdata", "backup", "data", "html"}
68 |
69 | func formatModuleList() string {
70 | if len(modules) == 0 {
71 | return ""
72 | }
73 | var list strings.Builder
74 | for _, mod := range modules {
75 | fmt.Fprintf(&list, "%s\t%s\n",
76 | mod,
77 | mod)
78 | }
79 | return list.String()
80 | }
81 |
82 | type file struct {
83 | name string
84 | size int64
85 | mode uint32
86 | mtime int64
87 | uid int
88 | gid int
89 | }
90 |
91 | func sendFileEntry(conn *Conn, f *file, last *file, flags uint16) error {
92 | // initialize flags
93 | flags = 0
94 | //TODO
95 |
96 | return nil
97 | }
98 |
99 | const (
100 | XMIT_TOP_DIR = (1 << 0)
101 | XMIT_SAME_MODE = (1 << 1)
102 | XMIT_EXTENDED_FLAGS = (1 << 2)
103 | XMIT_SAME_RDEV_pre28 = XMIT_EXTENDED_FLAGS /* Only in protocols < 28 */
104 | XMIT_SAME_UID = (1 << 3)
105 | XMIT_SAME_GID = (1 << 4)
106 | XMIT_SAME_NAME = (1 << 5)
107 | XMIT_LONG_NAME = (1 << 6)
108 | XMIT_SAME_TIME = (1 << 7)
109 | XMIT_SAME_RDEV_MAJOR = (1 << 8)
110 | XMIT_HAS_IDEV_DATA = (1 << 9)
111 | XMIT_SAME_DEV = (1 << 10)
112 | XMIT_RDEV_MINOR_IS_SMALL = (1 << 11)
113 | )
114 | const (
115 | S_IFMT = 0o0170000 // bits determining the file type
116 | S_IFDIR = 0o0040000 // Directory
117 | S_IFCHR = 0o0020000 // Character device
118 | S_IFBLK = 0o0060000 // Block device
119 | S_IFREG = 0o0100000 // Regular file
120 | S_IFIFO = 0o0010000 // FIFO
121 | S_IFLNK = 0o0120000 // Symbolic link
122 | S_IFSOCK = 0o0140000 // Socket
123 | )
124 |
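A sketch of a client exercising the handshake above: read the server greeting, answer with a compatible protocol version, then request the module list (the address is a placeholder):

package main

import (
	"bufio"
	"fmt"
	"log"
	"net"
	"strings"
)

func main() {
	conn, err := net.Dial("tcp", "127.0.0.1:873")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	br := bufio.NewReader(conn)
	greeting, _ := br.ReadString('\n') // "@RSYNCD: 27\n"
	fmt.Print(greeting)

	fmt.Fprintf(conn, "@RSYNCD: 27\n") // client protocol version
	fmt.Fprintf(conn, "#list\n")       // ask for the module list

	for {
		line, err := br.ReadString('\n')
		if err != nil || strings.HasPrefix(line, "@RSYNCD: EXIT") {
			return
		}
		fmt.Print(line)
	}
}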
--------------------------------------------------------------------------------
/pkg/emulation/rsync/wire.go:
--------------------------------------------------------------------------------
1 | package rsync
2 |
3 | import (
4 | "bytes"
5 | "encoding/binary"
6 | "fmt"
7 | "io"
8 | "log"
9 | )
10 |
11 | const (
12 | MsgData uint8 = 0
13 | MsgInfo uint8 = 2
14 | MsgError uint8 = 1
15 | )
16 |
17 | const mplexBase = 7
18 |
19 | type MultiplexWriter struct {
20 | Writer io.Writer
21 | }
22 |
23 | func (w *MultiplexWriter) Write(p []byte) (n int, err error) {
24 | return w.WriteMsg(MsgData, p)
25 | }
26 |
27 | func (w *MultiplexWriter) WriteMsg(tag uint8, p []byte) (n int, err error) {
28 | header := uint32(mplexBase+tag)<<24 | uint32(len(p))
29 | // log.Printf("len %d (hex %x)", len(p), uint32(len(p)))
30 | // log.Printf("header=%v (%x)", header, header)
31 | if err := binary.Write(w.Writer, binary.LittleEndian, header); err != nil {
32 | return 0, err
33 | }
34 | return w.Writer.Write(p)
35 | }
36 |
37 | type MultiplexReader struct {
38 | Reader io.Reader
39 | }
40 |
41 | // rsync.h defines IO_BUFFER_SIZE as 32 * 1024, but gokr-rsyncd increases it to
42 | // 256K. Since we use this as the maximum message size, too, we need to at least
43 | // match it.
44 | const ioBufferSize = 256 * 1024
45 | const maxMessageSize = ioBufferSize
46 |
47 | func (w *MultiplexReader) ReadMsg() (tag uint8, p []byte, err error) {
48 | var header uint32
49 | if err := binary.Read(w.Reader, binary.LittleEndian, &header); err != nil {
50 | return 0, nil, err
51 | }
52 |
53 | tag = uint8(header>>24) - mplexBase
54 | length := header & 0x00FFFFFF
55 | if length > maxMessageSize {
56 | // NOTE: if you run into this error, one alternative to bumping
57 | // maxMessageSize is to restructure the program to work with i/o buffer
58 | // windowing.
59 | return 0, nil, fmt.Errorf("length %d exceeds max message size (%d)", length, maxMessageSize)
60 | }
61 | p = make([]byte, int(length))
62 | if _, err := io.ReadFull(w.Reader, p); err != nil {
63 | return 0, nil, err
64 | }
65 | // log.Printf("header=%v (%x), tag=%v, length=%v", header, header, tag, length)
66 | // log.Printf("payload=%x / %q", p, p)
67 | return tag, p, nil
68 | }
69 |
70 | func (w *MultiplexReader) Read(p []byte) (n int, err error) {
71 | tag, payload, err := w.ReadMsg()
72 | if err != nil {
73 | return 0, err
74 | }
75 | if tag == MsgError {
76 | return 0, fmt.Errorf("%s", payload)
77 | }
78 | if tag == MsgInfo {
79 | log.Printf("info: %s", payload)
80 | }
81 | if tag != MsgData {
82 | return 0, fmt.Errorf("unexpected tag: got %v, want %v", tag, MsgData)
83 | }
84 | if len(p) < len(payload) {
85 | panic(fmt.Sprintf("not enough buffer space! %d < %d", len(p), len(payload)))
86 | }
87 | return copy(p, payload), nil
88 | }
89 |
90 | type Buffer struct {
91 | // buf.Write() never fails, making for a convenient API.
92 | buf bytes.Buffer
93 | }
94 |
95 | func (b *Buffer) WriteByte(data byte) {
96 | binary.Write(&b.buf, binary.LittleEndian, data)
97 | }
98 |
99 | func (b *Buffer) WriteInt32(data int32) {
100 | binary.Write(&b.buf, binary.LittleEndian, data)
101 | }
102 |
103 | func (b *Buffer) WriteInt64(data int64) {
104 | // send as a 32-bit integer if possible
105 | if data <= 0x7FFFFFFF && data >= 0 {
106 | b.WriteInt32(int32(data))
107 | return
108 | }
109 | // otherwise, send -1 followed by the 64-bit integer
110 | b.WriteInt32(-1)
111 | binary.Write(&b.buf, binary.LittleEndian, data)
112 | }
113 |
114 | func (b *Buffer) WriteString(data string) {
115 | io.WriteString(&b.buf, data)
116 | }
117 |
118 | func (b *Buffer) String() string {
119 | return b.buf.String()
120 | }
121 |
122 | type Conn struct {
123 | Writer io.Writer
124 | Reader io.Reader
125 | }
126 |
127 | func (c *Conn) WriteByte(data byte) error {
128 | return binary.Write(c.Writer, binary.LittleEndian, data)
129 | }
130 |
131 | func (c *Conn) WriteInt32(data int32) error {
132 | return binary.Write(c.Writer, binary.LittleEndian, data)
133 | }
134 |
135 | func (c *Conn) WriteInt64(data int64) error {
136 | // send as a 32-bit integer if possible
137 | if data <= 0x7FFFFFFF && data >= 0 {
138 | return c.WriteInt32(int32(data))
139 | }
140 | // otherwise, send -1 followed by the 64-bit integer
141 | if err := c.WriteInt32(-1); err != nil {
142 | return err
143 | }
144 | return binary.Write(c.Writer, binary.LittleEndian, data)
145 | }
146 |
147 | func (c *Conn) WriteString(data string) error {
148 | _, err := io.WriteString(c.Writer, data)
149 | return err
150 | }
151 |
152 | func (c *Conn) ReadByte() (byte, error) {
153 | var buf [1]byte
154 | if _, err := io.ReadFull(c.Reader, buf[:]); err != nil {
155 | return 0, err
156 | }
157 | return buf[0], nil
158 | }
159 |
160 | func (c *Conn) ReadInt32() (int32, error) {
161 | var buf [4]byte
162 | if _, err := io.ReadFull(c.Reader, buf[:]); err != nil {
163 | return 0, err
164 | }
165 | return int32(binary.LittleEndian.Uint32(buf[:])), nil
166 | }
167 |
168 | func (c *Conn) ReadInt64() (int64, error) {
169 | {
170 | data, err := c.ReadInt32()
171 | if err != nil {
172 | return 0, err
173 | }
174 | if data != -1 {
175 | // The value was small enough to fit into a 32 bit int, so it was
176 | // transferred directly.
177 | return int64(data), nil
178 | }
179 | // Otherwise, -1 was transmitted, followed by the int64.
180 | }
181 | var data int64
182 | if err := binary.Read(c.Reader, binary.LittleEndian, &data); err != nil {
183 | return 0, err
184 | }
185 | return data, nil
186 | }
187 |
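A small round-trip sketch, written as a test inside this package, for the multiplexed framing implemented above:

package rsync

import (
	"bytes"
	"testing"
)

func TestMultiplexRoundTripSketch(t *testing.T) {
	var buf bytes.Buffer
	w := &MultiplexWriter{Writer: &buf}
	if _, err := w.WriteMsg(MsgInfo, []byte("hello")); err != nil {
		t.Fatal(err)
	}
	r := &MultiplexReader{Reader: &buf}
	tag, payload, err := r.ReadMsg()
	if err != nil {
		t.Fatal(err)
	}
	if tag != MsgInfo || string(payload) != "hello" {
		t.Fatalf("got tag %d payload %q", tag, payload)
	}
}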
--------------------------------------------------------------------------------
/pkg/emulation/session/distributor.go:
--------------------------------------------------------------------------------
1 | package session
2 |
3 | import (
4 | "bytes"
5 | "hachimi/pkg/config"
6 | "hachimi/pkg/emulation/http"
7 | "hachimi/pkg/emulation/redis"
8 | "hachimi/pkg/emulation/relay"
9 | "hachimi/pkg/emulation/rsync"
10 | "hachimi/pkg/emulation/ssh"
11 | "hachimi/pkg/emulation/tls"
12 | "hachimi/pkg/types"
13 | "hachimi/pkg/utils"
14 | "io"
15 | "log"
16 | "net"
17 | "strings"
18 | "time"
19 | )
20 |
21 | func Distributor(conn net.Conn, session *types.Session) bool {
22 | conn.SetDeadline(time.Now().Add(time.Duration(config.GetPotConfig().TimeOut) * time.Second))
23 | session.SetOutBuffer(new(bytes.Buffer))
24 | //if !PortDistributor(conn, session) {
25 | //if session.GetOutBuffer().Len() > 0 {
26 | // conn = utils.NewLoggedConn(
27 | // conn,
28 | // io.MultiReader(session.GetOutBuffer(), conn), // some data was already read, so override the reader to restore the buffer
29 | // session.GetOutBuffer(),
30 | // )
31 | //}
32 | //return MagicDistributor(conn, session)
33 | //} else {
34 | // return true
35 | //}
36 | return MagicDistributor(conn, session)
37 | }
38 |
39 | func PortDistributor(conn net.Conn, session *types.Session) bool {
40 | switch conn.LocalAddr().(*net.TCPAddr).Port {
41 | /*
42 | case 22:
43 | ssh.HandleSsh(conn, session)
44 | return true
45 |
46 |
47 | case 23:
48 | relay.HandleTCPRelay(conn, session, map[string]string{"targetAddr": "10.13.1.2:23", "sendSession": "false"})
49 | return true
50 | */
51 | case 6379:
52 | return redis.HandleRedis(conn, session)
53 | }
54 |
55 | return false
56 | }
57 | func MagicDistributor(conn net.Conn, session *types.Session) bool {
58 | // Read up to 10 magic bytes
59 | magicByte := make([]byte, 10)
60 | n, err := conn.Read(magicByte)
61 | if err != nil {
62 | conn.Close()
63 | return false
64 | }
65 | session.IsHandled = true
66 | var conn2 net.Conn
67 | // Known issue: the first read on conn2 only returns the first reader's data rather than a contiguous stream; the original connection's data only arrives on subsequent reads
68 | conn2 = utils.NewLoggedConn(
69 | conn,
70 | io.MultiReader(bytes.NewReader(magicByte[0:n]), conn), // some bytes were already consumed, so override the reader to restore them
71 | session.GetOutBuffer(),
72 | config.GetLimitSize(),
73 | )
74 | if config.GetPotConfig().ForwardingRules != nil {
75 | for _, rule := range *config.GetPotConfig().ForwardingRules {
76 | if rule.Handler == "relay_tcp" && (rule.Port == 0 || rule.Port == conn.LocalAddr().(*net.TCPAddr).Port) {
77 | session.IsHandled = true
78 | session.IsHttp = true
79 | if rule.Config["service"] == "" {
80 | session.Service = "relay_tcp"
81 | } else {
82 | session.Service = rule.Config["service"]
83 | }
84 | relay.HandleTCPRelay(conn2, session, rule.Config)
85 | return true
86 | }
87 | }
88 | }
89 | /* TLS */
90 | if bytes.Equal(magicByte[0:2], []uint8{22, 3}) { //SSL3.0+ (TLS 1.0 1.1 1.2 1.3) ClientHello 0x16 0x03 TODO emulate other protocols
91 | tlsServer := tls.NewTlsServer(conn2, session)
92 | err := tlsServer.Handle()
93 | if err != nil {
94 | if config.GetPotConfig().Debug {
95 | log.Printf("SESSION %s SRC %s:%d DST %s:%d TLS ERROR %s\n", session.ID, session.SrcIP, session.SrcPort, session.DstIP, session.DstPort, err)
96 | }
97 | io.ReadAll(conn) // keep reading on error
98 | return false
99 | }
100 |
101 | session.IsTls = true
102 | session.SetOutBuffer(new(bytes.Buffer)) // reset the buffer so raw TLS bytes are not recorded
103 | // Re-read up to 10 magic bytes from the decrypted stream
104 | magicByte = make([]byte, 10)
105 | n, err = tlsServer.Conn.Read(magicByte)
106 | if err != nil {
107 | io.ReadAll(conn2) // keep reading on error
108 | return false
109 | }
110 | conn2 = utils.NewLoggedConn(
111 | tlsServer.Conn,
112 | io.MultiReader(bytes.NewReader(magicByte[0:n]), tlsServer.Conn),
113 | session.GetOutBuffer(),
114 | config.GetLimitSize(),
115 | )
116 | }
117 | /* gmTLS */
118 | /*deleted*/
119 | /* HTTP */
120 | // Quickly check the first few bytes to decide whether this is an HTTP request
121 | // CONNECT indicates an HTTP proxy request
122 | if string(magicByte[0:5]) == "POST " || string(magicByte[0:4]) == "GET " || string(magicByte[0:5]) == "HEAD " || string(magicByte[0:8]) == "OPTIONS " || string(magicByte[0:7]) == "DELETE " || string(magicByte[0:4]) == "PUT " || string(magicByte[0:6]) == "TRACE " || string(magicByte[0:8]) == "CONNECT " || string(magicByte[0:6]) == "PATCH " {
123 | if config.GetPotConfig().ForwardingRules != nil {
124 | for _, rule := range *config.GetPotConfig().ForwardingRules {
125 | if rule.Handler == "relay_http" && (rule.Port == 0 || rule.Port == conn.LocalAddr().(*net.TCPAddr).Port) {
126 | session.IsHandled = true
127 | session.IsHttp = true
128 | relay.HandleHttpRelay(conn2, session, rule.Config)
129 | return true
130 | }
131 | }
132 | }
133 | session.Service = "http"
134 | session.IsHandled = true
135 | session.IsHttp = true
136 | http.HandleHttp(conn2, session)
137 | return true
138 | }
139 | /* SSH */
140 | if strings.HasPrefix(string(magicByte), "SSH-") { //SSH
141 | session.Service = "ssh"
142 | session.IsHandled = true
143 | ssh.HandleSsh(conn2, session)
144 | return true
145 | }
146 | /* Redis */
147 | 	// Naive match on the first bytes of the Redis protocol (RESP arrays or inline commands)
148 | 	if strings.HasPrefix(string(magicByte), "*1\r\n") || strings.HasPrefix(string(magicByte), "*2\r\n") || strings.HasPrefix(strings.ToLower(string(magicByte)), "info") || strings.HasPrefix(strings.ToLower(string(magicByte)), "ping") {
149 | session.Service = "redis"
150 | session.IsHandled = true
151 | if redis.HandleRedis(conn2, session) {
152 | return true
153 | }
154 | } else if string(magicByte[0:6]) == "@RSYNC" {
155 | /* RSYNC */
156 | session.Service = "rsync"
157 | rsync.HandleRsync(conn2, session)
158 | return true
159 |
160 | } else {
161 | /* Other */
162 | //TODO BUFFER POOL
163 | var buffer = make([]byte, 1024*1024)
164 | n, err = conn2.Read(buffer)
165 | if err != nil {
166 | conn.Close()
167 | return false
168 | }
169 | buffer = nil
170 | }
171 |
172 | 	// Read the first line, capped at 1 KiB
173 | buf := make([]byte, 1024)
174 | _, err = conn2.Read(buf)
175 | if err != nil {
176 | conn.Close()
177 | return false
178 | }
179 | firstLine := strings.Split(string(session.GetOutBuffer().Bytes()[0:utils.Min(session.GetOutBuffer().Len(), 1024)]), "\n")[0]
180 | 	// Check whether this is a non-standard HTTP request missed by the quick check above
181 | conn2 = utils.NewLoggedConn(
182 | conn2,
183 | io.MultiReader(bytes.NewReader(session.GetOutBuffer().Bytes()), conn2),
184 | 		nil, // logging is already handled by the outer wrapper
185 | 0,
186 | )
187 | if isHTTPRequestLine(firstLine) {
188 | session.Service = "http"
189 | session.IsHandled = true
190 | session.IsHttp = true
191 | http.HandleHttp(conn2, session)
192 | return true
193 | }
194 | if PortDistributor(conn2, session) {
195 | return true
196 | } else {
197 | io.ReadAll(conn2)
198 | session.IsHandled = false
199 | session.Service = "raw"
200 | return false
201 | }
202 |
203 | }
204 |
205 | // isHTTPRequestLine reports whether line looks like an HTTP request line.
206 | func isHTTPRequestLine(line string) bool {
207 | 	// Trim leading and trailing whitespace
208 | 	line = strings.TrimSpace(line)
209 | 
210 | 	// Split the string into three parts: METHOD PATH PROTOCOL
211 | 	parts := strings.Fields(line)
212 | 	if len(parts) != 3 {
213 | 		return false // not a METHOD PATH PROTOCOL triple
214 | 	}
215 | 
216 | 	// The protocol part must start with "HTTP/" followed by a known version
217 | 	protocol := parts[2]
218 | 	if !strings.HasPrefix(protocol, "HTTP/") {
219 | 		return false
220 | 	}
221 | 
222 | 	version := strings.TrimPrefix(protocol, "HTTP/")
223 | 	if version != "1.0" && version != "1.1" && version != "2" && version != "3" {
224 | 		return false
225 | 	}
226 | 
227 | 	// Optionally, the path could also be required to start with "/"
228 | 	//path := parts[1]
229 | 	//if !strings.HasPrefix(path, "/") {
230 | 	//	return false
231 | 	//}
232 |
233 | return true
234 | }
235 |
--------------------------------------------------------------------------------
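MagicDistributor above peeks at the first 10 bytes of a connection and then rebuilds the net.Conn so that the protocol handlers still see the complete byte stream. A minimal, self-contained sketch of that peek-and-replay pattern (peekConn and sniff are illustrative names, not part of this repository):

    package sniffer

    import (
    	"bytes"
    	"io"
    	"net"
    )

    // peekConn serves reads from a replay reader that starts with the already-consumed bytes.
    type peekConn struct {
    	net.Conn
    	r io.Reader
    }

    func (p *peekConn) Read(b []byte) (int, error) { return p.r.Read(b) }

    // sniff reads up to n bytes to guess the protocol, then returns the peeked bytes
    // together with a connection that still delivers the full, unmodified stream.
    func sniff(conn net.Conn, n int) ([]byte, net.Conn, error) {
    	buf := make([]byte, n)
    	m, err := conn.Read(buf)
    	if err != nil {
    		return nil, nil, err
    	}
    	replay := io.MultiReader(bytes.NewReader(buf[:m]), conn)
    	return buf[:m], &peekConn{Conn: conn, r: replay}, nil
    }

utils.NewLoggedConn plays the same role here, adding a size limit and a tee into the session's output buffer; as the comment in MagicDistributor notes, the first Read on such a wrapper only returns the replayed bytes.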
/pkg/emulation/session/limiter.go:
--------------------------------------------------------------------------------
1 | package session
2 |
3 | import (
4 | "sync"
5 | "time"
6 | )
7 |
8 | const (
9 | 	blockDuration = 10 * time.Minute // how long an offending IP stays blacklisted
10 | 	windowDuration = 10 * time.Minute // sliding window used for counting connections
11 | )
12 |
13 | type ConnectionLimiter struct {
14 | maxConnections int
15 | mu sync.Mutex
16 | 	connections map[string][]time.Time // connection timestamps per source IP
17 | 	blacklist map[string]time.Time // blacklisted IPs and their unblock times
18 | 	cleanupTicker *time.Ticker // periodic cleanup ticker
19 | }
20 |
21 | // NewConnectionLimiter creates a new per-IP connection limiter.
22 | func NewConnectionLimiter(maxConnections int) *ConnectionLimiter {
23 | limiter := &ConnectionLimiter{
24 | maxConnections: maxConnections,
25 | connections: make(map[string][]time.Time),
26 | blacklist: make(map[string]time.Time),
27 | cleanupTicker: time.NewTicker(1 * time.Minute),
28 | }
29 |
30 | 	// Start the background cleanup task
31 | go limiter.cleanupExpiredEntries()
32 | return limiter
33 | }
34 |
35 | // AllowConnection reports whether a new connection from the given IP is allowed.
36 | func (cl *ConnectionLimiter) AllowConnection(ip string) bool {
37 | if cl.maxConnections == 0 || ip == "" {
38 | return true
39 | }
40 | cl.mu.Lock()
41 | defer cl.mu.Unlock()
42 |
43 | 	// Check the blacklist first
44 | 	if unblockTime, blacklisted := cl.blacklist[ip]; blacklisted {
45 | 		// Still inside the block window: reject the connection
46 | 		if time.Now().Before(unblockTime) {
47 | 			return false
48 | 		}
49 | 		// The block has expired: remove the entry
50 | delete(cl.blacklist, ip)
51 | }
52 |
53 | 	// Record the current connection time
54 | now := time.Now()
55 | cl.connections[ip] = append(cl.connections[ip], now)
56 |
57 | 	// Drop records that fall outside the time window
58 | windowStart := now.Add(-windowDuration)
59 | validConnections := []time.Time{}
60 | for _, t := range cl.connections[ip] {
61 | if t.After(windowStart) {
62 | validConnections = append(validConnections, t)
63 | }
64 | }
65 | cl.connections[ip] = validConnections
66 |
67 | 	// Blacklist the IP once it exceeds the threshold
68 | 	if len(validConnections) > cl.maxConnections {
69 | 		cl.blacklist[ip] = now.Add(blockDuration) // add to the blacklist
70 | 		delete(cl.connections, ip)                // drop its connection history
71 | return false
72 | }
73 |
74 | return true
75 | }
76 |
77 | // cleanupExpiredEntries periodically removes stale connection records and expired blacklist entries.
78 | func (cl *ConnectionLimiter) cleanupExpiredEntries() {
79 | for range cl.cleanupTicker.C {
80 | cl.mu.Lock()
81 |
82 | now := time.Now()
83 |
84 | 		// Remove expired connection records
85 | for ip, times := range cl.connections {
86 | validTimes := []time.Time{}
87 | for _, t := range times {
88 | if t.After(now.Add(-windowDuration)) {
89 | validTimes = append(validTimes, t)
90 | }
91 | }
92 | if len(validTimes) > 0 {
93 | cl.connections[ip] = validTimes
94 | } else {
95 | delete(cl.connections, ip)
96 | }
97 | }
98 |
99 | 		// Remove expired blacklist entries
100 | for ip, unblockTime := range cl.blacklist {
101 | if now.After(unblockTime) {
102 | delete(cl.blacklist, ip)
103 | }
104 | }
105 |
106 | cl.mu.Unlock()
107 | }
108 | }
109 |
110 | // Close stops the background cleanup task.
111 | func (cl *ConnectionLimiter) Close() {
112 | cl.cleanupTicker.Stop()
113 | }
114 |
--------------------------------------------------------------------------------
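A usage sketch for the limiter: called from an accept loop, it allows at most maxConnections connections per source IP inside the 10-minute window and blacklists offenders for 10 minutes; a value of 0 disables limiting. The listen address, the limit of 100, and the handle callback below are illustrative:

    package main

    import (
    	"log"
    	"net"

    	"hachimi/pkg/emulation/session"
    )

    func serve(ln net.Listener, handle func(net.Conn)) {
    	limiter := session.NewConnectionLimiter(100) // 100 connections per IP per window
    	defer limiter.Close()

    	for {
    		conn, err := ln.Accept()
    		if err != nil {
    			return
    		}
    		ip, _, _ := net.SplitHostPort(conn.RemoteAddr().String())
    		if !limiter.AllowConnection(ip) {
    			conn.Close() // over the threshold or currently blacklisted
    			continue
    		}
    		go handle(conn)
    	}
    }

    func main() {
    	ln, err := net.Listen("tcp", ":8080")
    	if err != nil {
    		log.Fatal(err)
    	}
    	serve(ln, func(c net.Conn) { c.Close() })
    }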
/pkg/emulation/session/session.go:
--------------------------------------------------------------------------------
1 | package session
2 |
3 | import (
4 | "github.com/google/uuid"
5 | "hachimi/pkg/types"
6 | "net"
7 | "strconv"
8 | "time"
9 | )
10 |
11 | func NewSession(conn interface{}, src interface{}) *types.Session {
12 | s := &types.Session{}
13 | s.SetConnection(conn)
14 | s.ID = uuid.New().String()
15 | 	// Determine whether conn is a TCP or a UDP connection
16 | 	if _, ok := conn.(*net.TCPConn); ok {
17 | 		s.Protocol = "TCP"
18 | 		// net.SplitHostPort also handles IPv6 addresses
19 | var port string
20 | s.SrcIP, port, _ = net.SplitHostPort(conn.(*net.TCPConn).RemoteAddr().String())
21 | s.SrcPort, _ = strconv.Atoi(port)
22 | s.DstIP, port, _ = net.SplitHostPort(conn.(*net.TCPConn).LocalAddr().String())
23 | s.DstPort, _ = strconv.Atoi(port)
24 | } else if _, ok := conn.(*net.UDPConn); ok {
25 | s.Protocol = "UDP"
26 | var port string
27 | s.DstIP, port, _ = net.SplitHostPort(conn.(*net.UDPConn).LocalAddr().String())
28 | s.DstPort, _ = strconv.Atoi(port)
29 | if src != nil {
30 | s.SrcIP, port, _ = net.SplitHostPort(src.(*net.UDPAddr).String())
31 | s.SrcPort, _ = strconv.Atoi(port)
32 | }
33 | }
34 |
35 | s.StartTime = time.Now()
36 | return s
37 | }
38 |
--------------------------------------------------------------------------------
/pkg/emulation/ssh/ssh.go:
--------------------------------------------------------------------------------
1 | package ssh
2 |
3 | import (
4 | "bufio"
5 | "bytes"
6 | "errors"
7 | "fmt"
8 | "github.com/google/uuid"
9 | "golang.org/x/crypto/ssh/terminal"
10 | "hachimi/pkg/config"
11 | "hachimi/pkg/types"
12 | "io"
13 | "log"
14 | "net"
15 | "strconv"
16 | "strings"
17 | "time"
18 |
19 | "golang.org/x/crypto/ssh"
20 | )
21 |
22 | type SSHSession struct {
23 | types.Session
24 | ID string `gorm:"primaryKey" json:"id"`
25 | SessionID string `gorm:"index" json:"session_id"`
26 | StartTime time.Time `gorm:"index" json:"start_time"`
27 | EndTime time.Time `gorm:"index" json:"end_time"`
28 | Duration int `json:"duration"`
29 | ClientVersion string `json:"client_version"`
30 | Shell string `json:"shell"`
31 | Request string `json:"request"`
32 | Error bool `json:"error"`
33 | PublicKey string `json:"public_key"`
34 | Service string `json:"service"`
35 | User string `json:"user"`
36 | Data string `json:"data"`
37 | IsInteract bool `json:"is_interact"`
38 | PassWord string `json:"password"`
39 | }
40 |
41 | var (
42 | errBadPassword = errors.New("permission denied")
43 | ServerVersions = []string{
44 | "SSH-2.0-OpenSSH_8.4",
45 | }
46 | )
47 |
48 | func HandleSsh(conn net.Conn, session *types.Session) {
49 | var s SSHSession
50 | s.Session = *session
51 | serverConfig := &ssh.ServerConfig{
52 | MaxAuthTries: 6,
53 | PasswordCallback: s.PasswordCallback,
54 | PublicKeyCallback: s.PublicKeyCallback,
55 | ServerVersion: ServerVersions[0],
56 | }
57 | s.ID = uuid.New().String()
58 | s.Service = "ssh"
59 | s.SessionID = session.ID
60 | s.StartTime = time.Now()
61 | signer, _ := ssh.NewSignerFromSigner(config.SshPrivateKey)
62 | serverConfig.AddHostKey(signer)
63 | s.HandleConn(conn, serverConfig)
64 | s.EndTime = time.Now()
65 | s.Duration = int(s.EndTime.Sub(s.StartTime).Milliseconds())
66 | config.Logger.Log(&s)
67 |
68 | }
69 |
70 | func (s *SSHSession) PublicKeyCallback(conn ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) {
71 | s.User = conn.User()
72 | s.PublicKey = strings.Trim(strconv.Quote(string(key.Marshal())), `"`)
73 |
74 | //time.Sleep(100 * time.Millisecond)
75 | return nil, nil
76 | }
77 |
78 | func (s *SSHSession) PasswordCallback(conn ssh.ConnMetadata, password []byte) (*ssh.Permissions, error) {
79 | s.User = conn.User()
80 | s.PassWord = strings.Trim(strconv.Quote(string(password)), `"`)
81 | //time.Sleep(100 * time.Millisecond)
82 | return nil, nil
83 | }
84 |
85 | func (s *SSHSession) HandleConn(conn net.Conn, serverConfig *ssh.ServerConfig) {
86 | defer conn.Close()
87 | s.Service = "ssh"
88 | //(conn.RemoteAddr())
89 | sshConn, chans, Request, err := ssh.NewServerConn(conn, serverConfig)
90 | if err != nil {
91 | s.Error = true
92 | //log.Println("Failed to handshake:", err)
93 | return
94 | }
95 | s.ClientVersion = strings.Trim(strconv.Quote(string(sshConn.ClientVersion())), `"`)
96 |
97 | go func() {
98 | for req := range Request {
99 |
100 | s.Request += fmt.Sprintf("%s %s\n", req.Type, strings.Trim(strconv.Quote(string(req.Payload)), `"`))
101 | }
102 | }()
103 |
104 | //log.Printf("New SSH connection from %s\n", sshConn.RemoteAddr())
105 |
106 | //go ssh.DiscardRequests(reqs)
107 |
108 | for newChannel := range chans {
109 | go s.handleChannel(newChannel)
110 | }
111 | }
112 |
113 | type ConnLogger struct {
114 | io.ReadWriter
115 | in *bytes.Buffer
116 | out *bytes.Buffer
117 | }
118 |
119 | // NewConnLogger (defined below) creates a ConnLogger instance.
120 | 
121 | // Read wraps the underlying Read and records the inbound traffic.
122 | func (cl *ConnLogger) Read(b []byte) (int, error) {
123 | n, err := cl.ReadWriter.Read(b)
124 | if n > 0 {
125 | cl.in.Write(b[:n])
126 | }
127 | return n, err
128 | }
129 |
130 | // Write wraps the underlying Write and records the outbound traffic.
131 | func (cl *ConnLogger) Write(b []byte) (int, error) {
132 | n, err := cl.ReadWriter.Write(b)
133 | if n > 0 {
134 | _, err := cl.out.Write(b[:n])
135 | if err != nil {
136 |
137 | log.Println(err)
138 | }
139 |
140 | }
141 | return n, err
142 | }
143 | func NewConnLogger(conn io.ReadWriter, in *bytes.Buffer, out *bytes.Buffer) *ConnLogger {
144 | return &ConnLogger{ReadWriter: conn, in: in, out: out}
145 | }
146 | func (s *SSHSession) handleChannel(newChannel ssh.NewChannel) {
147 | if t := newChannel.ChannelType(); t != "session" {
148 | newChannel.Reject(ssh.UnknownChannelType, fmt.Sprintf("unknown channel type: %s", t))
149 | return
150 | }
151 | //NewTerminal(newChannel)
152 | channel, Request, err := newChannel.Accept()
153 | if err != nil {
154 | return
155 | }
156 | var ch chan struct{}
157 | ch = make(chan struct{})
158 | var inBuffer bytes.Buffer
159 | var outBuffer bytes.Buffer
160 | channelLogger := NewConnLogger(channel, &inBuffer, &outBuffer)
161 | go func() {
162 | for req := range Request {
163 |
164 | if req.Type == "shell" {
165 | ch <- struct{}{}
166 | s.IsInteract = true
167 | } else if req.Type == "exec" {
168 | var payload = struct{ Value string }{}
169 | err := ssh.Unmarshal(req.Payload, &payload)
170 | if err != nil {
171 | s.Error = true
172 | s.Shell += strings.Trim(strconv.Quote(string(req.Payload)), `"`)
173 | } else {
174 | s.Shell += payload.Value
175 | if strings.Contains(payload.Value, "scp -t") {
176 | 						// trick the SCP client into proceeding with the transfer
177 | channel.Write([]byte("\x00"))
178 | reader := bufio.NewReader(channelLogger)
179 | _, err := reader.ReadString('\n')
180 | if err != nil {
181 | log.Println("Error reading:", err)
182 | break
183 | }
184 | channel.Write([]byte("\x00"))
185 | io.ReadAll(channelLogger)
186 | continue
187 | }
188 | if strings.Contains(payload.Value, "echo") {
189 | channel.Write([]byte(payload.Value + "\n"))
190 | continue
191 | }
192 |
193 | if strings.Contains(payload.Value, "uname -s -v -n -r -m") {
194 | channel.Write([]byte("Linux ubuntu 3.13.0-24-generic #47-Ubuntu SMP Fri May 2 23:30:00 UTC 2014 x86_64\n"))
195 | }
196 |
197 | if strings.Contains(payload.Value, "uname -a") {
198 | channel.Write([]byte("Linux ubuntu 3.13.0-24-generic #47-Ubuntu SMP Fri May 2 23:30:00 UTC 2014 x86_64 x86_64 x86_64 GNU/Linux\n"))
199 | }
200 | if strings.Contains(payload.Value, "whoami") {
201 | channel.Write([]byte("root\r"))
202 | }
203 | if strings.Contains(payload.Value, "id") {
204 | channel.Write([]byte("uid=0(root) gid=0(root) groups=0(root) context=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023\n"))
205 | }
206 |
207 | }
208 | }
209 | s.Request += fmt.Sprintf("%s %s\n", req.Type, strings.Trim(strconv.Quote(string(req.Payload)), `"`))
210 | }
211 | close(ch)
212 | }()
213 | <-ch
214 | defer channel.Close()
215 |
216 | defer func() { s.Data = strings.Trim(strconv.Quote(string(inBuffer.Bytes())), `"`) }()
217 |
218 | term := terminal.NewTerminal(channelLogger, "[root@ubuntu ~]# ")
219 |
220 | for {
221 | // get user input
222 | line, err := term.ReadLine()
223 | if err != nil {
224 | break
225 | }
226 | s.Shell += fmt.Sprintf("%s\n", strings.Trim(strconv.Quote(line), `"`))
227 | if len(line) > 0 {
228 | switch strings.Fields(line)[0] {
229 | case "exit":
230 | return
231 | case "env":
232 | term.Write([]byte("SHELL=/bin/bash\nUSER=root\nPATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\nPWD=/root\nLANG=en_US.UTF-8\nSHLVL=1\nHOME=/root\nLOGNAME=root"))
233 | case "ls":
234 | term.Write([]byte("Desktop Documents Downloads Music Pictures Public Templates Videos\n"))
235 | case "uname":
236 | if len(strings.Fields(line)) > 1 {
237 | switch strings.Fields(line)[1] {
238 | case "-a":
239 | term.Write([]byte("Linux ubuntu 3.13.0-24-generic #47-Ubuntu SMP Fri May 2 23:30:00 UTC 2014 x86_64 x86_64 x86_64 GNU/Linux\n"))
240 | case "-i":
241 | term.Write([]byte("x86_64\n"))
242 | case "-p":
243 | term.Write([]byte("x86_64\n"))
244 |
245 | case "-o":
246 | term.Write([]byte("GNU/Linux\n"))
247 | default:
248 | term.Write([]byte("x86_64\n"))
249 |
250 | }
251 |
252 | } else {
253 | term.Write([]byte("Linux\n"))
254 | }
255 | case "whoami":
256 | term.Write([]byte("root\n"))
257 | case "hostname":
258 | term.Write([]byte("ubuntu\n"))
259 |
260 | case "id":
261 | term.Write([]byte("uid=0(root) gid=0(root) groups=0(root) context=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023\n"))
262 | case "echo":
263 | if len(strings.Fields(line)) > 1 {
264 | term.Write([]byte(strings.Join(strings.Fields(line)[1:], " ") + "\n"))
265 | }
266 | case "pwd":
267 | term.Write([]byte("/root\n"))
268 | case "ps":
269 | term.Write([]byte("PID TTY TIME CMD\n1 ? 00:00:00 init\n2 ? 00:00:00 kthreadd\n3 ? 00:00:00 ksoftirqd/0\n5 ? 00:00:00 kworker/0:0H\n7 ? 00:00:00 rcu_sched\n8 ? 00:00:00 rcu_bh\n9 ? 00:00:00 migration/0\n10 ? 00:00:00 watchdog/0\n11 ? 00:00:00 cpuhp/0\n12 ? 00:00:00 kdevtmpfs\n13 ? 00:00:00 netns\n14 ? 00:00:00 khungtaskd\n15 ? 00:00:00 oom_reaper\n16 ? 00:00:00 writeback\n17 ? 00:00:00 kcompactd0\n18 ? 00:00:00 ksmd\n19 ? 00:00:00 khugepaged\n20 ? 00:00:00 crypto\n21 ? 00:00:00 kintegrityd\n22 ? 00:00:00 kblockd\n23 ? 00:00:00 ata_sff\n24 ? 00:00:00 md\n25 ? 00:00:00 edac-poller\n26 ? 00:00:00 devfreq_wq\n27 ? 00:00:00 watchdogd\n28 ? 00:00:00 kswapd0\n29 ? 00:00:00 bash\n"))
270 | case "ifconfig":
271 | term.Write([]byte("eth0: flags=4163 mtu 1500\n inet 10.45.14.41 netmask 255.255.255.0 broadcast 10.45.14.255\n ether 00:01:11:45:cd:88 txqueuelen 1000 (Ethernet)\n RX packets 69959146 bytes 4174629619 (4.7 GB)\n RX errors 0 dropped 0 overruns 0 frame 0\n TX packets 92871777 bytes 7929333630 (7.2 GB)\n TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0\n\nlo: flags=73 mtu 65536\n inet 127.0.0.1 netmask 255.0.0.0\n inet6 ::1 prefixlen 128 scopeid 0x10\n loop txqueuelen 1000 (Local Loopback)\n RX packets 324635 bytes 24156673 (24.1 MB)\n RX errors 0 dropped 488 overruns 0 frame 0\n TX packets 324635 bytes 24156673 (24.1 MB)\n TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0\n\n"))
272 | default:
273 |
274 | }
275 | }
276 | }
277 |
278 | }
279 |
--------------------------------------------------------------------------------
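HandleSsh builds its host key with ssh.NewSignerFromSigner(config.SshPrivateKey), so any crypto.Signer can serve as the honeypot's host key. A standalone sketch that generates a throwaway ed25519 host key (the real key is managed by pkg/config, not shown here):

    package main

    import (
    	"crypto/ed25519"
    	"crypto/rand"
    	"log"

    	"golang.org/x/crypto/ssh"
    )

    func main() {
    	// Generate a fresh ed25519 key; ed25519.PrivateKey implements crypto.Signer.
    	_, priv, err := ed25519.GenerateKey(rand.Reader)
    	if err != nil {
    		log.Fatal(err)
    	}
    	signer, err := ssh.NewSignerFromSigner(priv)
    	if err != nil {
    		log.Fatal(err)
    	}
    	log.Printf("host key type: %s", signer.PublicKey().Type()) // ssh-ed25519
    }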
/pkg/emulation/tls/tls.go:
--------------------------------------------------------------------------------
1 | package tls
2 |
3 | import (
4 | "crypto/tls"
5 | "hachimi/pkg/config"
6 | "hachimi/pkg/types"
7 | "net"
8 | )
9 |
10 | type TlsServer struct {
11 | Conn net.Conn
12 | session *types.Session
13 | }
14 |
15 | func NewTlsServer(conn net.Conn, session *types.Session) *TlsServer {
16 | return &TlsServer{Conn: tls.Server(conn, config.TlsConfig), session: session}
17 | }
18 |
19 | func (t *TlsServer) Handle() error {
20 | return t.Conn.(*tls.Conn).Handshake()
21 | }
22 |
--------------------------------------------------------------------------------
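NewTlsServer terminates TLS with whatever certificate config.TlsConfig carries (built in pkg/config/tls.go, which is not part of this excerpt). As a hedged, standalone sketch, a throwaway self-signed config could be assembled like this:

    package main

    import (
    	"crypto/ecdsa"
    	"crypto/elliptic"
    	"crypto/rand"
    	"crypto/tls"
    	"crypto/x509"
    	"crypto/x509/pkix"
    	"math/big"
    	"time"
    )

    // selfSignedConfig returns a tls.Config backed by a throwaway self-signed certificate.
    func selfSignedConfig() (*tls.Config, error) {
    	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
    	if err != nil {
    		return nil, err
    	}
    	tmpl := x509.Certificate{
    		SerialNumber: big.NewInt(1),
    		Subject:      pkix.Name{CommonName: "localhost"},
    		NotBefore:    time.Now(),
    		NotAfter:     time.Now().Add(365 * 24 * time.Hour),
    	}
    	der, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &key.PublicKey, key)
    	if err != nil {
    		return nil, err
    	}
    	cert := tls.Certificate{Certificate: [][]byte{der}, PrivateKey: key}
    	return &tls.Config{Certificates: []tls.Certificate{cert}}, nil
    }

    func main() {
    	if _, err := selfSignedConfig(); err != nil {
    		panic(err)
    	}
    }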
/pkg/ingress/manager.go:
--------------------------------------------------------------------------------
1 | package ingress
2 |
3 | import (
4 | "context"
5 | "hachimi/pkg/config"
6 | "hachimi/pkg/emulation/session"
7 | "hachimi/pkg/utils"
8 | "log"
9 | "net"
10 | "strings"
11 | "sync"
12 | "time"
13 | )
14 |
15 | // ListenerManager manages multiple TCP and UDP listeners.
16 | type ListenerManager struct {
17 | tcpListeners []*TCPListener
18 | udpListeners []*UDPListener
19 | wg *sync.WaitGroup
20 | }
21 |
22 | var connLimiter *session.ConnectionLimiter
23 |
24 | // NewListenerManager creates a new ListenerManager instance.
25 | func NewListenerManager() *ListenerManager {
26 | connLimiter = session.NewConnectionLimiter(config.GetPotConfig().MaxSession)
27 | return &ListenerManager{
28 | wg: &sync.WaitGroup{},
29 | }
30 | }
31 |
32 | // AddTCPListener adds a new TCP listener to the manager.
33 | func (m *ListenerManager) AddTCPListener(listener *TCPListener) {
34 | m.tcpListeners = append(m.tcpListeners, listener)
35 | }
36 |
37 | // AddUDPListener adds a new UDP listener to the manager.
38 | func (m *ListenerManager) AddUDPListener(listener *UDPListener) {
39 | m.udpListeners = append(m.udpListeners, listener)
40 | }
41 |
42 | // StartAll starts all managed listeners.
43 | func (m *ListenerManager) StartAll(ctx context.Context) error {
44 | for _, tcpListener := range m.tcpListeners {
45 | m.wg.Add(1)
46 | go func(listener *TCPListener) {
47 | defer m.wg.Done()
48 | err := listener.Start(ctx, DefaultTCPHandler)
49 | if err != nil {
50 | // Log the error but continue starting other listeners
51 | DefaultErrorHandler(err)
52 | }
53 | }(tcpListener)
54 | }
55 |
56 | for _, udpListener := range m.udpListeners {
57 | m.wg.Add(1)
58 | go func(listener *UDPListener) {
59 | defer m.wg.Done()
60 | err := listener.Start(ctx, DefaultUDPHandler)
61 | if err != nil {
62 | DefaultErrorHandler(err)
63 | }
64 | }(udpListener)
65 | }
66 | return nil
67 | }
68 |
69 | // StopAll stops all managed listeners.
70 | func (m *ListenerManager) StopAll() {
71 | for _, tcpListener := range m.tcpListeners {
72 | tcpListener.Stop()
73 | }
74 |
75 | for _, udpListener := range m.udpListeners {
76 | udpListener.Stop()
77 | }
78 |
79 | m.wg.Wait()
80 | }
81 | func (m *ListenerManager) Wait() {
82 | m.wg.Wait()
83 | }
84 | func DefaultTCPHandler(conn *net.TCPConn) {
85 | if connLimiter != nil {
86 | ip, _, _ := net.SplitHostPort(conn.RemoteAddr().String())
87 | if !connLimiter.AllowConnection(ip) {
88 | conn.Close()
89 | return
90 | }
91 | }
92 | sess := session.NewSession(conn, nil)
93 | session.Distributor(conn, sess)
94 | sess.EndTime = time.Now()
95 | sess.Duration = int(sess.EndTime.Sub(sess.StartTime).Milliseconds())
96 | sess.Data = strings.Trim(utils.EscapeBytes(sess.GetOutBuffer().Bytes()), `"`)
97 | sess.GetOutBuffer().Reset()
98 | config.Logger.Log(sess)
99 | sess.Close()
100 |
101 | }
102 | func DefaultUDPHandler(conn *net.UDPConn, src *net.UDPAddr, dst *net.UDPAddr, buf []byte) {
103 | if connLimiter != nil && src != nil {
104 | ip, _, _ := net.SplitHostPort(src.String())
105 | 		if !connLimiter.AllowConnection(ip) {
106 | 			// conn is the shared UDP listener socket; closing it would stop the whole listener, so just drop this packet
107 | 			return
108 | 		}
109 | }
110 | sess := session.NewSession(conn, src)
111 | //session.Distributor(conn, sess)
112 | 	//TODO: UDP service emulation
113 | if dst != nil {
114 | sess.DstIP = dst.IP.String()
115 | sess.DstPort = dst.Port
116 | }
117 |
118 | sess.EndTime = time.Now()
119 | sess.Duration = int(sess.EndTime.Sub(sess.StartTime).Milliseconds())
120 | sess.Data = utils.EscapeBytes(buf)
121 | config.Logger.Log(sess)
122 |
123 | }
124 |
125 | // DefaultErrorHandler handles errors during listener startup.
126 | func DefaultErrorHandler(err error) {
127 | if err != nil {
128 | log.Printf("Error starting listener: %v", err)
129 | }
130 | }
131 |
--------------------------------------------------------------------------------
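A wiring sketch for the ingress package, roughly what the honeypot binary is expected to do. It assumes the pot configuration and config.Logger have already been initialized by pkg/config, since NewListenerManager and DefaultTCPHandler read them; the address and port are examples only:

    package main

    import (
    	"context"
    	"log"
    	"os"
    	"os/signal"

    	"hachimi/pkg/ingress"
    )

    func main() {
    	ctx, cancel := context.WithCancel(context.Background())
    	defer cancel()

    	m := ingress.NewListenerManager()
    	m.AddTCPListener(ingress.NewTCPListener("0.0.0.0", 10086)) // example port; TPROXY redirects all ports here
    	m.AddUDPListener(ingress.NewUDPListener("0.0.0.0", 10086))

    	if err := m.StartAll(ctx); err != nil {
    		log.Fatal(err)
    	}

    	// Block until interrupted, then shut the listeners down.
    	sig := make(chan os.Signal, 1)
    	signal.Notify(sig, os.Interrupt)
    	<-sig
    	cancel()
    	m.StopAll()
    }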
/pkg/ingress/tcp_listener.go:
--------------------------------------------------------------------------------
1 | package ingress
2 |
3 | import (
4 | "context"
5 | "log"
6 | "net"
7 | "sync"
8 | )
9 |
10 | // TCPListener represents a managed TCP listener.
11 | type TCPListener struct {
12 | Host string
13 | Port int
14 | listener *net.TCPListener
15 | wg *sync.WaitGroup
16 | }
17 |
18 | // NewTCPListener creates a new TCPListener instance.
19 | func NewTCPListener(host string, port int) *TCPListener {
20 | return &TCPListener{
21 | Host: host,
22 | Port: port,
23 | wg: &sync.WaitGroup{},
24 | }
25 | }
26 |
27 | // Start begins listening for TCP connections on the specified address.
28 | func (t *TCPListener) Start(ctx context.Context, handler func(conn *net.TCPConn)) error {
29 | addr := &net.TCPAddr{
30 | IP: net.ParseIP(t.Host),
31 | Port: t.Port,
32 | }
33 | var err error
34 | t.listener, err = net.ListenTCP("tcp", addr)
35 | if err != nil {
36 | return err
37 | }
38 | file, err := t.listener.File()
39 | if err == nil {
40 | defer file.Close()
41 | // TransparentProxy
42 | err = TransparentProxy(file)
43 | if err != nil {
44 | log.Printf("Warning: Failed to set socket option (IP_TRANSPARENT/IP_RECVORIGDSTADDR): %s\n", err)
45 | log.Printf("Fallback to normal TCP listener. Full Port Forwarding is not available.\n")
46 | }
47 |
48 | } else {
49 | log.Printf("Warning: Failed to get socket file descriptor: %s\n", err)
50 | log.Printf("Fallback to normal TCP listener. Full Port Forwarding is not available.\n")
51 | }
52 |
53 | log.Printf("TCP listener started on %s", t.listener.Addr().String())
54 | t.wg.Add(1)
55 | go func() {
56 | defer t.wg.Done()
57 | for {
58 | select {
59 | case <-ctx.Done():
60 | log.Printf("TCP listener on %s is stopping", t.listener.Addr().String())
61 | return
62 | default:
63 | conn, err := t.listener.AcceptTCP()
64 | if err != nil {
65 | if ctx.Err() != nil {
66 | // Context canceled, listener stopped
67 | return
68 | }
69 | log.Printf("Error accepting TCP connection: %v", err)
70 | continue
71 | }
72 | // Handle the connection in a separate goroutine
73 | go handler(conn)
74 | }
75 | }
76 | }()
77 | return nil
78 | }
79 |
80 | // Stop gracefully stops the TCP listener.
81 | func (t *TCPListener) Stop() error {
82 | if t.listener != nil {
83 | err := t.listener.Close()
84 | t.wg.Wait()
85 | return err
86 | }
87 | return nil
88 | }
89 |
--------------------------------------------------------------------------------
/pkg/ingress/transparent_proxy_linux.go:
--------------------------------------------------------------------------------
1 | //go:build linux
2 | // +build linux
3 |
4 | package ingress
5 |
6 | import (
7 | "bytes"
8 | "encoding/binary"
9 | "errors"
10 | "net"
11 | "os"
12 | "syscall"
13 | "unsafe"
14 | )
15 |
16 | func TransparentProxy(file *os.File) error {
17 | fd := int(file.Fd())
18 | if err := syscall.SetsockoptInt(fd, syscall.SOL_IP, syscall.IP_TRANSPARENT, 1); err != nil {
19 | return err
20 | }
21 | if err := syscall.SetsockoptInt(fd, syscall.SOL_IP, syscall.IP_RECVORIGDSTADDR, 1); err != nil {
22 | return err
23 | }
24 | return nil
25 | }
26 |
27 | // Constants that are not always defined/exported by the syscall package, added manually.
28 | const (
29 | 	// IP_RECVORIGDSTADDR requests delivery of the original IPv4 destination address
30 | 	IP_RECVORIGDSTADDR = 0x14
31 | 
32 | 	// IPV6_RECVORIGDSTADDR requests delivery of the original IPv6 destination address
33 | 	IPV6_RECVORIGDSTADDR = 0x4a
34 | )
35 |
36 | // getOrigDst extracts the original UDP destination address from the socket control messages.
37 | func getOrigDst(oob []byte, oobn int) (*net.UDPAddr, error) {
38 | msgs, err := syscall.ParseSocketControlMessage(oob[:oobn])
39 | if err != nil {
40 | return nil, err
41 | }
42 |
43 | var origDst *net.UDPAddr
44 |
45 | for _, msg := range msgs {
46 | switch {
47 | // ===================== IPv4 =====================
48 | case msg.Header.Level == syscall.SOL_IP && msg.Header.Type == IP_RECVORIGDSTADDR:
49 | origDstRaw := &syscall.RawSockaddrInet4{}
50 | if err := binary.Read(bytes.NewReader(msg.Data), binary.LittleEndian, origDstRaw); err != nil {
51 | return nil, err
52 | }
53 | if origDstRaw.Family != syscall.AF_INET {
54 | return nil, errors.New("unsupported family for IPv4")
55 | }
56 |
57 | pp := (*syscall.RawSockaddrInet4)(unsafe.Pointer(origDstRaw))
58 | p := (*[2]byte)(unsafe.Pointer(&pp.Port))
59 |
60 | origDst = &net.UDPAddr{
61 | IP: net.IPv4(pp.Addr[0], pp.Addr[1], pp.Addr[2], pp.Addr[3]),
62 | Port: int(p[0])<<8 + int(p[1]),
63 | }
64 |
65 | // ===================== IPv6 (TProxy) =====================
66 | case msg.Header.Level == syscall.SOL_IPV6 && msg.Header.Type == IPV6_RECVORIGDSTADDR:
67 | origDstRaw := &syscall.RawSockaddrInet6{}
68 | if err := binary.Read(bytes.NewReader(msg.Data), binary.LittleEndian, origDstRaw); err != nil {
69 | return nil, err
70 | }
71 | if origDstRaw.Family != syscall.AF_INET6 {
72 | return nil, errors.New("unsupported family for IPv6")
73 | }
74 |
75 | pp := (*syscall.RawSockaddrInet6)(unsafe.Pointer(origDstRaw))
76 | p := (*[2]byte)(unsafe.Pointer(&pp.Port))
77 |
78 | origDst = &net.UDPAddr{
79 | IP: net.IP(pp.Addr[:]),
80 | Port: int(p[0])<<8 + int(p[1]),
81 | Zone: zoneFromIndex(pp.Scope_id),
82 | }
83 | }
84 | }
85 |
86 | if origDst == nil {
87 | return nil, errors.New("original destination not found in control messages")
88 | }
89 |
90 | return origDst, nil
91 | }
92 |
93 | // zoneFromIndex converts an interface scope ID into an interface name such as "eth0".
94 | func zoneFromIndex(index uint32) string {
95 | ifi, err := net.InterfaceByIndex(int(index))
96 | if err != nil {
97 | return ""
98 | }
99 | return ifi.Name
100 | }
101 |
--------------------------------------------------------------------------------
/pkg/ingress/transparent_proxy_other.go:
--------------------------------------------------------------------------------
1 | //go:build !linux
2 | // +build !linux
3 |
4 | package ingress
5 |
6 | import (
7 | "errors"
8 | "net"
9 | "os"
10 | )
11 |
12 | func TransparentProxy(file *os.File) error {
13 | return errors.New("TransparentProxy not supported on this platform")
14 | }
15 | func getOrigDst(oob []byte, oobn int) (*net.UDPAddr, error) {
16 | return nil, errors.New("getOrigDst not supported on this platform")
17 | }
18 |
--------------------------------------------------------------------------------
/pkg/ingress/udp_listener.go:
--------------------------------------------------------------------------------
1 | package ingress
2 |
3 | import (
4 | "context"
5 | "hachimi/pkg/config"
6 | "log"
7 | "net"
8 | "sync"
9 | )
10 |
11 | // UDPListener represents a managed UDP listener.
12 | type UDPListener struct {
13 | Host string
14 | Port int
15 | conn *net.UDPConn
16 | wg *sync.WaitGroup
17 | transport bool
18 | }
19 |
20 | // NewUDPListener creates a new UDPListener instance.
21 | func NewUDPListener(host string, port int) *UDPListener {
22 | return &UDPListener{
23 | Host: host,
24 | Port: port,
25 | wg: &sync.WaitGroup{},
26 | }
27 | }
28 |
29 | // Start begins listening for UDP packets on the specified address.
30 | func (u *UDPListener) Start(ctx context.Context, handler func(*net.UDPConn, *net.UDPAddr, *net.UDPAddr, []byte)) error {
31 | addr := &net.UDPAddr{
32 | IP: net.ParseIP(u.Host),
33 | Port: u.Port,
34 | }
35 | var err error
36 | u.conn, err = net.ListenUDP("udp", addr)
37 | if err != nil {
38 | return err
39 | }
40 | file, err := u.conn.File()
41 | if err == nil {
42 | defer file.Close()
43 | // TransparentProxy
44 | err = TransparentProxy(file)
45 | if err != nil {
46 | log.Printf("Warning: Failed to set socket option (IP_TRANSPARENT/IP_RECVORIGDSTADDR): %s\n", err)
47 | log.Printf("Fallback to normal UDP listener. Full Port Forwarding is not available.\n")
48 | } else {
49 | u.transport = true
50 | }
51 | } else {
52 | log.Printf("Warning: Failed to get socket file descriptor: %s\n", err)
53 | log.Printf("Fallback to normal UDP listener. Full Port Forwarding is not available.\n")
54 | }
55 |
56 | u.wg.Add(1)
57 | go func() {
58 | defer u.wg.Done()
59 | buf := make([]byte, 65535)
60 | oob := make([]byte, 2048)
61 | for {
62 | select {
63 | case <-ctx.Done():
64 | return
65 | default:
66 | n, oobN, _, src, err := u.conn.ReadMsgUDP(buf, oob)
67 | if err != nil {
68 | log.Printf("Failed to read UDP packet: %s\n", err)
69 | continue
70 | }
71 | var dst *net.UDPAddr
72 | if u.transport {
73 | origDst, err := getOrigDst(oob, oobN)
74 | if err == nil {
75 | dst = origDst
76 | } else {
77 | if config.GetPotConfig().Debug {
78 | log.Printf("Failed to get original destination: %s\n", err)
79 | }
80 | }
81 | }
82 |
83 | handler(u.conn, src, dst, buf[:n])
84 | }
85 | }
86 | }()
87 | return nil
88 |
89 | }
90 |
91 | // Stop gracefully stops the UDP listener.
92 | func (u *UDPListener) Stop() error {
93 | if u.conn != nil {
94 | err := u.conn.Close()
95 | u.wg.Wait()
96 | return err
97 | }
98 | return nil
99 | }
100 |
--------------------------------------------------------------------------------
/pkg/logger/jsonl.go:
--------------------------------------------------------------------------------
1 | package logger
2 |
3 | import (
4 | "encoding/json"
5 | "hachimi/pkg/types"
6 | "io"
7 | "sync"
8 | "time"
9 | )
10 |
11 | type JSONLLogger struct {
12 | logChan chan Loggable
13 | writer io.Writer
14 | wg sync.WaitGroup
15 | buffer []Loggable
16 | maxSize int
17 | mu sync.Mutex
18 | nodeName string
19 | }
20 |
21 | // NewJSONLLogger creates a JSONLLogger that writes one JSON object per line.
22 | func NewJSONLLogger(output io.Writer, bufferSize int, nodeName string) *JSONLLogger {
23 | logger := &JSONLLogger{
24 | logChan: make(chan Loggable, 100),
25 | writer: output,
26 | maxSize: bufferSize,
27 | buffer: make([]Loggable, 0, bufferSize),
28 | nodeName: nodeName,
29 | }
30 | logger.wg.Add(1)
31 | go logger.processLogs()
32 | return logger
33 | }
34 |
35 | func (j *JSONLLogger) processLogs() {
36 | defer j.wg.Done()
37 |
38 | 	ticker := time.NewTicker(1 * time.Second) // force a flush at least once per second
39 | defer ticker.Stop()
40 |
41 | for {
42 | select {
43 | case log, ok := <-j.logChan:
44 | if !ok {
45 | 				// Channel closed: flush the remaining logs
46 | 				j.mu.Lock()
47 | 				j.Flush()
48 | 				j.mu.Unlock()
49 | 				return
50 | 			}
51 | 			// New log entry: append it to the buffer
52 | 			j.mu.Lock()
53 | 			j.buffer = append(j.buffer, log)
54 | 			// Flush once the buffer is full
55 | 			if len(j.buffer) >= j.maxSize {
56 | 				j.Flush()
57 | 			}
58 | 			j.mu.Unlock()
59 | 		case <-ticker.C:
60 | 			// Timer fired: flush whatever is buffered
61 | j.mu.Lock()
62 | j.Flush()
63 | j.mu.Unlock()
64 | }
65 | }
66 | }
67 |
68 | func (j *JSONLLogger) Flush() {
69 | for _, log := range j.buffer {
70 | jsonData, _ := json.Marshal(types.HoneyData{Type: log.Type(), Data: log, Time: time.Now().Unix(), NodeName: j.nodeName})
71 | j.writer.Write(append(jsonData, '\n'))
72 | }
73 | j.buffer = j.buffer[:0]
74 | }
75 |
76 | func (j *JSONLLogger) Log(data Loggable) error {
77 | j.logChan <- data
78 | return nil
79 | }
80 |
81 | func (j *JSONLLogger) Close() error {
82 | close(j.logChan)
83 | j.wg.Wait()
84 | 	// If the writer is also a closer (e.g. a file), close it as well
85 | if closer, ok := j.writer.(io.Closer); ok {
86 | return closer.Close()
87 | }
88 | return nil
89 | }
90 |
--------------------------------------------------------------------------------
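A usage sketch for the JSONL logger: entries are buffered and flushed when the buffer fills or once per second, and Close drains the buffer before closing the file. The file name and node name are examples; types.Session satisfies Loggable through its Type method:

    package main

    import (
    	"log"
    	"os"

    	"hachimi/pkg/logger"
    	"hachimi/pkg/types"
    )

    func main() {
    	f, err := os.OpenFile("honeypot.jsonl", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o644)
    	if err != nil {
    		log.Fatal(err)
    	}
    	// Buffer up to 100 entries; they are flushed when the buffer fills or once per second.
    	l := logger.NewJSONLLogger(f, 100, "node-1")
    	defer l.Close() // Close flushes the remaining buffer and closes the file

    	s := &types.Session{ID: "example", Protocol: "TCP", Service: "http"}
    	if err := l.Log(s); err != nil {
    		log.Println(err)
    	}
    }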
/pkg/logger/logger.go:
--------------------------------------------------------------------------------
1 | package logger
2 |
3 | // Logger is the common logging interface.
4 | type Logger interface {
5 | Log(data Loggable) error
6 | Flush()
7 | Close() error
8 | }
9 | type Loggable interface {
10 | 	Type() string // returns the log type, used as the table / topic name
11 | }
12 |
--------------------------------------------------------------------------------
/pkg/logger/nsq.go:
--------------------------------------------------------------------------------
1 | package logger
2 |
3 | import (
4 | "encoding/json"
5 | "github.com/nsqio/go-nsq"
6 | "hachimi/pkg/types"
7 | "log"
8 | "sync"
9 | "time"
10 | )
11 |
12 | type NSQLogger struct {
13 | logChan chan Loggable
14 | producer *nsq.Producer
15 | topic string
16 | wg sync.WaitGroup
17 | buffer []Loggable
18 | bufSize int
19 | mu sync.Mutex
20 | nodeName string
21 | }
22 |
23 | // NewNSQLogger creates an NSQLogger that publishes batched log entries to NSQ.
24 | func NewNSQLogger(producer *nsq.Producer, topic string, bufSize int, nodeName string) (*NSQLogger, error) {
25 | logger := &NSQLogger{
26 | logChan: make(chan Loggable, 100),
27 | producer: producer,
28 | topic: topic,
29 | bufSize: bufSize,
30 | buffer: make([]Loggable, 0, bufSize),
31 | nodeName: nodeName,
32 | }
33 | logger.wg.Add(1)
34 | go logger.processLogs()
35 | return logger, nil
36 | }
37 |
38 | func (o *NSQLogger) processLogs() {
39 | defer o.wg.Done()
40 | 	ticker := time.NewTicker(1 * time.Second) // force a flush at least once per second
41 | defer ticker.Stop()
42 | for {
43 | select {
44 | case log, ok := <-o.logChan:
45 | if !ok {
46 | 				// Channel closed: flush the remaining logs
47 | 				o.mu.Lock()
48 | 				o.Flush()
49 | 				o.mu.Unlock()
50 | 				return
51 | 			}
52 | 			// New log entry: append it to the buffer
53 | 			o.mu.Lock()
54 | 			o.buffer = append(o.buffer, log)
55 | 			// Flush once the buffer is full
56 | 			if len(o.buffer) >= o.bufSize {
57 | 				o.Flush()
58 | 			}
59 | 			o.mu.Unlock()
60 | 		case <-ticker.C:
61 | 			// Timer fired: flush whatever is buffered
62 | o.mu.Lock()
63 | o.Flush()
64 | o.mu.Unlock()
65 | }
66 | }
67 |
68 | // Flush remaining logs
69 | o.mu.Lock()
70 | o.Flush()
71 | o.mu.Unlock()
72 | }
73 |
74 | func (o *NSQLogger) Flush() {
75 | if len(o.buffer) == 0 {
76 | return
77 | }
78 | var buf [][]byte
79 | for _, logData := range o.buffer {
80 | jsonData, _ := json.Marshal(types.HoneyData{Type: logData.Type(), Data: logData, Time: time.Now().Unix(), NodeName: o.nodeName})
81 | buf = append(buf, jsonData)
82 | }
83 | o.buffer = o.buffer[:0]
84 | err := o.producer.MultiPublish(o.topic, buf)
85 | 	// Under high latency the message queue may block; publishing could be moved to its own goroutine, but repeated failures would then accumulate in memory.
86 | 	// On publish failure the NSQ client keeps the messages in memory and retries.
87 | if err != nil {
88 | log.Println(err)
89 | }
90 | }
91 |
92 | func (o *NSQLogger) Log(data Loggable) error {
93 | o.logChan <- data
94 | return nil
95 | }
96 |
97 | func (o *NSQLogger) Close() error {
98 | close(o.logChan)
99 | o.wg.Wait()
100 | o.producer.Stop()
101 | return nil
102 | }
103 |
--------------------------------------------------------------------------------
/pkg/mq/utils.go:
--------------------------------------------------------------------------------
1 | package mq
2 |
3 | import (
4 | 	"crypto/tls"
5 | 	"crypto/x509"
6 | 	"os"
7 | 	"github.com/nsqio/go-nsq"
8 | )
9 | func NewNsqConsumer(Topic string, Channel string, AuthSecret string, Compression bool, CompressionLevel int, Tls bool, EnableTlsVerify bool, ClientCertPath string, ClientKeyPath string, CaCertPath string) (*nsq.Consumer, error) {
10 | config, err := NewNsqConfig(AuthSecret, Compression, CompressionLevel, Tls, EnableTlsVerify, ClientCertPath, ClientKeyPath, CaCertPath)
11 | if err != nil {
12 | return nil, err
13 | }
14 | c, err := nsq.NewConsumer(Topic, Channel, config)
15 | if err != nil {
16 | return nil, err
17 | }
18 | return c, nil
19 | }
20 | func NewNsqProducer(Host string, AuthSecret string, Compression bool, CompressionLevel int, Tls bool, EnableTlsVerify bool, ClientCertPath string, ClientKeyPath string, CaCertPath string) (*nsq.Producer, error) {
21 | config, err := NewNsqConfig(AuthSecret, Compression, CompressionLevel, Tls, EnableTlsVerify, ClientCertPath, ClientKeyPath, CaCertPath)
22 | if err != nil {
23 | return nil, err
24 | }
25 | p, err := nsq.NewProducer(Host, config)
26 | if err != nil {
27 | return nil, err
28 | }
29 | return p, nil
30 | }
31 |
32 | func NewNsqConfig(AuthSecret string, Compression bool, CompressionLevel int, Tls bool, EnableTlsVerify bool, ClientCertPath string, ClientKeyPath string, CaCertPath string) (*nsq.Config, error) {
33 | config := nsq.NewConfig()
34 | //config.UserAgent = "hachimi"
35 | //config.TlsConfig //TODO
36 | config.AuthSecret = AuthSecret
37 | config.Deflate = Compression
38 | var tlsConfig *tls.Config
39 | if Tls {
40 | tlsConfig = &tls.Config{
41 | InsecureSkipVerify: true,
42 | }
43 | if EnableTlsVerify {
44 | tlsConfig.InsecureSkipVerify = false
45 | }
46 | if ClientCertPath != "" && ClientKeyPath != "" {
47 | cert, err := tls.LoadX509KeyPair(ClientCertPath, ClientKeyPath)
48 | if err != nil {
49 | return nil, err
50 | }
51 | tlsConfig.Certificates = []tls.Certificate{cert}
52 | }
53 | 		if CaCertPath != "" {
54 | 			// A CA bundle is plain PEM, not a cert/key pair, so read the file directly
55 | 			// instead of loading it with tls.LoadX509KeyPair.
56 | 			caCertPEM, err := os.ReadFile(CaCertPath)
57 | 			if err != nil {
58 | 				return nil, err
59 | 			}
60 | 			caCertPool, err := x509.SystemCertPool()
61 | 			if err != nil {
62 | 				caCertPool = x509.NewCertPool()
63 | 			}
64 | 			caCertPool.AppendCertsFromPEM(caCertPEM)
65 | 			tlsConfig.RootCAs = caCertPool
66 | 		}
67 | }
68 |
69 | if Compression && CompressionLevel > 0 && CompressionLevel <= 9 {
70 | config.DeflateLevel = CompressionLevel
71 | }
72 | config.TlsConfig = tlsConfig
73 |
74 | return config, nil
75 | }
76 |
--------------------------------------------------------------------------------
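A sketch wiring these helpers into the NSQ logger from pkg/logger. The nsqd address, topic, and node name are placeholders, and TLS/compression are left disabled:

    package main

    import (
    	"log"

    	"hachimi/pkg/logger"
    	"hachimi/pkg/mq"
    )

    func main() {
    	// Plain-text NSQ connection; the TLS and client-certificate parameters are left empty here.
    	producer, err := mq.NewNsqProducer("127.0.0.1:4150", "", false, 0, false, false, "", "", "")
    	if err != nil {
    		log.Fatal(err)
    	}
    	nsqLogger, err := logger.NewNSQLogger(producer, "hachimi", 100, "node-1")
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer nsqLogger.Close() // flushes the buffer and stops the producer
    	// nsqLogger can now be used wherever a logger.Logger is expected.
    	_ = nsqLogger
    }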
/pkg/plugin/registry.go:
--------------------------------------------------------------------------------
1 | package plugin
2 |
--------------------------------------------------------------------------------
/pkg/plugin/symbols/github_com-fasthttp-router.go:
--------------------------------------------------------------------------------
1 | // Code generated by 'yaegi extract github.com/fasthttp/router'. DO NOT EDIT.
2 |
3 | package symbols
4 |
5 | import (
6 | "github.com/fasthttp/router"
7 | "go/constant"
8 | "go/token"
9 | "reflect"
10 | )
11 |
12 | func init() {
13 | Symbols["github.com/fasthttp/router/router"] = map[string]reflect.Value{
14 | // function, constant and variable definitions
15 | "MatchedRoutePathParam": reflect.ValueOf(&router.MatchedRoutePathParam).Elem(),
16 | "MethodWild": reflect.ValueOf(constant.MakeFromLiteral("\"*\"", token.STRING, 0)),
17 | "New": reflect.ValueOf(router.New),
18 |
19 | // type definitions
20 | "Group": reflect.ValueOf((*router.Group)(nil)),
21 | "Router": reflect.ValueOf((*router.Router)(nil)),
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
/pkg/plugin/symbols/hachimi-pkg-plugin.go:
--------------------------------------------------------------------------------
1 | // Code generated by 'yaegi extract hachimi/pkg/plugin'. DO NOT EDIT.
2 |
3 | //go:build go1.22
4 | // +build go1.22
5 |
6 | package symbols
7 |
8 | import (
9 | "hachimi/pkg/plugin"
10 | "reflect"
11 | )
12 |
13 | func init() {
14 | Symbols["hachimi/pkg/plugin/plugin"] = map[string]reflect.Value{
15 | // function, constant and variable definitions
16 | "RequestHandler": reflect.ValueOf(plugin.RequestHandler),
17 | "ServeIndex": reflect.ValueOf(&plugin.ServeIndex).Elem(),
18 | "ServeList": reflect.ValueOf(&plugin.ServeList).Elem(),
19 | "TitleIndex": reflect.ValueOf(&plugin.TitleIndex).Elem(),
20 | "TitleList": reflect.ValueOf(&plugin.TitleList).Elem(),
21 | }
22 | }
23 |
--------------------------------------------------------------------------------
/pkg/plugin/symbols/hachimi-pkg-types.go:
--------------------------------------------------------------------------------
1 | // Code generated by 'yaegi extract hachimi/pkg/types'. DO NOT EDIT.
2 |
3 | //go:build go1.22
4 | // +build go1.22
5 |
6 | package symbols
7 |
8 | import (
9 | "hachimi/pkg/types"
10 | "reflect"
11 | )
12 |
13 | func init() {
14 | Symbols["hachimi/pkg/types/types"] = map[string]reflect.Value{
15 | // type definitions
16 | "Http": reflect.ValueOf((*types.Http)(nil)),
17 | "Session": reflect.ValueOf((*types.Session)(nil)),
18 | }
19 | }
20 |
--------------------------------------------------------------------------------
/pkg/plugin/symbols/symbols.go:
--------------------------------------------------------------------------------
1 | package symbols
2 |
3 | import "reflect"
4 |
5 | //go:generate go install github.com/traefik/yaegi/cmd/yaegi@latest
6 | //go:generate yaegi extract github.com/valyala/fasthttp
7 | //go:generate yaegi extract github.com/fasthttp/router
8 | //go:generate yaegi extract hachimi/pkg/plugin
9 | //go:generate yaegi extract hachimi/pkg/types
10 |
11 | var Symbols = map[string]map[string]reflect.Value{}
12 |
--------------------------------------------------------------------------------
/pkg/rules/engine.go:
--------------------------------------------------------------------------------
1 | package rules
2 |
3 | import (
4 | "github.com/expr-lang/expr"
5 | "github.com/fsnotify/fsnotify"
6 | "github.com/pelletier/go-toml"
7 | "log"
8 | "os"
9 | "path/filepath"
10 | )
11 |
12 | func EvaluateRule(rule *Rule, data map[string]interface{}) (*[]string, error) {
13 | output, err := expr.Run(rule.program, map[string]any{
14 | "data": data,
15 | "unquote": unquote,
16 | "indexOfH": indexOfH,
17 | "lastIndexOfH": lastIndexOfH,
18 | "hasPrefixH": hasPrefixH,
19 | "hasSuffixH": hasSuffixH,
20 | "compareBytesH": compareBytesH,
21 | })
22 | if err != nil {
23 | return nil, err
24 | }
25 | if output.(bool) {
26 | return &rule.Tags, nil
27 | }
28 | return nil, nil
29 |
30 | }
31 |
32 | func loadRulesFromFolder(folderPath string) {
33 | filepath.Walk(folderPath, func(path string, info os.FileInfo, err error) error {
34 | if err != nil {
35 | log.Println("Error walking through the folder:", err)
36 | return nil
37 | }
38 | if info.IsDir() {
39 | return nil
40 | }
41 |
42 | if filepath.Ext(path) == ".toml" {
43 | 			// Parse the TOML file
44 | 			rulesFromFile, err := parseTomlFile(path)
45 | 			if err != nil {
46 | 				log.Println("Error parsing Toml file:", err)
47 | 			} else {
48 | 				// Merge the parsed rules into the global rule set
49 | updateRules(rulesFromFile)
50 | }
51 | }
52 | return nil
53 | })
54 | }
55 |
56 | func parseTomlFile(filePath string) ([]Rule, error) {
57 | config, err := toml.LoadFile(filePath)
58 | if err != nil {
59 | return nil, err
60 | }
61 | var rulesFromFile []Rule
62 | r := config.Get("rule")
63 | if r != nil {
64 | ruleArray := r.([]*toml.Tree)
65 | for _, ruleTree := range ruleArray {
66 | var rule Rule
67 | if err := ruleTree.Unmarshal(&rule); err != nil {
68 | return nil, err
69 | }
70 | rulesFromFile = append(rulesFromFile, rule)
71 | }
72 | }
73 |
74 | return rulesFromFile, nil
75 | }
76 | func updateRules(newRules []Rule) {
77 | rulesMutex.Lock()
78 | defer rulesMutex.Unlock()
79 |
80 | for _, newRule := range newRules {
81 | found := false
82 | for i, existingRule := range rules {
83 | if existingRule.Name == newRule.Name {
84 | if existingRule.Rule != newRule.Rule {
85 |
86 | program, err := expr.Compile(newRule.Rule)
87 | if err != nil {
88 | log.Println(newRule.Name, err)
89 | found = true
90 | break
91 | }
92 | newRule.program = program
93 | rules[i] = newRule
94 |
95 | }
96 | found = true
97 | break
98 | }
99 | }
100 |
101 | if !found {
102 | program, err := expr.Compile(newRule.Rule)
103 | if err != nil {
104 | log.Println(newRule.Name, err)
105 | continue
106 | }
107 | newRule.program = program
108 | rules = append(rules, newRule)
109 |
110 | }
111 | }
112 | }
113 |
114 | func watchConfigFolder(folderPath string) {
115 | watcher, err := fsnotify.NewWatcher()
116 | if err != nil {
117 | log.Println("Error creating watcher:", err)
118 | return
119 | }
120 | defer watcher.Close()
121 |
122 | 	// Watch the folder and its subfolders
123 | err = filepath.Walk(folderPath, func(path string, info os.FileInfo, err error) error {
124 | if err != nil {
125 | return err
126 | }
127 | if info.IsDir() {
128 | return watcher.Add(path)
129 | }
130 | return nil
131 | })
132 | if err != nil {
133 | log.Println("Error adding folder to watcher:", err)
134 | return
135 | }
136 |
137 | for {
138 | select {
139 | case event, ok := <-watcher.Events:
140 | if !ok {
141 | return
142 | }
143 | if event.Op&fsnotify.Write == fsnotify.Write || event.Op&fsnotify.Create == fsnotify.Create {
144 | 				// A file was written or created: reload the rules from it
145 | 				rulesFromFile, err := parseTomlFile(event.Name)
146 | 				if err != nil {
147 | 					log.Println("Error parsing Toml file:", err)
148 | 				} else {
149 | 					// Merge the parsed rules into the global rule set
150 | updateRules(rulesFromFile)
151 | }
152 | }
153 | case err, ok := <-watcher.Errors:
154 | if !ok {
155 | return
156 | }
157 | log.Println("Error in file watcher:", err)
158 | }
159 | }
160 | }
161 |
--------------------------------------------------------------------------------
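Rules are expr-lang expressions loaded from .toml files (fields name, rule, tags, version). Because the compiled program field is unexported, the simplest way to exercise EvaluateRule is from inside the package, for example in a test; the rule below is an example, not one shipped with the repository:

    package rules

    import (
    	"testing"

    	"github.com/expr-lang/expr"
    )

    func TestEvaluateRule(t *testing.T) {
    	r := Rule{
    		Name: "tag-ssh",               // example rule
    		Rule: `data.Service == "ssh"`, // expr-lang expression evaluated against the event map
    		Tags: []string{"ssh"},
    	}
    	var err error
    	r.program, err = expr.Compile(r.Rule)
    	if err != nil {
    		t.Fatal(err)
    	}
    	tags, err := EvaluateRule(&r, map[string]interface{}{"Service": "ssh"})
    	if err != nil {
    		t.Fatal(err)
    	}
    	if tags == nil || (*tags)[0] != "ssh" {
    		t.Fatalf("expected the ssh tag, got %v", tags)
    	}
    }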
/pkg/rules/rule.go:
--------------------------------------------------------------------------------
1 | package rules
2 |
3 | import (
4 | "github.com/expr-lang/expr/vm"
5 | "sync"
6 | )
7 |
8 | type Rule struct {
9 | Name string `toml:"name"`
10 | Rule string `toml:"rule"`
11 | Tags []string `toml:"tags"`
12 | Version int `toml:"version"`
13 | program *vm.Program
14 | }
15 |
16 | var rulesMutex sync.RWMutex
17 | var rules []Rule
18 |
--------------------------------------------------------------------------------
/pkg/rules/util.go:
--------------------------------------------------------------------------------
1 | package rules
2 |
3 | import (
4 | "crypto/sha1"
5 | "encoding/hex"
6 | "strconv"
7 | "strings"
8 | )
9 |
10 | func boolToUint8(b bool) uint8 {
11 | if b {
12 | return 1
13 | }
14 | return 0
15 | }
16 |
17 | func calculateSHA1(input string) string {
18 |
19 | hasher := sha1.New()
20 | hasher.Write([]byte(input))
21 | hashSum := hasher.Sum(nil)
22 |
23 | hashString := hex.EncodeToString(hashSum)
24 | return hashString
25 | }
26 | func unquote(data string) []byte {
27 | s, err := strconv.Unquote(`"` + data + `"`)
28 | if err != nil {
29 | return []byte(data)
30 | }
31 | return []byte(s)
32 | }
33 |
34 | func indexOfH(data []byte, hexString string) int {
35 | hexBytes, err := ParseHex(hexString)
36 | if err != nil {
37 | return -1
38 | }
39 |
40 | for i := 0; i <= len(data)-len(hexBytes); i++ {
41 | if compareBytes(data[i:i+len(hexBytes)], hexBytes) {
42 | return i
43 | }
44 | }
45 |
46 | return -1
47 | }
48 |
49 | func lastIndexOfH(data []byte, hexString string) int {
50 | hexBytes, err := ParseHex(hexString)
51 | if err != nil {
52 | return -1
53 | }
54 |
55 | for i := len(data) - len(hexBytes); i >= 0; i-- {
56 | if compareBytes(data[i:i+len(hexBytes)], hexBytes) {
57 | return i
58 | }
59 | }
60 |
61 | return -1
62 | }
63 |
64 | func hasPrefixH(data []byte, hexString string) bool {
65 | hexBytes, err := ParseHex(hexString)
66 | if err != nil || len(data) < len(hexBytes) {
67 | return false
68 | }
69 |
70 | return compareBytes(data[:len(hexBytes)], hexBytes)
71 | }
72 |
73 | func hasSuffixH(data []byte, hexString string) bool {
74 | hexBytes, err := ParseHex(hexString)
75 | if err != nil || len(data) < len(hexBytes) {
76 | return false
77 | }
78 |
79 | return compareBytes(data[len(data)-len(hexBytes):], hexBytes)
80 | }
81 | func compareBytesH(a []byte, hexString string) bool {
82 | b, err := ParseHex(hexString)
83 | if err != nil {
84 | return false
85 | }
86 |
87 | if len(a) != len(b) {
88 | return false
89 | }
90 |
91 | for i := range a {
92 | if a[i] != b[i] {
93 | return false
94 | }
95 | }
96 |
97 | return true
98 | }
99 | func compareBytes(a, b []byte) bool {
100 | if len(a) != len(b) {
101 | return false
102 | }
103 |
104 | for i := range a {
105 | if a[i] != b[i] {
106 | return false
107 | }
108 | }
109 |
110 | return true
111 | }
112 |
113 | func ParseHex(hexString string) ([]byte, error) {
114 | hexString = strings.ReplaceAll(hexString, " ", "")
115 | hexString = strings.ReplaceAll(hexString, "\\x", "")
116 | // Decode the hexadecimal string
117 | decoded, err := hex.DecodeString(hexString)
118 | if err != nil {
119 | return nil, err
120 | }
121 |
122 | return decoded, nil
123 | }
124 |
--------------------------------------------------------------------------------
/pkg/types/handler.go:
--------------------------------------------------------------------------------
1 | package types
2 |
3 | import (
4 | "net"
5 | )
6 |
7 | type ProtocolHandler interface {
8 | Handle(conn net.Conn, session *Session)
9 | }
10 |
--------------------------------------------------------------------------------
/pkg/types/http.go:
--------------------------------------------------------------------------------
1 | package types
2 |
3 | import (
4 | "bytes"
5 | "time"
6 | )
7 |
8 | //TODO JA3
9 |
10 | type Http struct {
11 | Session
12 | ID string `json:"id"`
13 | SessionID string `json:"session_id"`
14 | StartTime time.Time `json:"start_time"`
15 | EndTime time.Time `json:"end_time"`
16 | Header map[string]string `json:"header"`
17 | UriParam map[string]string `json:"uri_param"`
18 | BodyParam map[string]string `json:"body_param"`
19 | Method string `json:"method"`
20 | Path string `json:"path"`
21 | UA string `json:"ua"`
22 | Host string `json:"host"`
23 | RawHeader string `json:"raw_header"`
24 | Body string `json:"body"`
25 | Service string `json:"service"`
26 | 	// elapsed time in milliseconds
27 | Duration int `json:"duration"`
28 | inBuffer *bytes.Buffer
29 | outBuffer *bytes.Buffer
30 | }
31 |
32 | func (h Http) Type() string {
33 | return "http_session"
34 | }
35 |
--------------------------------------------------------------------------------
/pkg/types/log.go:
--------------------------------------------------------------------------------
1 | package types
2 |
3 | type HoneyData struct {
4 | 	// Type is the log type
5 | 	Type string `json:"type"`
6 | 	// Data is the log payload
7 | 	Data interface{} `json:"data"`
8 | 	// Time is the log timestamp (Unix seconds)
9 | 	Time int64 `json:"time"`
10 | 	// Error is an optional error
11 | 	Error error `json:"error"`
12 | 	// NodeName is the honeypot node name
13 | 	NodeName string `json:"nodeName"`
14 | 	// NodeIP is the node's public IP
15 | //NodeIP string `json:"nodeIP"`
16 | }
17 |
--------------------------------------------------------------------------------
/pkg/types/redis.go:
--------------------------------------------------------------------------------
1 | package types
2 |
3 | import (
4 | "time"
5 | )
6 |
7 | type RedisSession struct {
8 | Session
9 | ID string `gorm:"primaryKey" json:"id"`
10 | SessionID string `gorm:"index" json:"session_id"`
11 | Protocol string `gorm:"index" json:"protocol"`
12 | StartTime time.Time `gorm:"index" json:"start_time"`
13 | EndTime time.Time `gorm:"index" json:"end_time"`
14 | Duration int `json:"duration"`
15 | Error bool `json:"error"`
16 | Service string `json:"service"`
17 | Data string `json:"data"`
18 | User string `json:"user"`
19 | PassWord string `json:"password"`
20 | }
21 |
22 | func (r RedisSession) Type() string {
23 | return "redis_session"
24 | }
25 |
--------------------------------------------------------------------------------
/pkg/types/session.go:
--------------------------------------------------------------------------------
1 | package types
2 |
3 | import (
4 | "bytes"
5 | "net"
6 | "time"
7 | )
8 |
9 | // TODO JA3
10 | type Session struct {
11 | ID string `json:"id"`
12 | Protocol string `json:"protocol"`
13 | connection interface{}
14 | StartTime time.Time `json:"start_time"`
15 | EndTime time.Time `json:"end_time"`
16 | SrcIP string `json:"src_ip"`
17 | SrcPort int `json:"src_port"`
18 | DstIP string `json:"dst_ip"`
19 | DstPort int `json:"dst_port"`
20 | IsTls bool `json:"is_tls"`
21 | IsGmTls bool `json:"is_gm_tls"`
22 | IsHandled bool `json:"is_handled"`
23 | IsHttp bool `json:"is_http"`
24 | Data string `json:"data"`
25 | Service string `json:"service"`
26 | 	// elapsed time in milliseconds
27 | Duration int `json:"duration"`
28 | inBuffer *bytes.Buffer
29 | outBuffer *bytes.Buffer
30 | }
31 |
32 | func (s *Session) SetConnection(conn interface{}) {
33 | s.connection = conn
34 |
35 | }
36 |
37 | func (s *Session) SetInBuffer(buffer *bytes.Buffer) {
38 | s.inBuffer = buffer
39 | }
40 | func (s *Session) SetOutBuffer(buffer *bytes.Buffer) {
41 | s.outBuffer = buffer
42 | }
43 | func (s *Session) GetOutBuffer() *bytes.Buffer {
44 | return s.outBuffer
45 | }
46 |
47 | // Close closes the underlying connection.
48 | func (s *Session) Close() {
49 | 	if s.connection != nil {
50 | 		// Only close TCP; the UDP "connection" is the shared, connectionless listener socket and closing it would stop listening
51 | if conn, ok := s.connection.(*net.TCPConn); ok {
52 | conn.Close()
53 | }
54 | }
55 | }
56 | func (s Session) Type() string {
57 | return "session"
58 | }
59 |
--------------------------------------------------------------------------------
/pkg/types/ssh.go:
--------------------------------------------------------------------------------
1 | package types
2 |
--------------------------------------------------------------------------------
/pkg/utils/utils.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "crypto/sha1"
5 | "encoding/hex"
6 | "errors"
7 | "fmt"
8 | "io"
9 | "net"
10 | "reflect"
11 | "strings"
12 | "time"
13 | )
14 |
15 | type NewConn struct {
16 | net.Conn
17 | Reader io.Reader
18 | Writer io.Writer
19 | Counter int64
20 | limit int64
21 | }
22 |
23 | func NewLoggedConn(conn net.Conn, reader io.Reader, logWriter io.Writer, limit int64) *NewConn {
24 | if logWriter == nil {
25 | 		logWriter = io.Discard // discard logged traffic by default
26 | }
27 | return &NewConn{
28 | Conn: conn,
29 | 		Reader: io.TeeReader(reader, logWriter), // record everything that is read
30 | Writer: conn,
31 | limit: limit,
32 | }
33 | }
34 |
35 | func (c *NewConn) Read(p []byte) (int, error) {
36 | 	// enforce the global read limit
37 | if c.limit != 0 && c.Counter >= c.limit {
38 | //EOF
39 | return 0, io.EOF
40 | }
41 | n, err := c.Reader.Read(p)
42 | c.Counter += int64(n)
43 | return n, err
44 | }
45 |
46 | func (c *NewConn) Close() error {
47 | 	// Connection closing is controlled at the outermost layer to keep other components from closing it unexpectedly
48 | return nil
49 | }
50 |
51 | // EscapeBytes escapes a byte slice to a string, using C-style escape sequences.
52 | func EscapeBytes(data []byte) string {
53 | lowerhex := "0123456789abcdef"
54 | var builder strings.Builder
55 | 	builder.Grow(len(data) * 2) // preallocate space
56 |
57 | for _, b := range data {
58 | if b == '\\' { // always backslashed
59 | builder.WriteByte('\\')
60 | builder.WriteByte(b)
61 | continue
62 | }
63 |
64 | switch b {
65 | case '\a':
66 | builder.WriteString(`\a`)
67 | case '\b':
68 | builder.WriteString(`\b`)
69 | case '\f':
70 | builder.WriteString(`\f`)
71 | case '\n':
72 | builder.WriteString(`\n`)
73 | case '\r':
74 | builder.WriteString(`\r`)
75 | case '\t':
76 | builder.WriteString(`\t`)
77 | case '\v':
78 | builder.WriteString(`\v`)
79 | default:
80 | switch {
81 | case b < 0x20 || b == 0x7f || b >= 0x80:
82 | builder.WriteString(`\x`)
83 | builder.WriteByte(lowerhex[b>>4])
84 | builder.WriteByte(lowerhex[b&0xF])
85 | default:
86 | builder.WriteByte(b)
87 | }
88 | }
89 | }
90 | return builder.String()
91 | }
92 |
93 | // UnescapeBytes unescapes a string to a byte slice, using C-style escape sequences.
94 | func UnescapeBytes(data string) ([]byte, error) {
95 | var result []byte
96 | for i := 0; i < len(data); i++ {
97 | if data[i] == '\\' {
98 | i++ // Skip the backslash
99 | if i >= len(data) {
100 | return nil, fmt.Errorf("invalid escape sequence at end of string")
101 | }
102 | switch data[i] {
103 | case 'a':
104 | result = append(result, '\a')
105 | case 'b':
106 | result = append(result, '\b')
107 | case 'f':
108 | result = append(result, '\f')
109 | case 'n':
110 | result = append(result, '\n')
111 | case 'r':
112 | result = append(result, '\r')
113 | case 't':
114 | result = append(result, '\t')
115 | case 'v':
116 | result = append(result, '\v')
117 | case 'x':
118 | if i+2 >= len(data) {
119 | return nil, fmt.Errorf("invalid \\x escape sequence")
120 | }
121 | high := decodeHex(data[i+1])
122 | low := decodeHex(data[i+2])
123 | if high < 0 || low < 0 {
124 | return nil, fmt.Errorf("invalid hex digit in \\x escape sequence")
125 | }
126 | result = append(result, byte(high<<4|low))
127 | i += 2
128 | case 'u':
129 | if i+4 >= len(data) {
130 | return nil, fmt.Errorf("invalid \\u escape sequence")
131 | }
132 | var r rune
133 | for j := 0; j < 4; j++ {
134 | v := decodeHex(data[i+1+j])
135 | if v < 0 {
136 | return nil, fmt.Errorf("invalid hex digit in \\u escape sequence")
137 | }
138 | r = r<<4 | rune(v)
139 | }
140 | result = append(result, string(r)...)
141 | i += 4
142 | case '\\':
143 | result = append(result, '\\')
144 | default:
145 | return nil, fmt.Errorf("unknown escape sequence: \\%c", data[i])
146 | }
147 | } else {
148 | result = append(result, data[i])
149 | }
150 | }
151 | return result, nil
152 | }
153 |
154 | // decodeHex decodes a hexadecimal digit.
155 | func decodeHex(b byte) int {
156 | switch {
157 | case '0' <= b && b <= '9':
158 | return int(b - '0')
159 | case 'a' <= b && b <= 'f':
160 | return int(b - 'a' + 10)
161 | case 'A' <= b && b <= 'F':
162 | return int(b - 'A' + 10)
163 | default:
164 | return -1
165 | }
166 | }
167 | func Min(a, b int) int {
168 | if a < b {
169 | return a
170 | }
171 | return b
172 | }
173 |
174 | // ReadAll reads from the connection until the limit is reached and reports whether the limit was exceeded
175 | func ReadAll(conn net.Conn, limit int64) bool {
176 | var buffer = make([]byte, 1024)
177 | var total int64
178 | for {
179 | n, err := conn.Read(buffer)
180 | if err != nil {
181 | return false
182 | }
183 | total += int64(n)
184 | if total >= limit {
185 | return true
186 | }
187 | }
188 | }
189 |
190 | // ToMap converts a struct to a map; nested structs are not supported
191 | func ToMap(obj interface{}) (map[string]interface{}, error) {
192 | // Return an error if obj is nil or not a struct
193 | if obj == nil {
194 | return nil, errors.New("input object is nil")
195 | }
196 | val := reflect.ValueOf(obj)
197 | if val.Kind() == reflect.Ptr {
198 | val = val.Elem() // dereference the pointer
199 | }
200 | if val.Kind() != reflect.Struct {
201 | return nil, errors.New("input is not a struct")
202 | }
203 | // Iterate over the struct fields and copy them into the map
204 | result := make(map[string]interface{})
205 | typ := val.Type()
206 | for i := 0; i < val.NumField(); i++ {
207 | field := typ.Field(i)
208 | if field.PkgPath != "" {
209 | // skip unexported fields
210 | continue
211 | }
212 | result[field.Name] = val.Field(i).Interface()
213 | }
214 | return result, nil
215 | }
216 |
217 | // StringToTime parses a time.Time string back into a time.Time
218 | func StringToTime(s string) (time.Time, error) {
219 | dateTime, err := time.Parse(time.RFC3339Nano, s)
220 | if err != nil {
221 | dateTime, err = time.Parse(time.RFC3339, s)
222 | }
223 | return dateTime, err
224 |
225 | }
226 |
227 | // MapInterfaceToString converts map[string]interface{} to map[string]string, keeping only string values
228 |
229 | func MapInterfaceToString(m map[string]interface{}) map[string]string {
230 | result := make(map[string]string)
231 | for k, v := range m {
232 | if str, ok := v.(string); ok {
233 | result[k] = str
234 | }
235 | }
236 | return result
237 | }
238 | func SHA1(input string) string {
239 |
240 | hasher := sha1.New()
241 | hasher.Write([]byte(input))
242 | hashSum := hasher.Sum(nil)
243 |
244 | hashString := hex.EncodeToString(hashSum)
245 | return hashString
246 | }
247 |
--------------------------------------------------------------------------------
/readme.md:
--------------------------------------------------------------------------------
1 | # hachimi
2 |
3 | A distributed honeynet system for collecting and analyzing Internet Background Noise.
4 |
5 | Internet background noise is the constant stream of scanning, attack, and malware-propagation traffic arriving from the Internet. It is typically generated by malware, botnets, and vulnerability scanners, and analyzing it is highly valuable for network security and data analysis.
6 |
7 | The project uses a Linux transparent proxy to listen on every port, infers the protocol from the request data, and emulates the corresponding service's response.
8 | # System Architecture
9 | ```
10 | ┌─────┐ ┌─────┐ ┌─────┐
11 | │ POT │ │ POT │ │ POT │ ...
12 | └──┬──┘ └──┬──┘ └──┬──┘
13 | │ │ logs │
14 | │ ┌────▼────┐ │
15 | ┌──────────┐ └──►│ NSQ │◄──┘
16 | │ │ └────┬────┘
17 | │ BeeKeeper│ ┌──────┴───────┐
18 | │ │ │ │
19 | └──────────┘ ┌────▼───┐ ┌────▼───┐
20 | ▲ │ hunter ├──┬──┤ hunter │ ...
21 | │ └────────┘ │ └────────┘
22 | │ │
23 | │ ┌──────▼───────┐
24 | │ │ │
25 | └────────────────┤ Clickhouse │
26 | │ │
27 | └──────────────┘
28 | POT: honeypot node
29 | NSQ: message queue server (NSQD)
30 | hunter: analysis node
31 | Clickhouse: database
32 | BeeKeeper: honeynet management platform
33 |
34 | Honeypot nodes parse captured traffic and publish the results to the message queue; analysis nodes consume the queue and store the data in the database, where it is finally presented through visualization tools (see the NSQ sketch below).
35 | ```
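
The sketch below illustrates this flow with the `go-nsq` client: the honeypot side publishes a JSON log to an NSQ topic and the hunter side consumes it. The topic name, addresses, and payload shape are placeholders for illustration, not the project's actual logger or hunter code.

```go
package main

import (
	"encoding/json"
	"log"

	"github.com/nsqio/go-nsq"
)

func main() {
	cfg := nsq.NewConfig()

	// Honeypot side: publish one session log to an NSQ topic.
	producer, err := nsq.NewProducer("127.0.0.1:4150", cfg)
	if err != nil {
		log.Fatal(err)
	}
	payload, _ := json.Marshal(map[string]any{"service": "http", "src_ip": "203.0.113.7"})
	if err := producer.Publish("hachimi_logs", payload); err != nil {
		log.Fatal(err)
	}

	// Hunter side: consume logs and hand them to storage (ClickHouse in the real pipeline).
	consumer, err := nsq.NewConsumer("hachimi_logs", "hunter", cfg)
	if err != nil {
		log.Fatal(err)
	}
	consumer.AddHandler(nsq.HandlerFunc(func(m *nsq.Message) error {
		log.Printf("got log: %s", m.Body)
		return nil
	}))
	if err := consumer.ConnectToNSQD("127.0.0.1:4150"); err != nil {
		log.Fatal(err)
	}
	<-consumer.StopChan // block until the consumer is stopped
}
```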
36 |
37 |
38 |
39 | ## Network Support
40 | - [x] IPv4
41 | - [x] IPv6
42 |
43 | ## Protocol Support
44 | - TCP
45 | - UDP
46 | - HTTP
47 | - TLS
48 | - SSH
49 | - REDIS
50 | - TODO
51 | ### HTTP
52 | - [x] Automatic detection
53 | - [x] Protocol parsing
54 | - [x] Response emulation
55 | - [x] Plugin system
56 | - [x] Logging
57 | #### Automatic Protocol Detection
58 | - Check whether the first 10 bytes of the initial packet start with an HTTP request method: `POST, GET, HEAD, OPTIONS, DELETE, PUT, TRACE, CONNECT, PATCH`
59 | - Check whether the first 1 KB of the initial packet contains an HTTP request line such as `GET / HTTP/1.1` (a sketch of this check follows below)
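
A minimal sketch of this kind of first-bytes check in Go (illustrative only; the package and function names are made up and this is not the project's actual detection code):

```go
package sniff

import (
	"bytes"
	"strings"
)

// looksLikeHTTP applies the two checks described above to the first bytes read
// from a connection: a known request method in the first 10 bytes, or an
// HTTP/1.x request line somewhere in the first 1 KB.
func looksLikeHTTP(first []byte) bool {
	head := first
	if len(head) > 10 {
		head = head[:10]
	}
	for _, m := range []string{"POST", "GET", "HEAD", "OPTIONS", "DELETE", "PUT", "TRACE", "CONNECT", "PATCH"} {
		if strings.HasPrefix(string(head), m+" ") {
			return true
		}
	}
	window := first
	if len(window) > 1024 {
		window = window[:1024]
	}
	return bytes.Contains(window, []byte(" HTTP/1."))
}
```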
60 | #### Protocol Parsing
61 | - The incoming connection is handed to `fasthttp`, which parses the request headers, body, and other fields
62 | #### Response Emulation
63 | - `fasthttp` is used to generate the response data
64 | #### Plugin System
65 | - The plugin system is built on `yaegi`, so flexible HTTP request-handling logic can be written in plain Go to process requests passed in by `fasthttp`, enabling dedicated handling of specific requests.
66 | - Plugins support hot reloading and take effect without restarting the system
67 |
68 | Example: luring exploitation of the D-Link NAS command injection vulnerability (CVE-2024-3273)
69 | ```go
70 | package server
71 |
72 | import (
73 | 	"encoding/base64"
74 | 	"strconv"
75 |
76 | 	"github.com/fasthttp/router"
77 | 	"github.com/valyala/fasthttp"
78 | 	"hachimi/pkg/types"
79 | )
80 |
81 | func RequestHandler(plog *types.HttpLog, ctx *fasthttp.RequestCtx) {
82 | 	r := router.New()
83 | 	r.GET("/cgi-bin/nas_sharing.cgi", dlinkNas)
84 | 	r.Handler(ctx) // dispatch the request to the registered route
85 | }
75 | func dlinkNas(ctx *fasthttp.RequestCtx) {
76 | ctx.Response.Header.Set("Content-Language", "en")
77 | ctx.Response.Header.Set("Server", "lighttpd/1.4.28")
78 | ctx.Response.Header.Set("P3p", "CP='CURa ADMa DEVa PSAo PSDo OUR BUS UNI PUR INT DEM STA PRE COM NAV OTC NOI DSP COR'")
79 | user := string(ctx.QueryArgs().Peek("user"))
80 | if user == "messagebus" {
81 | system := string(ctx.QueryArgs().Peek("system"))
82 |
83 | switch system {
84 | case "ZWNobwktZQlcXHg2OVxceDY0fHNo":
85 | ctx.WriteString("uid=0(root) gid=0(root)\n")
86 | break
87 | case "aWQ=":
88 | ctx.WriteString("uid=0(root) gid=0(root)\n")
89 | break
90 | case "bHM=":
91 | ctx.WriteString("account_mgr.cgi\napkg_mgr.cgi\napp_mgr.cgi\nbox.cgi\ncodepage_mgr.cgi\ndownload_mgr.cgi\nfolder_tree.cgi\ngdrive.cgi\nget_xml.cgi\ngui_mgr.cgi\nhd_config.cgi\ninfo.cgi\nisomount_mgr.cgi\nlocal_backup_mgr.cgi\nlogin_mgr.cgi\nmyMusic.cgi\nmydlink.cgi\nmydlink_account_mgr.cgi\nmydlink_sync_mgr.cgi\nnas_sharing.cgi\nnetwork_mgr.cgi\np2p.cgi\np2p_upload.cgi\nphotocenter_mgr.cgi\nremote_backup.cgi\ns3.cgi\nscan_dsk.cgi\nsmart.cgi\nstatus_mgr.cgi\nsystem_mgr.cgi\ntime_machine.cgi\nusb_backup.cgi\nusb_device.cgi\nve_mgr.cgi\nwebdav_mgr.cgi\nwebfile_mgr.cgi\nwidget_api.cgi\nwizard_mgr.cgi\n")
92 | break
93 | case "dW5hbWUJLW0=":
94 | ctx.WriteString("armv5tel\n")
95 | default:
96 | decodeString, _ := base64.StdEncoding.DecodeString(system)
97 | a, err := strconv.Unquote(`"` + string(decodeString) + `"`)
98 | if err == nil {
99 | decodeString = []byte(a)
100 | a, err = strconv.Unquote(`"` + string(decodeString) + `"`)
101 | if err == nil {
102 | decodeString = []byte(a)
103 | }
104 |
105 | }
106 | if string(decodeString) != "" {
107 | ctx.WriteString(string(decodeString) + "\n")
108 | }
109 |
110 | break
111 | }
112 |
113 | ctx.WriteString(`
114 | 1
115 | `)
116 | } else {
117 | ctx.WriteString(`
118 | 0
119 | `)
120 | }
121 | }
122 | ```
123 | #### Logging
124 | In addition to standard session information such as the source IP and port, detailed request information is recorded, including the request headers, body, method, and path.
125 |
126 | Log format
127 | ``` go
128 | type Http struct {
129 | Session
130 | ID string `json:"id"`
131 | SessionID string `json:"session_id"`
132 | StartTime time.Time `json:"start_time"`
133 | EndTime time.Time `json:"end_time"`
134 | Header map[string]string `json:"header"`
135 | UriParam map[string]string `json:"uri_param"`
136 | BodyParam map[string]string `json:"body_param"`
137 | Method string `json:"method"`
138 | Path string `json:"path"`
139 | UA string `json:"ua"`
140 | Host string `json:"host"`
141 | RawHeader string `json:"raw_header"`
142 | Body string `json:"body"`
143 | Service string `json:"service"`
144 | 	//Elapsed time in ms
145 | Duration int `json:"duration"`
146 | }
147 | ```
148 | ### TLS
149 | - [x] Automatic detection
150 | - [x] Protocol parsing
151 | - [x] Response emulation
152 | - [ ] Plugin system
153 | - [ ] Logging
154 | #### Automatic Protocol Detection
155 | - Check whether the first 2 bytes of the initial packet look like a TLS ClientHello: `0x16 0x03` (TLS 1.0/1.1/1.2/1.3)
156 | #### Protocol Parsing
157 | - The Go standard library `crypto/tls` takes over the incoming connection and parses the TLS handshake
158 | #### Response Emulation
159 | - After the TLS takeover, the decrypted connection is forwarded to the downstream protocol-detection modules for further handling, as sketched below
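
A rough sketch of that hand-off, assuming an already-accepted `net.Conn` that was sniffed as TLS; the certificate plumbing and the `handleDownstream` callback are placeholders, not the project's API:

```go
package tlswrap

import (
	"crypto/tls"
	"net"
)

// wrapTLS terminates TLS on an accepted connection and passes the decrypted
// stream (which still satisfies net.Conn) back into the detection pipeline.
func wrapTLS(raw net.Conn, cert tls.Certificate, handleDownstream func(net.Conn)) error {
	tlsConn := tls.Server(raw, &tls.Config{Certificates: []tls.Certificate{cert}})
	if err := tlsConn.Handshake(); err != nil {
		return err
	}
	// Downstream emulators (HTTP, SSH, Redis, ...) now see plaintext.
	handleDownstream(tlsConn)
	return nil
}
```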
160 | #### Logging
161 | - JA3 fingerprint logging TODO
162 | - SNI logging TODO
163 | ### SSH
164 | - [x] Automatic detection
165 | - [x] Protocol parsing
166 | - [x] Response emulation
167 | - [ ] Plugin system
168 | - [x] Logging
169 | #### Automatic Protocol Detection
170 | - Check whether the first 10 bytes of the initial packet start with an SSH client version banner: `SSH-2.0-`
171 | #### Protocol Parsing
172 | - The Go library `golang.org/x/crypto/ssh` takes over the incoming connection and parses the SSH handshake
173 | #### Response Emulation
174 | - Implements simple interactive and non-interactive shell handling for the SSH protocol (a sketch follows this list)
175 | - SSH relay and session recording for realistic environment emulation `TODO`
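
A condensed sketch of accepting such a connection with `golang.org/x/crypto/ssh` and recording the attempted credentials; the host-key plumbing and the logging are placeholders, and the real honeypot additionally emulates session/shell channels:

```go
package sshpot

import (
	"log"
	"net"

	"golang.org/x/crypto/ssh"
)

// newSSHConfig returns a server config that accepts every password attempt and
// records the username/password pair, which is the essence of an SSH honeypot.
func newSSHConfig(hostKey ssh.Signer) *ssh.ServerConfig {
	cfg := &ssh.ServerConfig{
		PasswordCallback: func(conn ssh.ConnMetadata, password []byte) (*ssh.Permissions, error) {
			log.Printf("ssh auth: user=%s password=%s client=%s", conn.User(), password, conn.ClientVersion())
			return nil, nil // accept every credential
		},
	}
	cfg.AddHostKey(hostKey)
	return cfg
}

func handleSSH(raw net.Conn, cfg *ssh.ServerConfig) {
	// NewServerConn performs the handshake and exposes the channel requests
	// (session/shell/exec) that the emulator answers in the real system.
	sconn, chans, reqs, err := ssh.NewServerConn(raw, cfg)
	if err != nil {
		return
	}
	defer sconn.Close()
	go ssh.DiscardRequests(reqs)
	for newChan := range chans {
		newChan.Reject(ssh.UnknownChannelType, "not implemented in this sketch")
	}
}
```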
176 | #### Logging
177 | - SSH session logs contain detailed session information: the SSH client version, non-interactive shell commands, raw `SSH channel` data, the authentication public key, whether the session was interactive, and the authenticated username and password
178 | - The complete shell session is logged, covering both non-interactive and interactive shell commands and session data
179 |
180 | ```go
181 | type SSHSession struct {
182 | types.Session
183 | ID string `gorm:"primaryKey" json:"id"`
184 | SessionID string `gorm:"index" json:"session_id"`
185 | StartTime time.Time `gorm:"index" json:"start_time"`
186 | EndTime time.Time `gorm:"index" json:"end_time"`
187 | Duration int `json:"duration"`
188 | ClientVersion string `json:"client_version"`
189 | Shell string `json:"shell"`
190 | Request string `json:"request"`
191 | Error bool `json:"error"`
192 | PublicKey string `json:"public_key"`
193 | Service string `json:"service"`
194 | User string `json:"user"`
195 | Data string `json:"data"`
196 | IsInteract bool `json:"is_interact"`
197 | PassWord string `json:"password"`
198 | }
199 | ```
200 | ### REDIS
201 | - [x] Automatic detection
202 | - [x] Protocol parsing
203 | - [x] Response emulation
204 | - [ ] Plugin system
205 | - [x] Logging
206 | #### Automatic Protocol Detection
207 | - Check whether the first 2 bytes of the initial packet match the Redis request format `*n`
208 | - Determine whether it is a Redis service from the destination port and from whether the payload deserializes as Redis protocol data
209 | #### Protocol Parsing
210 | - Parse the Redis request packet and extract the Redis command; part of `Docker`'s code is reused here to parse the request
211 | #### Response Emulation
212 | - Currently every known Redis command simply receives an OK response, as in the sketch below
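
A simplified sketch of that behaviour: read a RESP `*n` array command and reply `+OK`. It illustrates the protocol shape only and is not the project's parser (which reuses Docker's RESP code):

```go
package redispot

import (
	"bufio"
	"io"
	"log"
	"net"
	"strconv"
	"strings"
)

// handleRedis reads RESP array commands (the "*n" format mentioned above) and
// replies "+OK" to each complete command, which keeps most scanners talking.
func handleRedis(conn net.Conn) {
	defer conn.Close()
	r := bufio.NewReader(conn)
	for {
		header, err := r.ReadString('\n')
		if err != nil {
			return
		}
		if !strings.HasPrefix(header, "*") {
			return // not RESP; the real system would try other protocol handlers
		}
		n, err := strconv.Atoi(strings.TrimRight(header[1:], "\r\n"))
		if err != nil || n <= 0 {
			return
		}
		args := make([]string, 0, n)
		for i := 0; i < n; i++ {
			// Each argument is a bulk string: "$<len>\r\n<bytes>\r\n".
			sizeLine, err := r.ReadString('\n')
			if err != nil || !strings.HasPrefix(sizeLine, "$") {
				return
			}
			size, err := strconv.Atoi(strings.TrimRight(sizeLine[1:], "\r\n"))
			if err != nil || size < 0 {
				return
			}
			buf := make([]byte, size+2) // payload plus trailing \r\n
			if _, err := io.ReadFull(r, buf); err != nil {
				return
			}
			args = append(args, string(buf[:size]))
		}
		log.Printf("redis command: %v", args) // the honeypot records this in its session log
		conn.Write([]byte("+OK\r\n"))
	}
}
```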
213 | #### Logging
214 | - Redis session logs contain the Redis command, its arguments, and related information
215 |
216 |
217 |
218 | ## Deployment
219 | [Deployment documentation](docs/deploy.md)
220 |
221 |
222 | ## Data Visualization
223 |
224 | 
225 | 
226 |
227 | ## Data Analysis
228 | ### BeeKeeper
229 |
230 | 
231 | 
232 | 
233 | 
234 | 
235 |
236 |
237 | ## Open Data
238 | An open Internet background noise dataset covering roughly 10 million HTTP requests collected between September 2024 and January 2025. The dataset is in Parquet format and includes the request time, source IP and port, request method, path, headers, and body.
239 | [Dataset](https://huggingface.co/datasets/burpheart/Internet-background-noise)
--------------------------------------------------------------------------------
/servers.txt:
--------------------------------------------------------------------------------
1 | micro_httpd
2 | Linux/5.10.60-qnap,UPnP/1.0,PortableSDKforUPnPdevices/1.6.22
3 | GEM ver1
4 | Safedog/4.0.0
5 | Embedthis-Appweb/3.4.2
6 | LOVE
7 | Goahead/2.5.0 PeerSec-MatrixSSL/3.2.1-OPEN
8 | streamserver
9 | HTTPD_ac1.0
10 | squid/3.5.20
11 | Microsoft-IIS/7.0
12 | lighttpd/1.4.25-devel-v2.2.24-45-gc66fb0e4
13 | Innbox_webserv
14 | WebSphere Application Server/7.0
15 | Satrack
16 | Dr.COM Server
17 | AvigilonGateway/1.0 Microsoft-HTTPAPI/2.0
18 | Oracle XML DB/Oracle Database
19 | Microsoft-HTTPAPI/2.0
20 | APPWebs/
21 | RTK Web 0.9
22 | ZLMediaKit(git hash:b4207240,branch:master,build time:Nov 2 2021 11:36:02)
23 | WintenDo
24 | Network_Module/1.0 (RX-V581)
25 | TOPSEC
26 | mhttpd v1.1
27 | WWW Server/1.1
28 | uhttpd/1.0.0
29 | L7Engine
30 | RomPager/4.07 UPnP/1.0
31 | boss/1.0 (BOSS)
32 | WebsServer
33 | none
34 | Microsoft-IIS/7.5
35 | mongo/2.0
36 | TwistedWeb/18.4.0
37 | Linux/2.6.21, UPnP/1.0, Portable SDK for UPnP devices/1.3.1
38 | axhttpd/1.5.3
39 | sslvpn 1.0
40 | HFS 2.3c
41 | fwebserver
42 | ReeCam IP Camera
43 | Electrex HTTPserver
44 | miniupnpd/1.0 UPnP/1.0
45 | NBB-414
46 | Indy/9.0.11
47 | squid/3.1.20
48 | BlueServer/5.5.4.1
49 | Linux,WEBACCESS/1.0,DIR-850LVer1.06
50 | LIVE555 Streaming Media v2020.04.24
51 | squid/4.13
52 | nginx-rc
53 | gunicorn
54 | Server Version 11.0
55 | web server
56 | RapidLogic/1.1
57 | Zabbix Zabbix
58 | Jetty(i-jetty 6.0-1570509416)
59 | IPC@CHIP
60 | Seeyon-Server/1.0
61 | mcdhttpd/1.2
62 | openresty/1.13.6.2
63 | TwistedWeb/12.1.0
64 | openresty/1.15.8.1
65 | secure
66 | Microsoft-IIS/6.0
67 | CJServer/1.1
68 | thttpd/2.29 23May2018
69 | PRTG/19.4.52.3515
70 | orbis
71 | Clayster.Library.Internet/1.1
72 | Embedthis-Appweb/3.3.1
73 | Arcadyan httpd 1.0
74 | Werkzeug/0.14.1Python/3.7.3
75 | SRS/4.0.161(Leo)
76 | Router Webserver
77 | Webio Embedded server v1.0
78 | hmhttpd/1.24-20160808
79 | BigIP
80 | HtNanoHttpd
81 | Jetty(9.4.z-SNAPSHOT)
82 | HASP LM/16.00
83 | Neobox Web Server
84 | kangle/3.5.8.2
85 | Jetty(7.6.13.v20130916)
86 | chan-cubox
87 | Sunny WebBox
88 | XCDN
89 | Http Server
90 | kx-ns1000
91 | demce.tk
92 | Jetty(winstone-2.8)
93 | Ruijie Servrer
94 | Mini web server 2.0 CDATA corp 2017.
95 | vws 1.7.12
96 | WSGIServer/0.2 CPython/3.7.7
97 | CradlepointHTTPService/1.0.0
98 | DasanNetwork Solution
99 | LSWebServer/1.0.0
100 | TASK-SERVER-NAS
101 | REST-Webs
102 | Docker/17.03.0-ce (linux)
103 | IPCamera-Webs/2.5.0
104 | WildFly/10
105 | lighttpd/1.4.26-devel-v14.07.2
106 | Titan
107 | Wintendo 1.3.3.7
108 | e7fd6736-5b0f-4af1-54a0-c95a00f208b
109 | Keil-EWEB/2.0
110 | H3C-Miniware-Webs
111 | CherryPy/3.1.2 WSGI Server
112 | CE_E
113 | PasteWSGIServer/0.5 Python/2.7.6
114 | alphapd/2.1.8
115 | HTTPD_ac 1.0
116 | FC03-HTTPS
117 | Sierra Wireless Inc, Embedded Server
118 | BackupSystem
119 | Oracle-HTTP-Server-11g
120 | -
121 | uc-httpd/1.0.0
122 | appnode/ccenter
123 | GoAhead-Webs/2.5.0PeerSec-MatrixSSL/3.1.3-OPEN
124 | squid/3.5.12
125 | openresty/1.19.9.1
126 | squid
127 | mini_httpd/1.1919dec2003
128 | TP-LINK HTTPD/1.0
129 | TeleEye/1.1
130 | Resin/3.1.8
131 | IdeaWebServer/0.83.415
132 | Linux/2.6.36, UPnP/1.0, Portable SDK for UPnP devices/1.3.1
133 | cdn
134 | OrientDB Server v.2.2.22 (build fb2b7d321ea8a5a5b18a82237049804aace9e3de)
135 | GoAhead-Webs/2.5.0 PeerSec-MatrixSSL/3.4.2-OPEN
136 | sposhttpd/4.8
137 | K2
138 | CUPS/1.7 IPP/2.1
139 | Sanoil Bayi Portal
140 | Oracle XML DB/Oracle9i Enterprise Edition Release 9.2.0.1.0 - Production
141 | IS2 Web Server 1.36
142 | JAWS/1.0 Aug 26 2015
143 | Jetty(i-jetty 3.8)
144 | Abyss/2.8.0.5-X2/B2-Win32 AbyssLib/2.8.0.1
145 | Apache/2.4.43 (Unix) OpenSSL/1.1.1g
146 | Asterisk/13.13.1
147 | WSGIServer/0.2 CPython/3.8.5
148 | BarracudaHTTP 4.0
149 | Cherokee/0.99.9 (UNIX)
150 | Jetty(6.1.x)
151 | Werkzeug/2.2.3 Python/3.10.11
152 | iVMS-Web
153 | sundray
154 | gws
155 | router webs
156 | HTTP Server 1.0
157 | Rumpus
158 | Stoneoim
159 | Linux,STUNNEL/1.0,DIR-850LVer1.13
160 | Werkzeug/0.14.1 Python/3.7.3
161 | SonicWALL
162 | Plex Media plexnifique,Plex Media Server
163 | Alpha_webserv
164 | Unknown
165 | ScreenConnect/21.14.5924.8013-1372366428 Microsoft-HTTPAPI/2.0
166 | Embedthis-http/4.0.0
167 | cc-web/1.6.3
168 | TwistedWeb/13.2.0
169 | tinyproxy/1.11.1
170 | NVR EXT SERVER
171 | Resin/4.0.36
172 | Safedog WAF
173 | Jexus/5.8.2.21 Linux
174 | Cassini/5.0.4.0
175 | Apache/2.4.9 (Win32) OpenSSL/1.0.1g PHP/5.5.11
176 | FN-Httpd 1.0 [HTTP/1.1]
177 | StartHTTP-Server/1.1
178 | ksc/1223
179 | localhost
180 | WebSockify Python/2.7.12
181 | sw-cp-server
182 | Astra
183 | Jetty(6.1.26.hwx)
184 | HP-iLO-Server/1.30
185 | ZLMediaKit-5.0(build in Apr 8 2021 03:17:35)
186 | Gnway Web Server
187 | Output-Streamer/0.2
188 | Mathopd/1.5p6
189 | SonicWALL SSL-VPN Web Server
190 | TornadoServer/6.0.4
191 | SEPM
192 | openresty/1.15.8.3
193 | yealink embed httpd
194 | MoxaHttp/2.3
195 | Server
196 | Easy-html
197 | Boa/0.93.15
198 | nginx-more
199 | eCos
200 | IPCGK7101HttpServer1.0
201 | gunicorn/20.0.4 831
202 | Ruijie Server
203 | TornadoServer/4.4.3
204 | swoole-http-server
205 | Jetty(9.2.14.v20151106)
206 | WebSphere Application Server/6.1
207 | nginx/1.19.1
208 | BaseHTTP/0.3 Python/2.7.5
209 | WSGIServer/0.1 Python/2.7.12
210 | openresty/1.19.3.1
211 | adong
212 | HttpServer
213 | Mongoose/6.11
214 | phpiis.com
215 | Ag [47]
216 | H3C-CVM
217 | mini_httpd
218 | gaaa
219 | RealVNC/E4
220 | DWS
221 | eWON
222 | Boa/0.94.14rc21
223 | CherryPy/3.2.5
224 | HFS 2.3 beta
225 | gunicorn/19.8.1
226 | lighttpd/1.4.19
227 | Webs
228 | shield
229 | SRS/4.0.201(Leo)
230 | NVR Web server
231 | Indy/9.0.50
232 | Web/0.02.07
233 | SinforHttpd/1.0
234 | X-web
235 | Abyss/2.7.0.0-X1-Win32 AbyssLib/2.7.0.0
236 | WebServer/1.0
237 | uc-httpd 1.0.0
238 | HCMSActiveX Viewer
239 | fasthttp
240 | MiniServ/1.610
241 | nginx/1.14.2
242 | AkamaiGHost
243 | Docker/19.03.8 (linux)
244 | Cimer
245 | WebBox-20
246 | Switch
247 | JAWS/1.0 Apr 8 2014
248 | Splunkd
249 | Docker/20.10.6 (linux)
250 | nginx-V-ddos
251 | WMSServer/2.0.1.0
252 | RTKWeb0.9
253 | Werkzeug/2.0.1 Python/3.7.11
254 | FPBX-14.0.17(13.38.3)
255 | QWS
256 | VA Web Server
257 | mORMot (Windows) Microsoft-HTTPAPI/2.0
258 | HTTP Server
259 | Microsoft-IIS/5.1
260 | WebServer-Webs/2.5.0
261 | HFS 2.4.0 RC7
262 | thttpd/2.25b PHP/20030920
263 | ttyd/1.6.3(libwebsockets/4.3.2-20221001-705-gae12d2f77)
264 | Web Server/1.0
265 | Octopus Deploy/ Microsoft-HTTPAPI/2.0
266 | IST OIS
267 | gunicorn/19.7.0
268 | squid/4.14
269 | WindRiver-WebServer/4.5
270 | Jetty(7.6.0.v20120127)
271 | Cross Web Server
272 | Check Point SVN foundation
273 | Linux,WEBACCESS/1.0,DIR-860LVer1.07
274 | 4fd7d672-bbd6-f059-9dff-51e02d6e25ee
275 | TornadoServer/5.0.2
276 | h2o/2.3.0-DEV@1d12c355
277 | VIAWEB system/1.0.0 (http://www.viawebsystem.com.br)
278 | GoAhead-Webs/2.5.0 PeerSec-MatrixSSL/3.1.3-OPEN
279 | DNVRS-Webs
280 | K1
281 | SuperSign
282 | thttpd/2.25b-lxc 29dec2003
283 | Unkown
284 | cloudflare-nginx
285 | SimpleHTTPWithUpload/0.1 Python/2.7.5
286 | Apache/2.4.12 (Win32) OpenSSL/1.0.1l PHP/5.6.8
287 | Nexus/3.9.0-01 (OSS)
288 | tsbox
289 | lighttpd/1.4.54
290 | 728/09824 HTTP Server version 2.0 - TELDAT S.A.
291 | mpd web server
292 | TwistedWeb/8.2.0
293 | TeamSpeak Server 3.13.2
294 | RomPager/4.51 UPnP/1.0
295 | WCY_WEBServer/2.0
296 | DOSarrest
297 | Tengine/1.5.2
298 | BaseHTTP/0.3 Python/2.7.17
299 | Python/3.11 aiohttp/3.8.5
300 | LOS HTTP Server 1.0
301 | Trend Chip UPnP/1.0 DMS
302 | lighttpd/1.4.37
303 | Polycom SoundPoint IP Telephone HTTPd
304 | lighttpd/1.4.28-devel-171:172M
305 | hidden
306 | Mini web server 1.0 ZTE corp 2005.
307 | d7b452-d70-1255-4515-9b56f13a6dab
308 | SimpleHTTP/0.6 Python/2.7.16
309 | lwIP/1.4.0 (http://savannah.nongnu.org/projects/lwip)
310 | Apache/1.3.31 (Unix)
311 | FunshionService1.0.1.63Beta
312 | Werkzeug/2.0.1 Python/3.6.9
313 | Qualvision -HTTPServer
314 | waitress
315 | CouchDB/2.1.0 (Erlang OTP/17)
316 | Resin/4.0.58
317 | nginx/0.7.64
318 | Destiny
319 | Streamd
320 | HTTPD_gw 1.0
321 | BaseHTTP/0.3 Python/2.7.10
322 | Boa/0.94.11
323 | Cloud
324 | openresty/1.11.2.1
325 | squid/3.1.23
326 | lighttpd/1.4.61
327 | jjhttpd v0.1.0
328 | nginx-upupw/1.8.0
329 | c01.sfy.nod
330 | gunicorn/19.4.5
331 | Linux/2.6.30.9-cig-4, UPnP/1.0, Portable SDK for UPnP devices/1.6.18
332 | Werkzeug/0.10.4 Python/2.7.1
333 | TwistedWeb/20.3.0
334 | Linux/2.6.5-it0, UPnP/1.0, Intel SDK for UPnP devices /1.2
335 | ch
336 | Beaver
337 | ATS/7.0.0
338 | Microsoft-IIS/8.5
339 | Jetty(9.4.33.v20201020)
340 | mginx
341 | contiki
342 | Tengine/2.1.2
343 | Tableau
344 | BlueServer/5.1.0.4
345 | Tengine/2.3.2
346 | ComfileHMI Runtime
347 | Icecast 2.4.2
348 | Linux/4.4.180+, UPnP/1.0, Portable SDK for UPnP devices/1.12.1
349 | ZLMediaKit(git hash:1292ec6,branch:master,build time:Sep 29 2021 18:28:24)
350 | AirTunes/377.30.01
351 | bks400
352 | Linux,WEBACCESS/1.0,DIR-850LVer1.13
353 | uc-httpd1.0.0
354 | AirDroid 2.0
355 | MobileSMARTS/5.2.46.14758 Microsoft-HTTPAPI/2.0
356 | TOSHIBA TEC CORPORATION
357 | RomPager/4.07
358 | Jetty(9.3.5.v20151012)
359 | wanduck
360 | Coruscant
361 | Apache
362 | HFS 2.4.0 RC6
363 | XovisPCSeries
364 | awselb/2.0
365 | pve-api-daemon/3.0
366 | nPerf/2.2.0 2019-04-02
367 | httpd/2.0
368 | HTTPServer
369 | yawcam/0.6.0
370 | K2P
371 | Jetty(8.2.0.v20160908)
372 | fibjs
373 | lighttpd/1.4.28-devel-10177
374 | airCube
375 | OpenCms/11.0.2
376 | webs
377 | lighttpd/1.4.35
378 | nginx-reuseport/1.13.4
379 | MQCache/2.1.0
380 | Keil-EWEB/2.1
381 | nhttpd/3.4.0 (yhttpd_core/1.3.2)
382 | gSOAP/2.7
383 | Linux/3.0.8, UPnP/1.0, Portable SDK for UPnP devices/1.6.19
384 | JAWS/1.0Mar172016
385 | JAWS/1.0Oct232015
386 | Jetty/5.1.x(Linux/2.6.26-2-686i386java/1.6.0_22
387 | ZTE-Webs/2.5.0 PeerSec-MatrixSSL/3.1.3-OPEN
388 |
389 | DGuard Center 7.1.6.9
390 | ReeCamIPCamera
391 | GoAhead-Webs/2.5.0
392 | (null)
393 | MochiWeb/1.0 (Any of you quaids got a smint?)
394 | Snorkel/02.03.00.04
395 | RomPager/4.37
396 | ioLogik Web Server/1.0
397 | hts_server
398 | Docker/20.10.2 (linux)
399 | mini_httpd/1.27 07Mar2017
400 | debut/1.30
401 | gSOAP/2.8
402 | Httpd
403 | PanWeb Server/ -
404 | Linux UPnP/1.0 Huawei-ATP-IGD
405 | K3
406 | dcs-lig-httpd
407 | WhoAmI
408 | TOS/1.12.1
409 | Linux/2.x.x, UPnP/1.0, pvConnect UPnP SDK/1.0, Twonky UPnP SDK/1.1
410 | Serv-U/15.3.2.155
411 | Jetty(6.1.14)
412 | Huawei Auth-Http Server 1.0
413 | Roku/10.0.0 UPnP/1.0 Roku/10.0.0
414 | nginx/1.10.1
415 | httpserver1.0
416 | Web Switch
417 | DVRDVS-Webs
418 | xxxx
419 | PoweredbyElectronicArts
420 | WebServer2
421 | WVS
422 | Boa/0.94.14rc20
423 | mini_httpd/1.21 18oct2014
424 | StreamSystem 4.0
425 | CherryPy/8.6.0
426 | ThunderDcdn
427 | Embedded HTTP Server.
428 | GlassFish Server Open Source Edition 4.1.2
429 | Jetty(6.1.26)
430 | UPnP/1.0 DLNADOC/1.50
431 | GoAhead-Webs
432 | CrushFTP HTTP Server
433 | WSTL CPE 1.0
434 | RGOS HTTP-Server/1.1
435 | Reposify
436 | OPNsense
437 | Linux/3.18.20, UPnP/1.0, Portable SDK for UPnP devices/1.6.21
438 | Kerio Connect 7.0.1
439 | HTTPD1.0
440 | kngx/1.10.2
441 | cwpsrv
442 | NetDataEmbeddedHTTPServer20200416-1574-g011c015a6
443 | convesio/2.1
444 | iPlanet-Web-Proxy-Server/3.6
445 | httpd_gargoyle/1.0 14mar2008
446 | H3C-CVM 5049
447 | openresty/1.11.2.5
448 | Apache/2.4.27 (Unix) OpenSSL/1.0.2k
449 | WebServer
450 | cPanel
451 | Nexus/3.0.0-03 (OSS)
452 | Intoto Http Server v1.0
453 | GoAheadWebs
454 | mini_httpd/1.24 10May2016
455 | Caddy
456 | MiniServ/1.941
457 | openresty/1.17.8.2
458 | iSpy
459 | Kerio Connect 9.2.8 patch 1
460 | SRS/5.0.12(Leo)
461 | A8000
462 | SY8045
463 | Microsoft-WinCE/6.00
464 | Jetty(9.4.44.v20210927)
465 | Mini web server 1.0 ZXIC corp 2005.
466 | Caddy v0.11.1
467 | 3S_WebServer
468 | RainLoop
469 |
--------------------------------------------------------------------------------
/tests/Listen_test.go:
--------------------------------------------------------------------------------
1 | package tests
2 |
3 | import (
4 | "bufio"
5 | "bytes"
6 | "context"
7 | "crypto/tls"
8 | "fmt"
9 | "hachimi/pkg/config"
10 | "hachimi/pkg/ingress"
11 | "hachimi/pkg/logger"
12 | "math/rand"
13 | "net/http"
14 | "net/url"
15 | "sync"
16 | "testing"
17 | "time"
18 | )
19 |
20 | type SimpleLineWriter struct {
21 | buffer bytes.Buffer
22 | mu sync.Mutex
23 | }
24 |
25 | func (w *SimpleLineWriter) Write(p []byte) (n int, err error) {
26 | w.mu.Lock()
27 | defer w.mu.Unlock()
28 | return w.buffer.Write(p)
29 | }
30 |
31 | func (w *SimpleLineWriter) ReadLines() []string {
32 | w.mu.Lock()
33 | defer w.mu.Unlock()
34 |
35 | var lines []string
36 | scanner := bufio.NewScanner(&w.buffer)
37 | for scanner.Scan() {
38 | lines = append(lines, scanner.Text())
39 | }
40 | // Clear the buffer after reading lines
41 | w.buffer.Reset()
42 | return lines
43 | }
44 |
45 | func NewSimpleLineWriter() *SimpleLineWriter {
46 | return &SimpleLineWriter{}
47 | }
48 |
49 | func TestListenerManager(t *testing.T) {
50 | // Test TCP Listeners
51 | writer := NewSimpleLineWriter()
52 |
53 | config.Logger = logger.NewJSONLLogger(writer, 100, "test")
54 | t.Run("Listeners", func(t *testing.T) {
55 | // Create a ListenerManager
56 | lm := ingress.NewListenerManager()
57 | // Add TCP listeners
58 | tcpListener := ingress.NewTCPListener("127.0.0.1", 54321)
59 | udpListener := ingress.NewUDPListener("127.0.0.1", 54321)
60 |
61 | lm.AddTCPListener(tcpListener)
62 | lm.AddUDPListener(udpListener)
63 | // Start all listeners
64 | lm.StartAll(context.Background())
65 | time.Sleep(1 * time.Second)
66 | })
67 |
68 | // Test HTTP and HTTPS Listeners
69 | t.Run("HTTP and HTTPS Log", func(t *testing.T) {
70 |
71 | specialParams := generateComprehensiveParams()
72 | binaryBody := generateRandBinary(1234)
73 | randomHeaders := generateRandomHeaders(5)
74 | randomPath := generateRandomString(10)
75 | // HTTP method
76 | methods := []string{"GET", "POST", "PUT", "DELETE", "PATCH", "OPTIONS", "HEAD"}
77 | //non-standard request method
78 | methods = append(methods, generateRandomMethod())
79 | client := &http.Client{
80 | Transport: &http.Transport{
81 | TLSClientConfig: &tls.Config{
82 | InsecureSkipVerify: true,
83 | },
84 | },
85 | }
86 | // http/https
87 | scheme := []string{"http", "https"}
88 | for _, s := range scheme {
89 | for _, method := range methods {
90 | // Prepare the HTTP request
91 |
92 | testUrl := fmt.Sprintf("%s://127.0.0.1:54321/%s?%s", s, randomPath, specialParams)
93 | req, err := http.NewRequest(method, testUrl, bytes.NewReader(binaryBody))
94 | if err != nil {
95 | t.Fatalf("Failed to create HTTP request: %v", err)
96 | }
97 | // Add random headers to the request
98 | for k, v := range randomHeaders {
99 | req.Header.Add(k, v)
100 | }
101 | // Perform the HTTP request
102 | resp, err := client.Do(req)
103 | if err != nil {
104 | t.Fatalf("Method %s URL %s failed: %v", method, testUrl, err)
105 | }
106 | resp.Body.Close()
107 | // Log the response
108 | t.Logf("Response: %d %s", resp.StatusCode, resp.Status)
109 | if resp.StatusCode != http.StatusOK {
110 | t.Errorf("Method %s URL %s failed with status code %d", method, testUrl, resp.StatusCode)
111 | }
112 | }
113 | config.Logger.Flush()
114 | logs := writer.ReadLines()
115 | if len(logs) != len(methods)*2 {
116 | t.Errorf("Expected %d log lines, got %d", len(methods)*2, len(logs))
117 | }
118 | //for _, logData := range logs {
119 | //TODO: HTTP log integrity checks
120 | //}
121 | t.Logf("%s test passed", s)
122 | }
123 | t.Logf("HTTP and HTTPS test passed")
124 | })
125 | }
126 |
127 | // Helper function to generate all special URL characters and binary data as parameters
128 | func generateComprehensiveParams() string {
129 | // Define all special URL characters
130 | specialChars := `!#$&'()*+,/:;=?@[]%`
131 |
132 | // Generate a key-value pair using all special characters and binary data
133 | key := url.QueryEscape("specialChars")
134 |
135 | value := url.QueryEscape(specialChars + string(generateFullBinary()))
136 |
137 | return fmt.Sprintf("%s=%s", key, value)
138 | }
139 |
140 | // Helper function to generate binary data containing all 0x00-0xFF bytes
141 | func generateFullBinary() []byte {
142 | body := make([]byte, 256)
143 | for i := 0; i < 256; i++ {
144 | body[i] = byte(i)
145 | }
146 | return body
147 | }
148 |
149 | // Helper function to generate rand binary data
150 | func generateRandBinary(len int) []byte {
151 | body := make([]byte, len)
152 | rand.Read(body)
153 | return body
154 | }
155 |
156 | func generateRandomHeaders(count int) map[string]string {
157 | headers := map[string]string{}
158 | for i := 0; i < count; i++ {
159 | key := generateRandomString(5)
160 | value := generateRandomString(10)
161 | headers[key] = value
162 | }
163 | return headers
164 | }
165 |
166 | func generateRandomString(length int) string {
167 | letters := "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
168 | result := make([]byte, length)
169 | for i := range result {
170 | result[i] = letters[rand.Intn(len(letters))]
171 | }
172 | return string(result)
173 | }
174 | func generateRandomMethod() string {
175 | letters := "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
176 | result := make([]byte, 5)
177 | for i := range result {
178 | result[i] = letters[rand.Intn(len(letters))]
179 | }
180 | return string(result)
181 | }
182 |
--------------------------------------------------------------------------------
/tests/headers.txt:
--------------------------------------------------------------------------------
1 | WWW-Authenticate: Basic realm="DVR"
2 | WWW-Authenticate: Basic realm="Broadband Router"
--------------------------------------------------------------------------------
/tests/servers.txt:
--------------------------------------------------------------------------------
1 | micro_httpd
2 | Microsoft-IIS/7.0
3 | Apache/2.2.14
--------------------------------------------------------------------------------
/tests/titles.txt:
--------------------------------------------------------------------------------
1 | Login
2 | 302 Moved
3 | Solr Admin
4 | Web user login
5 | Laravel
--------------------------------------------------------------------------------
/tests/utils_test.go:
--------------------------------------------------------------------------------
1 | package tests
2 |
3 | import (
4 | "crypto/rand"
5 | "fmt"
6 | "github.com/savsgio/gotils/bytes"
7 | "hachimi/pkg/utils"
8 | "testing"
9 | )
10 |
11 | func TestEscapeBytes(t *testing.T) {
12 | //00-FF
13 | var data []byte
14 | for i := 0; i <= 255; i++ {
15 | data = append(data, byte(i))
16 | }
17 | fmt.Println(utils.EscapeBytes(data))
18 | unescapeBytes, err := utils.UnescapeBytes(utils.EscapeBytes(data))
19 | if err != nil {
20 | t.Error(err)
21 | }
22 | if !bytes.Equal(unescapeBytes, data) {
23 | t.Error("UnescapeBytes error")
24 | }
25 | //10240 random bytes
26 | data = make([]byte, 10240)
27 | rand.Read(data)
28 | unescapeBytes, err = utils.UnescapeBytes(utils.EscapeBytes(data))
29 | if err != nil {
30 | t.Error(err)
31 | }
32 | if !bytes.Equal(unescapeBytes, data) {
33 | t.Error("UnescapeBytes error")
34 | }
35 | }
36 |
--------------------------------------------------------------------------------
/tool/gen_sql/gen_sql.go:
--------------------------------------------------------------------------------
1 | // gen_clickhouse.go
2 | package main
3 |
4 | import (
5 | "bytes"
6 | "fmt"
7 | "go/ast"
8 | "go/parser"
9 | "go/token"
10 | "io/ioutil"
11 | "os"
12 | "reflect"
13 | "strings"
14 | )
15 |
16 | func main() {
17 | if len(os.Args) < 2 {
18 | fmt.Println("Usage: gen_clickhouse <source_file.go>")
19 | os.Exit(1)
20 | }
21 | sourceFile := os.Args[1]
22 |
23 | // Parse the source file
24 | fset := token.NewFileSet()
25 | node, err := parser.ParseFile(fset, sourceFile, nil, parser.AllErrors)
26 | if err != nil {
27 | fmt.Printf("Failed to parse file: %v\n", err)
28 | os.Exit(1)
29 | }
30 |
31 | // Extract struct definitions and generate code
32 | var generatedCode bytes.Buffer
33 | for _, decl := range node.Decls {
34 | if code := processDecl(decl); code != "" {
35 | generatedCode.WriteString(code + "\n")
36 | }
37 | }
38 |
39 | // If any code was generated, insert it into the source file
40 | if generatedCode.Len() > 0 {
41 | err = updateSourceFile(sourceFile, generatedCode.String())
42 | if err != nil {
43 | fmt.Printf("Failed to update file: %v\n", err)
44 | os.Exit(1)
45 | }
46 | fmt.Printf("Successfully updated %s\n", sourceFile)
47 | }
48 | }
49 |
50 | // processDecl parses struct definitions and generates the table-creation and batch-insert methods
51 | func processDecl(decl ast.Decl) string {
52 | genDecl, ok := decl.(*ast.GenDecl)
53 | if !ok || genDecl.Tok != token.TYPE {
54 | return ""
55 | }
56 |
57 | var result strings.Builder
58 | for _, spec := range genDecl.Specs {
59 | typeSpec, ok := spec.(*ast.TypeSpec)
60 | if !ok {
61 | continue
62 | }
63 |
64 | structType, ok := typeSpec.Type.(*ast.StructType)
65 | if !ok {
66 | continue
67 | }
68 |
69 | // Extract struct information
70 | structName := typeSpec.Name.Name
71 | fields := extractFields(structType)
72 |
73 | // Generate the CREATE TABLE statement method
74 | result.WriteString(generateCreateTableMethod(structName, fields))
75 | result.WriteString("\n")
76 |
77 | // Generate the batch-insert method
78 | result.WriteString(generateInsertMethod(structName, fields))
79 | result.WriteString("\n")
80 | }
81 | return result.String()
82 | }
83 |
84 | // extractFields extracts field information from a struct
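// For reference, a field is only picked up when it carries both a ch_name and
// a ch_type struct tag; ch_order optionally marks the ORDER BY column.
// An illustrative (made-up) field would look like:
//
//	ID string `ch_name:"id" ch_type:"String" ch_order:"1"`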
85 | func extractFields(structType *ast.StructType) []map[string]string {
86 | fields := []map[string]string{}
87 |
88 | for _, field := range structType.Fields.List {
89 | if field.Tag == nil || len(field.Names) == 0 {
90 | continue
91 | }
92 |
93 | tags := reflect.StructTag(strings.Trim(field.Tag.Value, "`"))
94 | columnName := tags.Get("ch_name")
95 | columnType := tags.Get("ch_type")
96 | order := tags.Get("ch_order")
97 |
98 | if columnName != "" && columnType != "" {
99 | fields = append(fields, map[string]string{
100 | "FieldName": field.Names[0].Name,
101 | "ColumnName": columnName,
102 | "ColumnType": columnType,
103 | "order": order,
104 | })
105 | }
106 | }
107 |
108 | return fields
109 | }
110 |
111 | // generateCreateTableMethod generates the method that returns the CREATE TABLE statement
112 | func generateCreateTableMethod(structName string, fields []map[string]string) string {
113 | var query string
114 | var orderBy string
115 |
116 | // Generate the hard-coded SQL statement
117 | query = fmt.Sprintf("\nfunc CreateTable%s() string {\nquery := `CREATE TABLE IF NOT EXISTS %s (\n", CamelToUnderline(structName), CamelToUnderline(structName))
118 | // Iterate over the fields and generate column definitions
119 | for _, field := range fields {
120 | columnName := field["ColumnName"]
121 | columnType := field["ColumnType"]
122 | query += fmt.Sprintf("\t%s %s,\n", columnName, columnType)
123 | if order := field["order"]; order != "" {
124 | orderBy = columnName
125 | }
126 | }
127 |
128 | // Strip the trailing comma and append ENGINE and ORDER BY
129 | query = query[:len(query)-2] + "\n) ENGINE = MergeTree()"
130 | if orderBy != "" {
131 | query += fmt.Sprintf(" ORDER BY %s", orderBy)
132 | } else {
133 | // The default ordering column is id
134 | query += " ORDER BY id"
135 | }
136 | query += "`\n"
137 |
138 | query += "\n\treturn query\n}\n"
139 |
140 | // Return the complete SQL-generating code
141 | return query
142 | }
143 |
144 | // generateInsertMethod generates the batch-insert method
145 | func generateInsertMethod(structName string, fields []map[string]string) string {
146 | var query string
147 | var columnNames []string
148 | var fieldVars []string
149 |
150 | // Build the column-name part of the INSERT INTO statement
151 | for _, field := range fields {
152 | columnName := field["ColumnName"]
153 | columnNames = append(columnNames, columnName)
154 | fieldVars = append(fieldVars, strings.ToLower(structName)+"."+field["FieldName"])
155 | }
156 |
157 | // Assemble the INSERT INTO statement
158 | query = fmt.Sprintf(`
159 | func Insert%s(conn clickhouse.Conn, %ss []%s) error {
160 | batch, err := conn.PrepareBatch(context.Background(), "INSERT INTO %s (%s)")
161 | if err != nil {
162 | return fmt.Errorf("failed to prepare batch: %%w", err)
163 | }
164 |
165 | for _, %s := range %ss {
166 | if err := batch.Append(%s); err != nil {
167 | return fmt.Errorf("failed to append data: %%w", err)
168 | }
169 | }
170 |
171 | if err := batch.Send(); err != nil {
172 | return fmt.Errorf("failed to send batch: %%w", err)
173 | }
174 | return nil
175 | }
176 | `, structName, structName, structName, CamelToUnderline(structName), join(columnNames, ", "), strings.ToLower(structName), structName, join(fieldVars, ", "))
177 |
178 | return query
179 | }
180 |
181 | // join is a helper that joins a string slice with the given separator
182 | func join(arr []string, separator string) string {
183 | return strings.Join(arr, separator)
184 | }
185 |
186 | // updateSourceFile inserts the generated code into the source file between the generated-code markers
187 | func updateSourceFile(sourceFile string, generatedCode string) error {
188 | content, err := ioutil.ReadFile(sourceFile)
189 | if err != nil {
190 | return fmt.Errorf("failed to read source file: %w", err)
191 | }
192 | startMarker := "// Code generated by gen_clickhouse.go DO NOT EDIT.\n"
193 | endMarker := "// End of generated code"
194 | startIndex := bytes.Index(content, []byte(startMarker))
195 | endIndex := bytes.Index(content, []byte(endMarker))
196 |
197 | var updatedContent []byte
198 | if startIndex != -1 && endIndex != -1 {
199 | startIndex += len(startMarker)
200 | updatedContent = append(content[:startIndex-1], []byte(generatedCode)...)
201 | updatedContent = append(updatedContent, content[endIndex:]...)
202 | } else {
203 | updatedContent = append(content, []byte("\n"+startMarker+"\n")...)
204 | updatedContent = append(updatedContent, []byte(generatedCode)...)
205 | updatedContent = append(updatedContent, []byte(endMarker+"\n")...)
206 | }
207 |
208 | return ioutil.WriteFile(sourceFile, updatedContent, 0644)
209 | }
210 |
211 | // CamelToUnderline converts a camel case string to an underscore string.
212 | func CamelToUnderline(s string) string {
214 | var result string
215 | for i, c := range s {
216 | if 'A' <= c && c <= 'Z' {
217 | if i != 0 {
218 | result += "_"
219 | }
220 | result += string(c + 32)
221 | } else {
222 | result += string(c)
223 | }
224 | }
225 | return result
226 | }
227 |
--------------------------------------------------------------------------------
/tool/iptables.sh:
--------------------------------------------------------------------------------
1 |
2 | # Change the default SSH port
3 | sudo sed -i 's/#Port 22/Port 65532/g' /etc/ssh/sshd_config && sudo systemctl restart sshd
4 | sudo yum install -y iptables-services
5 | sudo iptables -P INPUT ACCEPT
6 | sudo iptables -P FORWARD ACCEPT
7 | sudo iptables -P OUTPUT ACCEPT
8 | sudo iptables -F INPUT
9 | sudo iptables -F FORWARD
10 | sudo iptables -F OUTPUT
11 | sudo iptables -F
12 | sudo iptables -X
13 | sudo iptables -Z
14 | sudo iptables -t mangle -F
15 | sudo iptables -t mangle -N DIVERT
16 | sudo iptables -t mangle -A DIVERT -j MARK --set-mark 1
17 | sudo iptables -t mangle -A DIVERT -j ACCEPT
18 | sudo iptables -t mangle -I PREROUTING -p tcp -m socket -j DIVERT
19 | sudo iptables -t mangle -I PREROUTING -p udp -m socket -j DIVERT
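# Packets that already match a local socket are diverted (marked 0x1) so that
# established TPROXY flows keep routing to the listener. The rules below then
# redirect inbound TCP/UDP on ports 0-12344 and 12346-65520 to the local
# listener on port 12345, skipping the listener's own port and the high ports
# (including the relocated SSH port 65532).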
20 | sudo iptables -t mangle -A PREROUTING -i $(ip -o -4 route show to default | awk '{print $5}') -p tcp -d $(hostname -I | awk '{print $1}') --dport 0:12344 -j TPROXY --tproxy-mark 0x1/0x1 --on-ip $(hostname -I | awk '{print $1}') --on-port 12345
21 | sudo iptables -t mangle -A PREROUTING -i $(ip -o -4 route show to default | awk '{print $5}') -p tcp -d $(hostname -I | awk '{print $1}') --dport 12346:65520 -j TPROXY --tproxy-mark 0x1/0x1 --on-ip $(hostname -I | awk '{print $1}') --on-port 12345
22 | sudo iptables -t mangle -A PREROUTING -i $(ip -o -4 route show to default | awk '{print $5}') -p udp -d $(hostname -I | awk '{print $1}') --dport 0:12344 -j TPROXY --tproxy-mark 0x1/0x1 --on-ip $(hostname -I | awk '{print $1}') --on-port 12345
23 | sudo iptables -t mangle -A PREROUTING -i $(ip -o -4 route show to default | awk '{print $5}') -p udp -d $(hostname -I | awk '{print $1}') --dport 12346:65520 -j TPROXY --tproxy-mark 0x1/0x1 --on-ip $(hostname -I | awk '{print $1}') --on-port 12345
24 |
25 | sudo ip6tables -P INPUT ACCEPT
26 | sudo ip6tables -P FORWARD ACCEPT
27 | sudo ip6tables -P OUTPUT ACCEPT
28 | sudo ip6tables -F INPUT
29 | sudo ip6tables -F FORWARD
30 | sudo ip6tables -F OUTPUT
31 | sudo ip6tables -F
32 | sudo ip6tables -X
33 | sudo ip6tables -Z
34 | sudo ip6tables -t mangle -F
35 | sudo ip6tables -t mangle -N DIVERT
36 | sudo ip6tables -t mangle -A DIVERT -j MARK --set-mark 1
37 | sudo ip6tables -t mangle -A DIVERT -j ACCEPT
38 | sudo ip6tables -t mangle -I PREROUTING -p tcp -m socket -j DIVERT
39 | sudo ip6tables -t mangle -I PREROUTING -p udp -m socket -j DIVERT
40 | sudo ip6tables -t mangle -A PREROUTING -i $(ip -o -6 route show to default | awk '{print $5}') -p udp -d $(hostname -I | awk '{print $2}') --dport 0:65535 -j TPROXY --tproxy-mark 0x1/0x1 --on-ip $(hostname -I | awk '{print $2}') --on-port 12345
41 | sudo ip6tables -t mangle -A PREROUTING -i $(ip -o -6 route show to default | awk '{print $5}') -p tcp -d $(hostname -I | awk '{print $2}') --dport 0:65535 -j TPROXY --tproxy-mark 0x1/0x1 --on-ip $(hostname -I | awk '{print $2}') --on-port 12345
42 |
43 | sudo service iptables save
44 | sudo systemctl start iptables
45 | sudo systemctl enable iptables
46 |
47 | sudo service ip6tables save
48 | sudo systemctl start ip6tables
49 | sudo systemctl enable ip6tables
50 |
--------------------------------------------------------------------------------