├── .travis.yml ├── LICENSE ├── LICENSE.golang ├── README.md ├── conn.go ├── conn_test.go ├── doc.go ├── gtcp_test.go ├── handler.go ├── handler_test.go ├── limiter.go ├── limiter_test.go ├── log.go ├── retry.go ├── retry_test.go ├── server.go ├── server_test.go ├── statistics.go ├── statistics_test.go ├── tracker.go └── tracker_test.go /.travis.yml: -------------------------------------------------------------------------------- 1 | language: go 2 | 3 | go: 4 | - 1.7 5 | - tip 6 | 7 | script: 8 | - go test -v -covermode=count -coverprofile=profile.cov 9 | after_success: 10 | - go get -v github.com/mattn/goveralls 11 | - export PATH=$PATH:$HOME/gopath/bin 12 | - goveralls -coverprofile=profile.cov -service=travis-ci 13 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2017 cat2neat. 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /LICENSE.golang: -------------------------------------------------------------------------------- 1 | Copyright (c) 2009 The Go Authors. All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without 4 | modification, are permitted provided that the following conditions are 5 | met: 6 | 7 | * Redistributions of source code must retain the above copyright 8 | notice, this list of conditions and the following disclaimer. 
9 | * Redistributions in binary form must reproduce the above 10 | copyright notice, this list of conditions and the following disclaimer 11 | in the documentation and/or other materials provided with the 12 | distribution. 13 | * Neither the name of Google Inc. nor the names of its 14 | contributors may be used to endorse or promote products derived from 15 | this software without specific prior written permission. 16 | 17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 18 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 19 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 20 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 21 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 22 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 23 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | gtcp 2 | ==== 3 | 4 | [![Go Report Card](https://goreportcard.com/badge/cat2neat/gtcp)](https://goreportcard.com/report/cat2neat/gtcp) [![Build Status](https://travis-ci.org/cat2neat/gtcp.svg?branch=master)](https://travis-ci.org/cat2neat/gtcp) [![Coverage Status](https://coveralls.io/repos/github/cat2neat/gtcp/badge.svg?branch=master)](https://coveralls.io/github/cat2neat/gtcp?branch=master) [![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](https://godoc.org/github.com/cat2neat/gtcp) 5 | 6 | Package gtcp is a TCP server framework that inherits battle-tested code from net/http 7 | and can be extended through built-in interfaces. 8 | 9 | ### Features 10 | 11 | - Can be used in the same manner as http.Server (>= 1.8). 12 | - Make the API as compatible as possible. 13 | - Make the zero value useful. 14 | 15 | - Inherits as much battle-tested code as possible from net/http. 16 | 17 | - Provides much flexibility through built-in interfaces (composed in the sketch below). 18 | - ConnHandler 19 | - ConnHandler 20 | - KeepAliveHandler that makes it easy to implement keepalive. 21 | - PipelineHandler that makes it easy to implement pipelining. 22 | - ConnTracker 23 | - MapConnTracker that handles force-closing active connections as well as graceful shutdown. 24 | - WGConnTracker that handles only graceful shutdown using a naive way with sync.WaitGroup. 25 | - Conn 26 | - BufferedConn that wraps Conn in bufio.Reader/Writer. 27 | - StatsConn that wraps Conn to measure incoming/outgoing bytes. 28 | - DebugConn that wraps Conn to output debug information. 29 | - Logger 30 | - BuiltinLogger that logs using the standard log package. 31 | - Retry 32 | - ExponentialRetry that implements an exponential backoff algorithm without jitter. 33 | - Statistics 34 | - TrafficStatistics that measures incoming/outgoing traffic across a server. 35 | - Limiter 36 | - MaxConnLimiter that limits connections based on the maximum number. 37 | 38 | - Keeps GC pressure as low as possible with sync.Pool. 39 | 40 | - Zero 3rd party dependencies.
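The built-in pieces above compose freely. Below is a minimal sketch (illustrative only, not part of the package; the function name `keepAliveEcho` and the chosen limits are made up) that layers StatsConn under BufferedConn, caps concurrent connections with MaxConnLimiter and serves requests through SetKeepAliveHandler:

```go
import (
	"time"

	"github.com/cat2neat/gtcp"
)

// keepAliveEcho is a hypothetical echo server composed from the built-in pieces:
// a layered Conn, a connection limiter and a keepalive request handler.
func keepAliveEcho() error {
	srv := &gtcp.Server{
		Addr: ":1979",
		// Wrap every accepted connection in StatsConn, then BufferedConn.
		NewConn: func(conn gtcp.Conn) gtcp.Conn {
			return gtcp.NewBufferedConn(gtcp.NewStatsConn(conn))
		},
		// Refuse new connections once 1024 are active.
		Limiters: []gtcp.Limiter{&gtcp.MaxConnLimiter{Max: 1024}},
	}
	// Reuse each connection for up to 30 seconds of idle time between requests.
	srv.SetKeepAliveHandler(30*time.Second, func(conn gtcp.Conn) error {
		buf := make([]byte, 1024)
		n, err := conn.Read(buf)
		if err != nil {
			return err
		}
		if _, err := conn.Write(buf[:n]); err != nil {
			return err
		}
		return conn.Flush()
	})
	return srv.ListenAndServe()
}
```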
41 | 42 | ### TODO 43 | 44 | - Support TLS 45 | 46 | - Support multiple listeners 47 | 48 | Example 49 | ------- 50 | 51 | ```go 52 | import ( "context" "github.com/cat2neat/gtcp" ) 53 | 54 | // echo server: 55 | // https://tools.ietf.org/html/rfc862 56 | func echoServer() error { 57 | srv := &gtcp.Server{ 58 | Addr: ":1979", 59 | NewConn: gtcp.NewStatsConn, 60 | ConnHandler: func(ctx context.Context, conn gtcp.Conn) { 61 | buf := make([]byte, 1024) 62 | for { 63 | n, err := conn.Read(buf) 64 | if err != nil { 65 | return 66 | } 67 | conn.Write(buf[:n]) 68 | err = conn.Flush() 69 | if err != nil { 70 | return 71 | } 72 | select { 73 | case <-ctx.Done(): 74 | // canceled by parent 75 | return 76 | default: 77 | } 78 | } 79 | }, 80 | } 81 | return srv.ListenAndServe() 82 | } 83 | ``` 84 | 85 | Install 86 | ------- 87 | 88 | ```shell 89 | go get -u github.com/cat2neat/gtcp 90 | ``` 91 | -------------------------------------------------------------------------------- /conn.go: -------------------------------------------------------------------------------- 1 | package gtcp 2 | 3 | import ( 4 | "bufio" 5 | "context" 6 | "errors" 7 | "log" 8 | "net" 9 | "sync" 10 | "sync/atomic" 11 | ) 12 | 13 | type ( 14 | // Conn is the interface that wraps connection-specific operations 15 | // in addition to net.Conn. 16 | Conn interface { 17 | net.Conn 18 | // Flush writes any buffered data to the underlying net.Conn. 19 | Flush() error 20 | 21 | // SetCancelFunc sets context.CancelFunc that is called automatically 22 | // when Read/Write fails. 23 | SetCancelFunc(context.CancelFunc) 24 | 25 | // Stats returns in/out bytes gone through this Conn. 26 | Stats() (int64, int64) 27 | 28 | // SetIdle sets whether this Conn is idle or not. 29 | // It's used to realize gtcp.Server.Shutdown. 30 | SetIdle(bool) 31 | 32 | // IsIdle returns whether this Conn is idle or not. 33 | // It's used to realize gtcp.Server.Shutdown. 34 | IsIdle() bool 35 | 36 | // Peek returns the next n bytes without advancing the reader. 37 | Peek(int) ([]byte, error) 38 | } 39 | 40 | // NewConn is the function that takes a Conn and returns another Conn, 41 | // which makes it possible to generate a multi-layered Conn. 42 | // ex) 43 | // func(conn Conn) Conn { 44 | // return gtcp.NewBufferedConn(gtcp.NewStatsConn(conn)) 45 | // } 46 | NewConn func(Conn) Conn 47 | 48 | baseConn struct { 49 | net.Conn 50 | CancelFunc context.CancelFunc 51 | idle atomicBool 52 | hasByte bool 53 | byteBuf [1]byte 54 | mu sync.Mutex // guard Close invoked from multiple goroutines 55 | } 56 | 57 | // BufferedConn implements Conn that wraps Conn in bufio.Reader|Writer. 58 | BufferedConn struct { 59 | Conn 60 | bufr *bufio.Reader 61 | bufw *bufio.Writer 62 | once sync.Once 63 | } 64 | 65 | // StatsConn implements Conn that holds in/out bytes. 66 | StatsConn struct { 67 | Conn 68 | // InBytes stores incoming bytes. 69 | InBytes int64 70 | // OutBytes stores outgoing bytes. 71 | OutBytes int64 72 | } 73 | 74 | // DebugConn implements Conn that logs every Read/Write/Close operation using the standard log package. 75 | DebugConn struct { 76 | Conn 77 | } 78 | 79 | atomicBool int32 80 | ) 81 | 82 | var ( 83 | // ErrBufferFull is returned when Peek takes a larger value than its buffer.
84 | ErrBufferFull = errors.New("gtcp: buffer full") 85 | ) 86 | 87 | var ( 88 | readerPool sync.Pool 89 | writerPool sync.Pool 90 | basePool sync.Pool 91 | ) 92 | 93 | func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 } 94 | func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) } 95 | func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) } 96 | 97 | // NewBaseConn takes net.Conn and returns Conn that wraps net.Conn. 98 | // It's exported only for test purpose. 99 | func NewBaseConn(conn net.Conn) Conn { 100 | if v := basePool.Get(); v != nil { 101 | bc := v.(*baseConn) 102 | bc.Conn = conn 103 | return bc 104 | } 105 | return &baseConn{ 106 | Conn: conn, 107 | } 108 | } 109 | 110 | func (bc *baseConn) Read(buf []byte) (n int, err error) { 111 | if bc.hasByte { 112 | buf[0] = bc.byteBuf[0] 113 | bc.hasByte = false 114 | return 1, nil 115 | } 116 | n, err = bc.Conn.Read(buf) 117 | if err != nil && bc.CancelFunc != nil { 118 | bc.CancelFunc() 119 | } 120 | return 121 | } 122 | 123 | func (bc *baseConn) Write(buf []byte) (n int, err error) { 124 | n, err = bc.Conn.Write(buf) 125 | if err != nil && bc.CancelFunc != nil { 126 | bc.CancelFunc() 127 | } 128 | return 129 | } 130 | 131 | func (bc *baseConn) Flush() error { 132 | return nil 133 | } 134 | 135 | func (bc *baseConn) SetCancelFunc(cancel context.CancelFunc) { 136 | bc.CancelFunc = cancel 137 | } 138 | 139 | func (bc *baseConn) Stats() (int64, int64) { 140 | return 0, 0 141 | } 142 | 143 | func (bc *baseConn) SetIdle(idle bool) { 144 | if idle { 145 | bc.idle.setTrue() 146 | } else { 147 | bc.idle.setFalse() 148 | } 149 | } 150 | 151 | func (bc *baseConn) IsIdle() bool { 152 | return bc.idle.isSet() 153 | } 154 | 155 | func (bc *baseConn) Peek(n int) (buf []byte, err error) { 156 | if n > 1 { 157 | err = ErrBufferFull 158 | } 159 | if bc.hasByte { 160 | return bc.byteBuf[:], err 161 | } 162 | rn, rerr := bc.Conn.Read(bc.byteBuf[:]) 163 | if rn == 1 { 164 | bc.hasByte = true 165 | buf = bc.byteBuf[:] 166 | } 167 | if rerr != nil { 168 | err = rerr // override 169 | } 170 | return 171 | } 172 | 173 | func (bc *baseConn) Close() (err error) { 174 | bc.mu.Lock() 175 | defer bc.mu.Unlock() 176 | if bc.Conn == nil { 177 | return 178 | } 179 | err = bc.Conn.Close() 180 | bc.Conn = nil 181 | basePool.Put(bc) 182 | return 183 | } 184 | 185 | // NewBufferedConn returns Conn wraps a given Conn in bufio.Reader|Writer. 186 | func NewBufferedConn(conn Conn) Conn { 187 | var br *bufio.Reader 188 | var bw *bufio.Writer 189 | if v := readerPool.Get(); v != nil { 190 | br = v.(*bufio.Reader) 191 | br.Reset(conn) 192 | } else { 193 | br = bufio.NewReader(conn) 194 | } 195 | if v := writerPool.Get(); v != nil { 196 | bw = v.(*bufio.Writer) 197 | bw.Reset(conn) 198 | } else { 199 | bw = bufio.NewWriter(conn) 200 | } 201 | return &BufferedConn{ 202 | Conn: conn, 203 | bufr: br, 204 | bufw: bw, 205 | } 206 | } 207 | 208 | // Read reads data into buf using internal bufio.Reader. 209 | // It returns the number of bytes read into buf. 210 | func (b *BufferedConn) Read(buf []byte) (n int, err error) { 211 | n, err = b.bufr.Read(buf) 212 | return 213 | } 214 | 215 | // Write writes the contents of buf into the internal bufio.Writer. 216 | // It returns the number of bytes written. 217 | func (b *BufferedConn) Write(buf []byte) (n int, err error) { 218 | n, err = b.bufw.Write(buf) 219 | return 220 | } 221 | 222 | // Close closes the internal bufio.Reader|Writer and also Conn. 
223 | // It's protected by sync.Once as multiple goroutines can call 224 | // especially in case using gtcp.Server.Close|Shutdown. 225 | func (b *BufferedConn) Close() (err error) { 226 | b.once.Do(func() { 227 | b.bufr.Reset(nil) 228 | readerPool.Put(b.bufr) 229 | b.bufr = nil 230 | err = b.bufw.Flush() 231 | b.bufw.Reset(nil) 232 | writerPool.Put(b.bufw) 233 | b.bufw = nil 234 | e := b.Conn.Close() 235 | if err == nil { 236 | err = e 237 | } 238 | }) 239 | return 240 | } 241 | 242 | // Flush writes any buffered data to the underlying Conn. 243 | func (b *BufferedConn) Flush() (err error) { 244 | return b.bufw.Flush() 245 | } 246 | 247 | // Peek returns the next n bytes without advancing the reader. 248 | func (b *BufferedConn) Peek(n int) ([]byte, error) { 249 | return b.bufr.Peek(n) 250 | } 251 | 252 | // NewStatsConn returns Conn that holds in/out bytes. 253 | func NewStatsConn(conn Conn) Conn { 254 | return &StatsConn{Conn: conn} 255 | } 256 | 257 | // Read reads data into buf and returns the number of bytes read into buf. 258 | // It also adds InBytes and bytes read up. 259 | func (s *StatsConn) Read(buf []byte) (n int, err error) { 260 | n, err = s.Conn.Read(buf) 261 | s.InBytes += int64(n) 262 | return 263 | } 264 | 265 | // Write writes the contents of buf and returns the number of bytes written. 266 | // It also adds OutBytes and bytes written up. 267 | func (s *StatsConn) Write(buf []byte) (n int, err error) { 268 | n, err = s.Conn.Write(buf) 269 | s.OutBytes += int64(n) 270 | return 271 | } 272 | 273 | // Stats returns in/out bytes gone through this Conn. 274 | func (s *StatsConn) Stats() (int64, int64) { 275 | return s.InBytes, s.OutBytes 276 | } 277 | 278 | // NewDebugConn returns Conn that logs debug information using standard log. 279 | func NewDebugConn(conn Conn) Conn { 280 | return &DebugConn{Conn: conn} 281 | } 282 | 283 | // Read reads data into buf and returns the number of bytes read into buf. 284 | // It also outputs debug information before/after calling internal Conn.Read. 285 | func (d *DebugConn) Read(buf []byte) (n int, err error) { 286 | log.Printf("Read(%d) = ....", len(buf)) 287 | n, err = d.Conn.Read(buf) 288 | log.Printf("Read(%d) = %d, %v", len(buf), n, err) 289 | return 290 | } 291 | 292 | // Write writes the contents of buf and returns the number of bytes written. 293 | // It also outputs debug information before/after calling internal Conn.Write. 294 | func (d *DebugConn) Write(buf []byte) (n int, err error) { 295 | log.Printf("Write(%d) = ....", len(buf)) 296 | n, err = d.Conn.Write(buf) 297 | log.Printf("Write(%d) = %d, %v", len(buf), n, err) 298 | return 299 | } 300 | 301 | // Close closes the internal Conn. 302 | // It also outputs debug information before/after calling internal Conn.Close(). 
303 | func (d *DebugConn) Close() (err error) { 304 | log.Printf("Close() = ...") 305 | err = d.Conn.Close() 306 | log.Printf("Close() = %v", err) 307 | return 308 | } 309 | -------------------------------------------------------------------------------- /conn_test.go: -------------------------------------------------------------------------------- 1 | package gtcp_test 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/cat2neat/gtcp" 8 | ) 9 | 10 | func TestBaseConn(t *testing.T) { 11 | t.Parallel() 12 | dc := &debugNetConn{} 13 | bc := gtcp.NewBaseConn(dc) 14 | // read related 15 | buf := make([]byte, 4) 16 | dc.ReadFunc = func(buf []byte) (int, error) { 17 | copy(buf, smashingStr) 18 | return len(smashingStr), nil 19 | } 20 | n, err := bc.Read(buf) 21 | if n != 4 || string(buf) != smashingStr || err != nil { 22 | t.Errorf("gtcp_test: BaseConn.Read expected: 4, nil actual: %d, %+v\n", n, err) 23 | } 24 | dc.ReadFunc = func(buf []byte) (int, error) { 25 | return 0, errTest 26 | } 27 | n, err = bc.Read(buf) 28 | if n != 0 || err != errTest { 29 | t.Errorf("gtcp_test: BaseConn.Read expected: 0, nil actual: %d, %+v\n", n, err) 30 | } 31 | ctx, cancel := context.WithCancel(context.Background()) 32 | bc.SetCancelFunc(cancel) 33 | n, err = bc.Read(buf) 34 | if n != 0 || err != errTest { 35 | t.Errorf("gtcp_test: BaseConn.Read expected: 0, nil actual: %d, %+v\n", n, err) 36 | } 37 | <-ctx.Done() // CancelFunc should be called when error happened 38 | // idle related 39 | idle := bc.IsIdle() 40 | if idle { 41 | t.Errorf("gtcp_test: BaseConn.IsIdle expected: false actual: %v\n", idle) 42 | } 43 | bc.SetIdle(true) 44 | idle = bc.IsIdle() 45 | if !idle { 46 | t.Errorf("gtcp_test: BaseConn.IsIdle expected: true actual: %v\n", idle) 47 | } 48 | bc.SetIdle(false) 49 | idle = bc.IsIdle() 50 | if idle { 51 | t.Errorf("gtcp_test: BaseConn.IsIdle expected: false actual: %v\n", idle) 52 | } 53 | // write related 54 | dc.WriteFunc = func(buf []byte) (int, error) { 55 | return 4, nil 56 | } 57 | n, err = bc.Write(buf) 58 | if n != 4 || err != nil { 59 | t.Errorf("gtcp_test: BaseConn.Write expected: 4, nil actual: %d, %+v\n", n, err) 60 | } 61 | bc.SetCancelFunc(nil) 62 | dc.WriteFunc = func(buf []byte) (int, error) { 63 | return 0, errTest 64 | } 65 | n, err = bc.Write(buf) 66 | if n != 0 || err != errTest { 67 | t.Errorf("gtcp_test: BaseConn.Write expected: 0, errTest actual: %d, %+v\n", n, err) 68 | } 69 | ctx, cancel = context.WithCancel(context.Background()) 70 | bc.SetCancelFunc(cancel) 71 | n, err = bc.Write(buf) 72 | if n != 0 || err != errTest { 73 | t.Errorf("gtcp_test: BaseConn.Write expected: 0, errTest actual: %d, %+v\n", n, err) 74 | } 75 | <-ctx.Done() // CancelFunc should be called when error happened 76 | in, out := bc.Stats() 77 | if in != 0 || out != 0 { 78 | t.Errorf("gtcp_test: BaseConn.Stats expected: 0, 0 actual: %d, %d\n", in, out) 79 | } 80 | // peek related 81 | dc.ReadFunc = func(buf []byte) (int, error) { 82 | copy(buf, smashingStr[:len(buf)]) 83 | return len(buf), nil 84 | } 85 | pbuf, err := bc.Peek(1) 86 | if len(pbuf) != 1 || err != nil { 87 | t.Errorf("gtcp_test: BaseConn.Peek expected: 1, nil actual: %d, %+v\n", len(pbuf), err) 88 | } 89 | pbuf, err = bc.Peek(2) 90 | if len(pbuf) != 1 || err != gtcp.ErrBufferFull { 91 | t.Errorf("gtcp_test: BaseConn.Peek expected: 1, %+v actual: %d, %+v\n", gtcp.ErrBufferFull, len(pbuf), err) 92 | } 93 | n, err = bc.Read(buf) 94 | if n != 1 || err != nil { 95 | t.Errorf("gtcp_test: BaseConn.Read expected: 1,nil actual: %d, 
%+v\n", n, err) 96 | } 97 | n, err = bc.Read(buf) 98 | if n != 4 || err != nil { 99 | t.Errorf("gtcp_test: BaseConn.Read expected: 4,nil actual: %d, %+v\n", n, err) 100 | } 101 | dc.ReadFunc = func(buf []byte) (int, error) { 102 | return 0, errTest 103 | } 104 | pbuf, err = bc.Peek(4) 105 | if pbuf != nil || err != errTest { 106 | t.Errorf("gtcp_test: BaseConn.Peek expected: nil, errTest actual: %+v, %+v\n", pbuf, err) 107 | } 108 | // others 109 | bc.Flush() 110 | err = bc.Close() 111 | if err != nil { 112 | t.Errorf("gtcp_test: BaseConn.Close expected: nil actual: %+v\n", err) 113 | } 114 | bc.Close() 115 | bc = gtcp.NewBaseConn(dc) 116 | bc.Close() 117 | } 118 | 119 | func TestBufferedConn(t *testing.T) { 120 | t.Parallel() 121 | dc := &debugNetConn{} 122 | bc := gtcp.NewBaseConn(dc) 123 | var rr struct { 124 | buf []byte 125 | n int 126 | err error 127 | } 128 | dc.ReadFunc = func(buf []byte) (int, error) { 129 | copy(buf, smashingStr) 130 | return len(smashingStr), nil 131 | } 132 | dc.WriteFunc = func(buf []byte) (int, error) { 133 | rr.buf, rr.n, rr.err = buf, len(buf), nil 134 | return rr.n, rr.err 135 | } 136 | bufc := gtcp.NewBufferedConn(bc) 137 | buf := make([]byte, 4) 138 | n, err := bufc.Read(buf) 139 | if n != 4 || string(buf) != smashingStr || err != nil { 140 | t.Errorf("gtcp_test: BufferedConn.Read expected: 4, nil actual: %d, %+v\n", n, err) 141 | } 142 | n, err = bufc.Write([]byte(smashingStr)) 143 | if n != 4 || err != nil { 144 | t.Errorf("gtcp_test: BufferedConn.Write expected: 4, nil actual: %d, %+v\n", n, err) 145 | } 146 | if rr.buf != nil { 147 | t.Errorf("gtcp_test: BufferedConn.Write should not call the wrapped BaseConn.Write\n") 148 | } 149 | err = bufc.Flush() // Flush trigger the wrapped BaseConn.Write 150 | if err != nil || rr.n != len(smashingStr) || rr.err != nil || string(rr.buf) != smashingStr { 151 | t.Errorf("gtcp_test: BufferedConn.Flush expected: nil actual: %+v\n", err) 152 | } 153 | err = bufc.Close() 154 | bc = gtcp.NewBaseConn(dc) 155 | bufc = gtcp.NewBufferedConn(bc) // whether bufio reused can be confirmed by coverage 156 | buf, err = bufc.Peek(2) 157 | if len(buf) != 2 || string(buf) != smashingStr[:2] || err != nil { 158 | t.Errorf("gtcp_test: BufferedConn.Peek expected: 2, nil actual: %d, %+v\n", n, err) 159 | } 160 | rr.buf, rr.n, rr.err = nil, 0, nil 161 | _, _ = bufc.Write([]byte(smashingStr)) 162 | bufc.Close() // Close trigger the wrapped BaseConn.Write 163 | if rr.n != len(smashingStr) || rr.err != nil || string(rr.buf) != smashingStr { 164 | t.Errorf("gtcp_test: BufferedConn.Close should trigger the wrapped BaseConn.Write\n") 165 | } 166 | dc.WriteFunc = func(buf []byte) (int, error) { 167 | return 0, errTest 168 | } 169 | bc = gtcp.NewBaseConn(dc) 170 | bufc = gtcp.NewBufferedConn(bc) 171 | n, err = bufc.Write([]byte(smashingStr)) 172 | if err != nil { 173 | t.Errorf("gtcp_test: BufferedConn.Write expected: 4, nil actual: %d, %+v\n", n, err) 174 | } 175 | err = bufc.Close() 176 | if err != errTest { 177 | t.Errorf("gtcp_test: BufferedConn.Close expected: errTest actual: %+v\n", err) 178 | } 179 | } 180 | 181 | func TestStatsConn(t *testing.T) { 182 | t.Parallel() 183 | dc := &debugNetConn{} 184 | bc := gtcp.NewBaseConn(dc) 185 | dc.ReadFunc = func(buf []byte) (int, error) { 186 | copy(buf, smashingStr) 187 | return len(smashingStr), nil 188 | } 189 | dc.WriteFunc = func(buf []byte) (int, error) { 190 | return len(buf), nil 191 | } 192 | sc := gtcp.NewStatsConn(bc) 193 | buf := make([]byte, 4) 194 | n, err := sc.Read(buf) 195 | if n 
!= 4 || string(buf) != smashingStr || err != nil { 196 | t.Errorf("gtcp_test: StatsConn.Read expected: 4, nil actual: %d, %+v\n", n, err) 197 | } 198 | _, _ = sc.Read(buf) 199 | _, _ = sc.Read(buf) 200 | n, err = sc.Write([]byte(smashingStr)) 201 | if n != 4 || err != nil { 202 | t.Errorf("gtcp_test: StatsConn.Write expected: 4, nil actual: %d, %+v\n", n, err) 203 | } 204 | in, out := sc.Stats() 205 | if in != 12 || out != 4 { 206 | t.Errorf("gtcp_test: StatsConn.Stats expected: 12, 4 actual: %d, %d\n", in, out) 207 | } 208 | } 209 | 210 | func TestDebugConn(t *testing.T) { 211 | dc := &debugNetConn{} 212 | bc := gtcp.NewBaseConn(dc) 213 | dc.ReadFunc = func(buf []byte) (int, error) { 214 | copy(buf, smashingStr) 215 | return len(smashingStr), nil 216 | } 217 | dc.WriteFunc = func(buf []byte) (int, error) { 218 | return len(buf), nil 219 | } 220 | c := gtcp.NewDebugConn(bc) 221 | buf := make([]byte, 4) 222 | n, err := c.Read(buf) 223 | if n != 4 || string(buf) != smashingStr || err != nil { 224 | t.Errorf("gtcp_test: DebugConn.Read expected: 4, nil actual: %d, %+v\n", n, err) 225 | } 226 | n, err = c.Write([]byte(smashingStr)) 227 | if n != 4 || err != nil { 228 | t.Errorf("gtcp_test: DebugConn.Write expected: 4, nil actual: %d, %+v\n", n, err) 229 | } 230 | err = c.Close() 231 | if err != nil { 232 | t.Errorf("gtcp_test: DebugConn.Close expected: nil actual: %+v\n", err) 233 | } 234 | } 235 | 236 | func TestLayeredConn(t *testing.T) { 237 | dc := &debugNetConn{} 238 | dc.ReadFunc = func(buf []byte) (int, error) { 239 | copy(buf, smashingStr) 240 | return len(smashingStr), nil 241 | } 242 | dc.WriteFunc = func(buf []byte) (int, error) { 243 | return len(buf), nil 244 | } 245 | c := gtcp.NewStatsConn(gtcp.NewDebugConn(gtcp.NewBaseConn(dc))) 246 | buf := make([]byte, 4) 247 | n, err := c.Read(buf) 248 | if n != 4 || string(buf) != smashingStr || err != nil { 249 | t.Errorf("gtcp_test: Read expected: 4, nil actual: %d, %+v\n", n, err) 250 | } 251 | n, err = c.Write([]byte(smashingStr)) 252 | if n != 4 || err != nil { 253 | t.Errorf("gtcp_test: Write expected: 4, nil actual: %d, %+v\n", n, err) 254 | } 255 | err = c.Close() 256 | if err != nil { 257 | t.Errorf("gtcp_test: Close expected: nil actual: %+v\n", err) 258 | } 259 | in, out := c.Stats() 260 | if in != 4 || out != 4 { 261 | t.Errorf("gtcp_test: Stats expected: 4, 4 actual: %d, %d\n", in, out) 262 | } 263 | c = gtcp.NewBufferedConn(gtcp.NewStatsConn(gtcp.NewDebugConn(gtcp.NewBaseConn(dc)))) 264 | n, err = c.Read(buf) 265 | if n != 4 || string(buf) != smashingStr || err != nil { 266 | t.Errorf("gtcp_test: Read expected: 4, nil actual: %d, %+v\n", n, err) 267 | } 268 | n, err = c.Write([]byte(smashingStr)) 269 | if n != 4 || err != nil { 270 | t.Errorf("gtcp_test: Write expected: 4, nil actual: %d, %+v\n", n, err) 271 | } 272 | c.Write([]byte(smashingStr)) 273 | c.Write([]byte(smashingStr)) 274 | c.Write([]byte(smashingStr)) 275 | in, out = c.Stats() 276 | if in != 4 || out != 0 { 277 | // Write still buffered 278 | t.Errorf("gtcp_test: Stats expected: 4, 0 actual: %d, %d\n", in, out) 279 | } 280 | err = c.Close() 281 | if err != nil { 282 | t.Errorf("gtcp_test: Close expected: nil actual: %+v\n", err) 283 | } 284 | in, out = c.Stats() 285 | if in != 4 || out != 16 { 286 | t.Errorf("gtcp_test: Stats expected: 4, 16 actual: %d, %d\n", in, out) 287 | } 288 | } 289 | -------------------------------------------------------------------------------- /doc.go: -------------------------------------------------------------------------------- 1 | 
/* 2 | Package gtcp is a TCP server framework that inherits battle-tested code from net/http 3 | and can be extended through built-in interfaces. 4 | 5 | ### Features 6 | 7 | - Can be used in the same manner as http.Server (>= 1.8). 8 | - Make the API as compatible as possible. 9 | - Make the zero value useful. 10 | 11 | - Inherits as much battle-tested code as possible from net/http. 12 | 13 | - Provides much flexibility through built-in interfaces. 14 | - ConnHandler 15 | - ConnHandler 16 | - KeepAliveHandler that makes it easy to implement keepalive. 17 | - PipelineHandler that makes it easy to implement pipelining. 18 | - ConnTracker 19 | - MapConnTracker that handles force-closing active connections as well as graceful shutdown. 20 | - WGConnTracker that handles only graceful shutdown using a naive way with sync.WaitGroup. 21 | - Conn 22 | - BufferedConn that wraps Conn in bufio.Reader/Writer. 23 | - StatsConn that wraps Conn to measure incoming/outgoing bytes. 24 | - DebugConn that wraps Conn to output debug information. 25 | - Logger 26 | - BuiltinLogger that logs using the standard log package. 27 | - Retry 28 | - ExponentialRetry that implements an exponential backoff algorithm without jitter. 29 | - Statistics 30 | - TrafficStatistics that measures incoming/outgoing traffic across a server. 31 | - Limiter 32 | - MaxConnLimiter that limits connections based on the maximum number. 33 | 34 | - Keeps GC pressure as low as possible with sync.Pool. 35 | 36 | - Zero 3rd party dependencies. 37 | 38 | ### TODO 39 | 40 | - Support TLS 41 | 42 | - Support multiple listeners 43 | 44 | */ 45 | package gtcp 46 | -------------------------------------------------------------------------------- /gtcp_test.go: -------------------------------------------------------------------------------- 1 | package gtcp_test 2 | 3 | import ( 4 | "errors" 5 | "net" 6 | "strconv" 7 | "time" 8 | ) 9 | 10 | type ( 11 | debugNetConn struct { 12 | ReadFunc func([]byte) (int, error) 13 | WriteFunc func([]byte) (int, error) 14 | Local string 15 | Remote string 16 | } 17 | nullLogger struct{} 18 | ) 19 | 20 | const ( 21 | smashingInt = 1979 22 | ) 23 | 24 | var ( 25 | errTest = errors.New("errTest") 26 | smashingStr = strconv.FormatInt(smashingInt, 10) 27 | nl = nullLogger{} 28 | ) 29 | 30 | func (l nullLogger) Errorf(fmt string, args ...interface{}) { 31 | // nop 32 | } 33 | 34 | func (dc *debugNetConn) Read(b []byte) (int, error) { 35 | return dc.ReadFunc(b) 36 | } 37 | 38 | func (dc *debugNetConn) Write(b []byte) (int, error) { 39 | return dc.WriteFunc(b) 40 | } 41 | 42 | func (dc *debugNetConn) Close() error { 43 | return nil 44 | } 45 | 46 | func (dc *debugNetConn) LocalAddr() net.Addr { 47 | return &net.TCPAddr{ 48 | IP: net.ParseIP(dc.Local), 49 | Port: smashingInt, 50 | } 51 | } 52 | 53 | func (dc *debugNetConn) RemoteAddr() net.Addr { 54 | return &net.TCPAddr{ 55 | IP: net.ParseIP(dc.Remote), 56 | Port: smashingInt, 57 | } 58 | } 59 | 60 | func (dc *debugNetConn) SetDeadline(t time.Time) error { 61 | return nil 62 | } 63 | 64 | func (dc *debugNetConn) SetReadDeadline(t time.Time) error { 65 | return nil 66 | } 67 | 68 | func (dc *debugNetConn) SetWriteDeadline(t time.Time) error { 69 | return nil 70 | } 71 | -------------------------------------------------------------------------------- /handler.go: -------------------------------------------------------------------------------- 1 | package gtcp 2 | 3 | import ( 4 | "context" 5 | "io" 6 | "runtime" 7 | "time" 8 | ) 9 | 10 | type ( 11 | // WriteFlusher is the interface that wraps
write operations on Conn. 12 | WriteFlusher interface { 13 | io.Writer 14 | // Flush writes any buffered data to the underlying Conn. 15 | Flush() error 16 | } 17 | 18 | // ConnHandler is the callback function called when Conn gets ready to communicate with the peer. 19 | // You can use ConnHandler to gain full control over the socket. 20 | ConnHandler func(context.Context, Conn) 21 | 22 | // ReqHandler is the callback function used with SetKeepAliveHandler. 23 | // It's called when Conn gets ready to communicate, specifically when 24 | // - accepted by the listener 25 | // - one more byte is received while being in keepalive 26 | // You can use ReqHandler with SetKeepAliveHandler to implement keepalive easily. 27 | ReqHandler func(Conn) error 28 | 29 | // PipelineReader is the callback function used with SetPipelineHandler. 30 | // SetPipelineHandler makes it easy to implement protocol pipelining. 31 | // It's used for the reading part of pipelining and dispatches a meaningful []byte to 32 | // PipelineWriter via its return value. 33 | PipelineReader func(io.Reader) ([]byte, error) 34 | 35 | // PipelineWriter is the callback function used with SetPipelineHandler. 36 | // SetPipelineHandler makes it easy to implement protocol pipelining. 37 | // It's used for the writing part of pipelining and is 38 | // called when a meaningful []byte is received from PipelineReader. 39 | PipelineWriter func([]byte, WriteFlusher) error 40 | ) 41 | 42 | // SetKeepAliveHandler makes it easy to implement keepalive. 43 | // It calls h and calls it again repeatedly if one more byte is received while waiting for the idle time; 44 | // otherwise it stops communicating. 45 | // It also stops when it detects that the listener got closed. 46 | func (s *Server) SetKeepAliveHandler(idle time.Duration, h ReqHandler) { 47 | s.ConnHandler = func(ctx context.Context, conn Conn) { 48 | for { 49 | err := h(conn) 50 | if err != nil { 51 | // don't reuse if some error happened 52 | return 53 | } 54 | select { 55 | case <-ctx.Done(): 56 | // canceled by parent 57 | return 58 | default: 59 | } 60 | conn.SetIdle(true) 61 | conn.SetReadDeadline(time.Now().Add(idle)) 62 | if _, err = conn.Peek(1); err != nil { 63 | return 64 | } 65 | conn.SetIdle(false) 66 | conn.SetReadDeadline(time.Time{}) 67 | } 68 | } 69 | } 70 | 71 | // SetPipelineHandler makes it easy to implement protocol pipelining. 72 | // It combines pr and pw with a buffered channel that has numBuf. 73 | // pr needs to implement the reading part of pipelining and dispatch a meaningful []byte to pw. 74 | // pw needs to implement the writing part of pipelining. 75 | // It stops if pr returns a nil buf or any error. 76 | // It also stops when it detects that the listener got closed.
77 | func (s *Server) SetPipelineHandler( 78 | numBuf int, 79 | pr PipelineReader, 80 | pw PipelineWriter) { 81 | s.ConnHandler = func(ctx context.Context, conn Conn) { 82 | packet := make(chan []byte, numBuf) 83 | go func() { 84 | defer func() { 85 | if err := recover(); err != nil && err != ErrAbortHandler { 86 | const size = 64 << 10 87 | buf := make([]byte, size) 88 | buf = buf[:runtime.Stack(buf, false)] 89 | s.Logger.Errorf("gtcp: panic serving %v: %v\n%s", conn.RemoteAddr(), err, buf) 90 | } 91 | close(packet) 92 | }() 93 | for { 94 | // reader 95 | buf, err := pr(conn) 96 | if buf == nil || err != nil { 97 | return 98 | } 99 | select { 100 | case packet <- buf: 101 | case <-ctx.Done(): 102 | // canceled by parent 103 | return 104 | } 105 | } 106 | 107 | }() 108 | // writer 109 | for { 110 | select { 111 | case buf := <-packet: 112 | if buf == nil { 113 | // context canceled or tcp session closed or error happened at reader 114 | return 115 | } 116 | err := pw(buf, conn) 117 | if err != nil { 118 | // continue until reader failed 119 | continue 120 | } 121 | } 122 | } 123 | } 124 | } 125 | -------------------------------------------------------------------------------- /handler_test.go: -------------------------------------------------------------------------------- 1 | package gtcp_test 2 | 3 | import ( 4 | "context" 5 | "io" 6 | "sync/atomic" 7 | "testing" 8 | "time" 9 | 10 | "github.com/cat2neat/gtcp" 11 | ) 12 | 13 | func TestSetKeepAliveHandler(t *testing.T) { 14 | t.Parallel() 15 | dc := &debugNetConn{} 16 | dc.ReadFunc = func(buf []byte) (int, error) { 17 | copy(buf, smashingStr[:len(buf)]) 18 | return len(buf), nil 19 | } 20 | c := gtcp.NewBaseConn(dc) 21 | srv := gtcp.Server{Logger: gtcp.DefaultLogger} 22 | first := true 23 | srv.SetKeepAliveHandler(time.Millisecond, func(conn gtcp.Conn) error { 24 | if first { 25 | first = false 26 | return nil 27 | } 28 | // consume buffer in BaseConn that filled by keepalive handler 29 | var bb [1]byte 30 | c.Read(bb[:]) 31 | dc.ReadFunc = func(buf []byte) (int, error) { 32 | return 0, errTest 33 | } 34 | return nil 35 | }) 36 | srv.ConnHandler(context.Background(), c) 37 | srv.SetKeepAliveHandler(time.Millisecond, func(conn gtcp.Conn) error { 38 | return errTest 39 | }) 40 | srv.ConnHandler(context.Background(), c) 41 | ctx, cancel := context.WithCancel(context.Background()) 42 | cancel() 43 | srv.SetKeepAliveHandler(time.Millisecond, func(conn gtcp.Conn) error { 44 | return nil 45 | }) 46 | srv.ConnHandler(ctx, c) 47 | } 48 | 49 | func TestSetPipelineHandler(t *testing.T) { 50 | dc := &debugNetConn{} 51 | dc.Remote = "127.0.0.1" 52 | dc.ReadFunc = func(buf []byte) (int, error) { 53 | copy(buf, smashingStr) 54 | return len(smashingStr), nil 55 | } 56 | c := gtcp.NewBufferedConn(gtcp.NewBaseConn(dc)) 57 | srv := gtcp.Server{Logger: nl} 58 | var cnt uint32 59 | srv.SetPipelineHandler(16, func(r io.Reader) ([]byte, error) { 60 | if atomic.LoadUint32(&cnt) < 16 { 61 | buf := make([]byte, len(smashingStr)) 62 | r.Read(buf) 63 | return buf, nil 64 | } 65 | return nil, errTest 66 | }, func(buf []byte, wf gtcp.WriteFlusher) error { 67 | new := atomic.AddUint32(&cnt, 1) 68 | if new%2 == 0 { 69 | return nil 70 | } 71 | return errTest 72 | }) 73 | srv.ConnHandler(context.Background(), c) 74 | ctx, cancel := context.WithCancel(context.Background()) 75 | cancel() 76 | srv.SetPipelineHandler(1, func(r io.Reader) ([]byte, error) { 77 | buf := make([]byte, len(smashingStr)) 78 | r.Read(buf) 79 | return buf, nil 80 | }, func(buf []byte, wf 
gtcp.WriteFlusher) error { 81 | return nil 82 | }) 83 | srv.ConnHandler(ctx, c) 84 | srv.SetPipelineHandler(1, func(r io.Reader) ([]byte, error) { 85 | panic("gtcp_test: panic for PipelineReader test") 86 | }, func(buf []byte, wf gtcp.WriteFlusher) error { 87 | return nil 88 | }) 89 | srv.ConnHandler(context.Background(), c) 90 | } 91 | -------------------------------------------------------------------------------- /limiter.go: -------------------------------------------------------------------------------- 1 | package gtcp 2 | 3 | import "sync/atomic" 4 | 5 | type ( 6 | // Limiter is the interface that limits connections accepted. 7 | Limiter interface { 8 | // OnConnected is called when Conn accepted on a listener. 9 | // Returns false if limits it otherwise true. 10 | OnConnected(Conn) bool 11 | // OnClosed is called when Conn closed. 12 | OnClosed(Conn) 13 | } 14 | 15 | // MaxConnLimiter implements Limiter based on the maximum connections. 16 | MaxConnLimiter struct { 17 | // Max defines the maximum connections. 18 | Max uint32 19 | current uint32 // accessed atomically 20 | } 21 | ) 22 | 23 | // OnConnected returns false if the number of current active connections exceeds Max, 24 | // otherwise true. 25 | func (mc *MaxConnLimiter) OnConnected(conn Conn) bool { 26 | new := atomic.AddUint32(&mc.current, 1) 27 | if new > mc.Max { 28 | atomic.AddUint32(&mc.current, ^uint32(0)) 29 | return false 30 | } 31 | return true 32 | } 33 | 34 | // OnClosed decreases the number of current active connections. 35 | func (mc *MaxConnLimiter) OnClosed(conn Conn) { 36 | atomic.AddUint32(&mc.current, ^uint32(0)) 37 | } 38 | -------------------------------------------------------------------------------- /limiter_test.go: -------------------------------------------------------------------------------- 1 | package gtcp_test 2 | 3 | import ( 4 | "sync" 5 | "testing" 6 | 7 | "github.com/cat2neat/gtcp" 8 | ) 9 | 10 | func TestMaxConnLimiter(t *testing.T) { 11 | t.Parallel() 12 | const max = 64 13 | ml := gtcp.MaxConnLimiter{Max: max} 14 | bc := gtcp.NewBaseConn(&debugNetConn{}) 15 | 16 | // single 17 | for i := 0; i < max; i++ { 18 | if !ml.OnConnected(bc) { 19 | t.Errorf("gtcp_test: MaxConnLimiter.OnConnected expected: true actual: false\n") 20 | } 21 | } 22 | if ml.OnConnected(bc) { 23 | t.Errorf("gtcp_test: MaxConnLimiter.OnConnected expected: false actual: true\n") 24 | } 25 | ml.OnClosed(bc) 26 | if !ml.OnConnected(bc) { 27 | t.Errorf("gtcp_test: MaxConnLimiter.OnConnected expected: true actual: false\n") 28 | } 29 | // parallel 30 | ml = gtcp.MaxConnLimiter{Max: max} 31 | wg := sync.WaitGroup{} 32 | for i := 0; i < max/2; i++ { 33 | wg.Add(1) 34 | go func() { 35 | ml.OnConnected(bc) 36 | wg.Done() 37 | }() 38 | } 39 | wg.Wait() 40 | for i := 0; i < max/2; i++ { 41 | li := i // capture 42 | wg.Add(1) 43 | go func() { 44 | if li%2 == 0 { 45 | ml.OnConnected(bc) 46 | } else { 47 | ml.OnClosed(bc) 48 | } 49 | wg.Done() 50 | }() 51 | } 52 | wg.Wait() 53 | for i := 0; i < max/2; i++ { 54 | ml.OnConnected(bc) 55 | } 56 | if ml.OnConnected(bc) { 57 | t.Errorf("gtcp_test: MaxConnLimiter.OnConnected expected: false actual: true\n") 58 | } 59 | ml.OnClosed(bc) 60 | if !ml.OnConnected(bc) { 61 | t.Errorf("gtcp_test: MaxConnLimiter.OnConnected expected: true actual: false\n") 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /log.go: -------------------------------------------------------------------------------- 1 | package gtcp 2 | 3 | import ( 4 | "log" 5 | ) 6 | 
7 | type ( 8 | // Logger is the interface that wraps logging operations. 9 | Logger interface { 10 | // Errorf logs error information. 11 | // Arguments are handled in the manner of fmt.Printf. 12 | Errorf(format string, args ...interface{}) 13 | } 14 | // BuiltinLogger implements Logger based on the standard log package. 15 | BuiltinLogger struct{} 16 | ) 17 | 18 | var ( 19 | // DefaultLogger is the default Logger. 20 | DefaultLogger Logger = BuiltinLogger{} 21 | ) 22 | 23 | // Errorf logs error information using the standard log package. 24 | func (l BuiltinLogger) Errorf(format string, args ...interface{}) { 25 | log.Printf(format, args...) 26 | } 27 | -------------------------------------------------------------------------------- /retry.go: -------------------------------------------------------------------------------- 1 | package gtcp 2 | 3 | import ( 4 | "time" 5 | ) 6 | 7 | type ( 8 | // Retry is the interface that provides a retry strategy 9 | // based on a given retry counter. 10 | Retry interface { 11 | // Backoff returns a retry interval. 12 | Backoff(uint64) time.Duration 13 | } 14 | 15 | // ExponentialRetry implements exponential backoff algorithm without jitter. 16 | ExponentialRetry struct { 17 | // InitialDelay defines the retry interval at the first retry. 18 | InitialDelay time.Duration 19 | // MaxDelay defines the maximum retry interval. 20 | MaxDelay time.Duration 21 | } 22 | ) 23 | 24 | var ( 25 | // DefaultRetry implements the same behaviour with net/http/Server 26 | DefaultRetry Retry = ExponentialRetry{ 27 | InitialDelay: 5 * time.Millisecond, 28 | MaxDelay: 1 * time.Second, 29 | } 30 | ) 31 | 32 | // Backoff returns a retry interval based on retry. 33 | func (er ExponentialRetry) Backoff(retry uint64) time.Duration { 34 | d := er.InitialDelay * (1 << retry) 35 | if d > er.MaxDelay { 36 | d = er.MaxDelay 37 | } 38 | return d 39 | } 40 | -------------------------------------------------------------------------------- /retry_test.go: -------------------------------------------------------------------------------- 1 | package gtcp_test 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/cat2neat/gtcp" 8 | ) 9 | 10 | func TestExponentialRetry(t *testing.T) { 11 | t.Parallel() 12 | tests := []struct { 13 | expected time.Duration 14 | }{ 15 | {expected: time.Millisecond}, 16 | {expected: 2 * time.Millisecond}, 17 | {expected: 4 * time.Millisecond}, 18 | {expected: 8 * time.Millisecond}, 19 | {expected: 16 * time.Millisecond}, 20 | {expected: 20 * time.Millisecond}, 21 | {expected: 20 * time.Millisecond}, 22 | } 23 | var retry gtcp.Retry = gtcp.ExponentialRetry{ 24 | InitialDelay: time.Millisecond, 25 | MaxDelay: 20 * time.Millisecond, 26 | } 27 | for i, test := range tests { 28 | ret := retry.Backoff(uint64(i)) 29 | if test.expected != ret { 30 | t.Errorf("gtcp_test: ExponentialRetry.Backoff expected: %v actual: %v\n", 31 | test.expected, ret) 32 | } 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /server.go: -------------------------------------------------------------------------------- 1 | package gtcp 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "net" 7 | "runtime" 8 | "sync" 9 | "time" 10 | ) 11 | 12 | type ( 13 | // Server defines parameters for running a gtcp server. 14 | // The zero value for Server is a valid configuration. 15 | Server struct { 16 | // Addr to listen on, ":1979" if empty. 17 | Addr string 18 | 19 | // ConnHandler handles a tcp connection accepted. 
20 | // It can be set not only by assigning ConnHandler directly but also via 21 | // - Server.SetKeepAliveHandler 22 | // - Server.SetPipelineHandler 23 | // A panic occurs if empty. 24 | ConnHandler ConnHandler 25 | 26 | // Configurable components 27 | 28 | // NewConn that is applied to each tcp connection accepted by the listener. 29 | // It can be 30 | // - gtcp.NewBufferedConn 31 | // - gtcp.NewStatsConn 32 | // - gtcp.NewDebugConn 33 | // and can also be layered like the below. 34 | // func(conn Conn) Conn { 35 | // return gtcp.NewBufferedConn(gtcp.NewStatsConn(conn)) 36 | // } 37 | // No NewConn is applied if empty. 38 | NewConn NewConn 39 | 40 | // ConnTracker that handles active connections. 41 | // It can be 42 | // - gtcp.MapConnTracker 43 | // - gtcp.WGConnTracker 44 | // gtcp.MapConnTracker is set if empty. 45 | ConnTracker ConnTracker 46 | 47 | // Logger that logs if an error occurred in gtcp. 48 | // It can be 49 | // - gtcp.BuiltinLogger 50 | // gtcp.DefaultLogger is set if empty. 51 | Logger Logger 52 | 53 | // Retry that handles the retry interval when Accept on the listener failed. 54 | // It can be 55 | // - gtcp.ExponentialRetry 56 | // gtcp.DefaultRetry is set if empty (it behaves in the same manner as net/http/Server). 57 | Retry Retry 58 | 59 | // Limiters that limit connections. 60 | // It can be 61 | // - gtcp.MaxConnLimiter 62 | // also multiple limiters can be set. 63 | // No Limiter is set if empty. 64 | Limiters []Limiter 65 | 66 | // Statistics that measures some statistics. 67 | // It can be 68 | // - gtcp.TrafficStatistics 69 | // gtcp.TrafficStatistics is set if empty. 70 | Statistics Statistics 71 | 72 | listener net.Listener 73 | 74 | mu sync.Mutex 75 | doneChan chan struct{} 76 | } 77 | ) 78 | 79 | var ( 80 | // ErrServerClosed is returned when the listener got closed through Close/Shutdown. 81 | ErrServerClosed = errors.New("gtcp: Server closed") 82 | // ErrAbortHandler is a sentinel panic value to abort a handler. 83 | // panicking with ErrAbortHandler also suppresses logging of a stack 84 | // trace to the server's error log. 85 | ErrAbortHandler = errors.New("gtcp: abort Handler") 86 | ) 87 | 88 | func (s *Server) getDoneChan() <-chan struct{} { 89 | s.mu.Lock() 90 | defer s.mu.Unlock() 91 | return s.getDoneChanLocked() 92 | } 93 | 94 | func (s *Server) getDoneChanLocked() chan struct{} { 95 | if s.doneChan == nil { 96 | s.doneChan = make(chan struct{}) 97 | } 98 | return s.doneChan 99 | } 100 | 101 | func (s *Server) closeDoneChanLocked() { 102 | ch := s.getDoneChanLocked() 103 | select { 104 | case <-ch: 105 | // Already closed. Don't close again. 106 | default: 107 | // Safe to close here. We're the only closer, guarded 108 | // by s.mu. 109 | close(ch) 110 | } 111 | } 112 | 113 | // Close immediately closes the listener and any 114 | // connections tracked by ConnTracker. 115 | // Close returns any error returned from closing the listener. 116 | func (s *Server) Close() (err error) { 117 | s.mu.Lock() 118 | s.closeDoneChanLocked() 119 | s.mu.Unlock() 120 | err = s.listener.Close() 121 | s.ConnTracker.Close() 122 | return 123 | } 124 | 125 | // Shutdown gracefully shuts down the server without interrupting any 126 | // active connections. 127 | // If the provided context expires before the shutdown is complete, 128 | // then the context's error is returned.
129 | func (s *Server) Shutdown(ctx context.Context) (err error) { 130 | s.mu.Lock() 131 | s.closeDoneChanLocked() 132 | s.mu.Unlock() 133 | err = s.listener.Close() 134 | s.ConnTracker.Shutdown(ctx) 135 | return 136 | } 137 | 138 | // ListenerAddr returns the listener.Addr() or nil if the listener is empty. 139 | func (s *Server) ListenerAddr() net.Addr { 140 | if s.listener != nil { 141 | return s.listener.Addr() 142 | } 143 | return nil 144 | } 145 | 146 | // ListenAndServe listens on the TCP network address Addr and then 147 | // calls Serve to handle requests on incoming connections. 148 | // If Addr is blank, ":1979" is used. 149 | // ListenAndServe always returns a non-nil error. 150 | func (s *Server) ListenAndServe() error { 151 | addr := s.Addr 152 | if addr == "" { 153 | addr = ":1979" 154 | } 155 | ln, err := net.Listen("tcp", addr) 156 | if err != nil { 157 | return err 158 | } 159 | return s.Serve(tcpKeepAliveListener{ln.(*net.TCPListener)}) 160 | } 161 | 162 | type tcpKeepAliveListener struct { 163 | *net.TCPListener 164 | } 165 | 166 | func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) { 167 | tc, err := ln.AcceptTCP() 168 | if err != nil { 169 | return 170 | } 171 | tc.SetKeepAlive(true) 172 | tc.SetKeepAlivePeriod(3 * time.Minute) 173 | return tc, nil 174 | } 175 | 176 | // ListenAndServe listens on the TCP network address addr and then 177 | // calls Serve to handle requests on incoming connections. 178 | // ListenAndServe always returns a non-nil error. 179 | func ListenAndServe(addr string, handler ConnHandler) error { 180 | server := &Server{Addr: addr, ConnHandler: handler} 181 | return server.ListenAndServe() 182 | } 183 | 184 | // Serve accepts incoming connections on the Listener l, creating a 185 | // new service goroutine for each. 186 | // The service goroutines call ConnHandler to reply to them.
187 | func (s *Server) Serve(l net.Listener) error {
188 | var retry uint64
189 | defer l.Close()
190 |
191 | if s.ConnHandler == nil {
192 | panic("gtcp: nil handler")
193 | }
194 |
195 | // set reasonable defaults for each component
196 | if s.ConnTracker == nil {
197 | s.ConnTracker = NewMapConnTracker()
198 | }
199 | if s.Logger == nil {
200 | s.Logger = DefaultLogger
201 | }
202 | if s.Retry == nil {
203 | s.Retry = DefaultRetry
204 | }
205 | if s.Statistics == nil {
206 | s.Statistics = &TrafficStatistics{}
207 | }
208 |
209 | s.listener = l
210 |
211 | // context per listener
212 | // notify goroutines executing ConnHandler when the listener gets closed
213 | ctx, cancel := context.WithCancel(context.Background())
214 | defer cancel()
215 | for {
216 | rw, e := l.Accept()
217 | if e != nil {
218 | select {
219 | case <-s.getDoneChan():
220 | return ErrServerClosed
221 | default:
222 | }
223 | if ne, ok := e.(net.Error); ok && ne.Temporary() {
224 | delay := s.Retry.Backoff(retry)
225 | s.Logger.Errorf("gtcp: Accept error: %v; retrying in %v", e, delay)
226 | time.Sleep(delay)
227 | retry++
228 | continue
229 | }
230 | return e
231 | }
232 | retry = 0
233 | conn := NewBaseConn(rw)
234 | if s.NewConn != nil {
235 | conn = s.NewConn(conn)
236 | }
237 | s.serve(ctx, conn)
238 | }
239 | }
240 |
241 | func (s *Server) serve(ctx context.Context, conn Conn) {
242 | for _, limiter := range s.Limiters {
243 | if !limiter.OnConnected(conn) {
244 | s.Logger.Errorf("gtcp: connection refused %v: by limiter %+v", conn.RemoteAddr(), limiter)
245 | conn.Close()
246 | return
247 | }
248 | }
249 | s.ConnTracker.AddConn(conn)
250 | go func() {
251 | defer func() {
252 | for _, limiter := range s.Limiters {
253 | limiter.OnClosed(conn)
254 | }
255 | s.ConnTracker.DelConn(conn)
256 | s.Statistics.AddConnStats(conn)
257 | if err := recover(); err != nil && err != ErrAbortHandler {
258 | const size = 64 << 10
259 | buf := make([]byte, size)
260 | buf = buf[:runtime.Stack(buf, false)]
261 | s.Logger.Errorf("gtcp: panic serving %v: %v\n%s", conn.RemoteAddr(), err, buf)
262 | }
263 | conn.Close()
264 | }()
265 | // context per connection
266 | ctx, cancel := context.WithCancel(ctx)
267 | conn.SetCancelFunc(cancel)
268 | defer cancel()
269 | s.ConnHandler(ctx, conn)
270 | }()
271 | }
272 |
--------------------------------------------------------------------------------
/server_test.go:
--------------------------------------------------------------------------------
1 | package gtcp_test
2 |
3 | import (
4 | "context"
5 | "io"
6 | "net"
7 | "strings"
8 | "sync"
9 | "testing"
10 | "time"
11 |
12 | "github.com/cat2neat/gtcp"
13 | )
14 |
15 | const bufSize = 1024
16 |
17 | func echoServer() *gtcp.Server {
18 | srv := &gtcp.Server{
19 | Addr: ":0",
20 | ConnHandler: func(ctx context.Context, conn gtcp.Conn) {
21 | buf := make([]byte, bufSize)
22 | for {
23 | n, err := conn.Read(buf)
24 | if err != nil {
25 | return
26 | }
27 | conn.Write(buf[:n])
28 | err = conn.Flush()
29 | if err != nil {
30 | return
31 | }
32 | }
33 | },
34 | }
35 | return srv
36 | }
37 |
38 | func echoServerKeepAlive() *gtcp.Server {
39 | srv := &gtcp.Server{
40 | Addr: ":0",
41 | }
42 | srv.SetKeepAliveHandler(5*time.Millisecond,
43 | func(conn gtcp.Conn) error {
44 | buf := make([]byte, bufSize)
45 | n, err := conn.Read(buf)
46 | if err != nil {
47 | if err != io.EOF {
48 | srv.Logger.Errorf("gtcp_test: err: %+v\n", err)
49 | }
50 | return err
51 | }
52 | conn.Write(buf[:n])
53 | err = conn.Flush()
54 | if err != nil {
55 | return err
56 | }
57 | return nil
58 | })
59 | return srv
60 | }
61 |
62 | func echoServerPipeline() *gtcp.Server {
63 | srv := &gtcp.Server{Addr: ":0"}
64 | srv.SetPipelineHandler(32,
65 | func(r io.Reader) ([]byte, error) {
66 | buf := make([]byte, bufSize)
67 | n, err := r.Read(buf)
68 | return buf[:n], err
69 | }, func(buf []byte, wf gtcp.WriteFlusher) error {
70 | wf.Write(buf)
71 | return wf.Flush()
72 | })
73 | return srv
74 | }
75 |
76 | func doEchoClient(addr string, src []string, t testing.TB) {
77 | raw, err := net.Dial("tcp", addr)
78 | if err != nil {
79 | t.Errorf("gtcp_test: err: %+v\n", err)
80 | return
81 | }
82 | defer raw.Close()
83 | conn := raw.(*net.TCPConn)
84 | for _, s := range src {
85 | n, err := conn.Write([]byte(s))
86 | if n != len(s) || err != nil {
87 | t.Errorf("gtcp_test: err: %+v\n", err)
88 | return
89 | }
90 | }
91 | err = conn.CloseWrite()
92 | if err != nil {
93 | t.Errorf("gtcp_test: err: %+v\n", err)
94 | return
95 | }
96 | buf := make([]byte, bufSize)
97 | var total int
98 | for {
99 | n, err := conn.Read(buf[total:])
100 | if err != nil {
101 | if err == io.EOF {
102 | break
103 | } else {
104 | t.Errorf("gtcp_test: err: %+v\n", err)
105 | return
106 | }
107 | }
108 | total += n
109 | }
110 | expected := strings.Join(src, "")
111 | actual := string(buf[:total])
112 | if actual != expected {
113 | t.Errorf("gtcp_test: expected: %s, actual: %s\n", expected, actual)
114 | }
115 | }
116 |
117 | func TestServer(t *testing.T) {
118 | // echo:server
119 | srv := echoServer()
120 | go srv.ListenAndServe()
121 | // echo:client
122 | data := []string{
123 | "foo",
124 | "bar",
125 | "buzz",
126 | }
127 | time.Sleep(5 * time.Millisecond)
128 | var wg sync.WaitGroup
129 | for i := 0; i < 32; i++ {
130 | wg.Add(1)
131 | go func() {
132 | doEchoClient(srv.ListenerAddr().String(), data, t)
133 | wg.Done()
134 | }()
135 | }
136 | wg.Wait()
137 | // should fail due to port collision
138 | srv.Addr = srv.ListenerAddr().String()
139 | err := srv.ListenAndServe()
140 | if err == nil {
141 | t.Errorf("gtcp_test: ListenAndServe should fail due to port collision\n")
142 | }
143 | srv.Shutdown(context.Background())
144 | // safe to double close
145 | srv.Close()
146 | }
147 |
148 | func TestServerNilHandler(t *testing.T) {
149 | defer func() {
150 | if err := recover(); err == nil {
151 | t.Errorf("gtcp_test: Nil handler should cause panic\n")
152 | }
153 | }()
154 | gtcp.ListenAndServe(":0", nil)
155 | }
156 |
157 | func connectTCPClient(addr string, t *testing.T) {
158 | conn, err := net.Dial("tcp", addr)
159 | if err != nil {
160 | t.Fatalf("gtcp_test: err: %+v\n", err)
161 | }
162 | // block until socket closed by server
163 | var buf [4]byte
164 | conn.Read(buf[:])
165 | }
166 |
167 | func TestServerPanicHandler(t *testing.T) {
168 | srv := gtcp.Server{
169 | Addr: ":0",
170 | ConnHandler: func(ctx context.Context, conn gtcp.Conn) {
171 | panic(gtcp.ErrAbortHandler)
172 | },
173 | }
174 | go srv.ListenAndServe()
175 | defer srv.Shutdown(context.Background())
176 | time.Sleep(5 * time.Millisecond)
177 | connectTCPClient(srv.ListenerAddr().String(), t)
178 | }
179 |
180 | func TestServerWithLimiter(t *testing.T) {
181 | srv := gtcp.Server{
182 | Addr: ":0",
183 | ConnHandler: func(ctx context.Context, conn gtcp.Conn) {
184 | time.Sleep(10 * time.Millisecond)
185 | },
186 | ConnTracker: &gtcp.WGConnTracker{},
187 | Limiters: append([]gtcp.Limiter(nil), &gtcp.MaxConnLimiter{Max: 2}),
188 | }
189 | go srv.ListenAndServe()
190 | defer srv.Shutdown(context.Background())
191 | time.Sleep(5 * time.Millisecond)
192 | var wg sync.WaitGroup
193 | for i := 0; i < 8; i++ {
194 | wg.Add(1)
195 | go func() {
196 | connectTCPClient(srv.ListenerAddr().String(), t)
197 | wg.Done()
198 | }()
199 | }
200 | wg.Wait()
201 | }
202 |
203 | func TestServerForceClose(t *testing.T) {
204 | srv := gtcp.Server{
205 | Addr: ":0",
206 | ConnHandler: func(ctx context.Context, conn gtcp.Conn) {
207 | select {
208 | case <-time.After(time.Second):
209 | t.Errorf("gtcp_test: unexpected timeout happened")
210 | case <-ctx.Done():
211 | }
212 | },
213 | ConnTracker: &gtcp.WGConnTracker{},
214 | NewConn: gtcp.NewBufferedConn,
215 | }
216 | addr := srv.ListenerAddr()
217 | if addr != nil {
218 | t.Errorf("gtcp_test: expected: nil actual:%+v\n", addr)
219 | }
220 | var err error
221 | go func() {
222 | err = srv.ListenAndServe()
223 | }()
224 | time.Sleep(5 * time.Millisecond)
225 | var wg sync.WaitGroup
226 | for i := 0; i < 4; i++ {
227 | wg.Add(1)
228 | go func() {
229 | connectTCPClient(srv.ListenerAddr().String(), t)
230 | wg.Done()
231 | }()
232 | }
233 | time.Sleep(5 * time.Millisecond)
234 | srv.Close()
235 | wg.Wait()
236 | if err != gtcp.ErrServerClosed {
237 | t.Errorf("gtcp_test: err: %+v\n", err)
238 | }
239 | }
240 |
241 | func BenchmarkRawEchoServer(b *testing.B) {
242 | srv := &rawEchoServer{}
243 | benchEchoServer(srv, b)
244 | }
245 |
246 | func BenchmarkEchoServer(b *testing.B) {
247 | srv := echoServer()
248 | benchEchoServer(srv, b)
249 | }
250 |
251 | func BenchmarkEchoServerPipeline(b *testing.B) {
252 | srv := echoServerPipeline()
253 | benchEchoServer(srv, b)
254 | }
255 |
256 | func BenchmarkEchoServerKeepAlive(b *testing.B) {
257 | srv := echoServerKeepAlive()
258 | benchEchoServer(srv, b)
259 | }
260 |
261 | func benchEchoServer(srv echoer, b *testing.B) {
262 | errChan := make(chan error)
263 | go func() {
264 | errChan <- srv.ListenAndServe()
265 | }()
266 | data := []string{
267 | "foo",
268 | "bar",
269 | "buzz",
270 | }
271 | time.Sleep(10 * time.Millisecond)
272 | select {
273 | case err := <-errChan:
274 | b.Errorf("gtcp_test: err: %+v\n", err)
275 | return
276 | default:
277 | }
278 | defer srv.Close()
279 | b.ResetTimer()
280 | for n := 0; n < b.N; n++ {
281 | var wg sync.WaitGroup
282 | for i := 0; i < 8; i++ {
283 | wg.Add(1)
284 | go func() {
285 | doEchoClient(srv.ListenerAddr().String(), data, b)
286 | wg.Done()
287 | }()
288 | }
289 | wg.Wait()
290 | }
291 | }
292 |
293 | type echoer interface {
294 | Close() error
295 | ListenerAddr() net.Addr
296 | ListenAndServe() error
297 | }
298 |
299 | type rawEchoServer struct {
300 | *net.TCPListener
301 | }
302 |
303 | func (s *rawEchoServer) Close() error {
304 | return s.TCPListener.Close()
305 | }
306 |
307 | func (s *rawEchoServer) ListenerAddr() net.Addr {
308 | if s.TCPListener != nil {
309 | return s.TCPListener.Addr()
310 | }
311 | return nil
312 | }
313 |
314 | func (s *rawEchoServer) ListenAndServe() error {
315 | ln, err := net.Listen("tcp", ":0")
316 | if err != nil {
317 | return err
318 | }
319 | s.TCPListener = ln.(*net.TCPListener)
320 | for {
321 | conn, err := s.TCPListener.AcceptTCP()
322 | if err != nil {
323 | return err
324 | }
325 | conn.SetKeepAlive(true)
326 | conn.SetKeepAlivePeriod(3 * time.Minute)
327 | go func() {
328 | defer conn.Close()
329 | buf := make([]byte, bufSize)
330 | for {
331 | n, err := conn.Read(buf)
332 | if err != nil {
333 | return
334 | }
335 | _, err = conn.Write(buf[:n])
336 | if err != nil {
337 | return
338 | }
339 | }
340 | }()
341 | }
342 | }
343 |
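The configurable pieces documented in server.go are meant to be composed. Below is a minimal sketch of one possible wiring, not a file from this repository, assuming only exported names that appear elsewhere in the repo (gtcp.NewBufferedConn, gtcp.NewStatsConn, gtcp.MaxConnLimiter, gtcp.TrafficStatistics); field values such as the port and the connection cap are illustrative.

package main

import (
	"context"
	"log"
	"time"

	"github.com/cat2neat/gtcp"
)

func main() {
	srv := &gtcp.Server{
		Addr: ":1979",
		// Echo handler; returning from the handler closes the connection.
		ConnHandler: func(ctx context.Context, conn gtcp.Conn) {
			buf := make([]byte, 1024)
			for {
				n, err := conn.Read(buf)
				if err != nil {
					return
				}
				conn.Write(buf[:n])
				if err := conn.Flush(); err != nil {
					return
				}
			}
		},
		// Layer buffering over per-connection stats, as the NewConn comment suggests.
		NewConn: func(conn gtcp.Conn) gtcp.Conn {
			return gtcp.NewBufferedConn(gtcp.NewStatsConn(conn))
		},
		// Refuse connections beyond 128 concurrent ones.
		Limiters: []gtcp.Limiter{&gtcp.MaxConnLimiter{Max: 128}},
		// Aggregate in/out bytes from connections as they close.
		Statistics: &gtcp.TrafficStatistics{},
	}
	go func() {
		if err := srv.ListenAndServe(); err != gtcp.ErrServerClosed {
			log.Println(err)
		}
	}()
	// ... later, on termination, shut down gracefully with a bound on waiting.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	srv.Shutdown(ctx)
}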
--------------------------------------------------------------------------------
/statistics.go:
--------------------------------------------------------------------------------
1 | package gtcp
2 |
3 | import (
4 | "fmt"
5 | "sync"
6 | )
7 |
8 | type (
9 | // Statistics is the interface that wraps operations for accumulating Conn statistics.
10 | Statistics interface {
11 | // AddConnStats adds a Conn's statistics.
12 | AddConnStats(Conn)
13 | // Reset clears the statistics currently held.
14 | Reset()
15 | // String returns a string that represents the current statistics.
16 | String() string
17 | }
18 |
19 | // TrafficStatistics implements Statistics to hold the in/out traffic on a gtcp server.
20 | TrafficStatistics struct {
21 | mu sync.RWMutex
22 | inBytes int64
23 | outBytes int64
24 | }
25 | )
26 |
27 | // AddConnStats ingests inBytes and outBytes from conn.
28 | // You need to use StatsConn via Server.NewConn if you want in/out traffic.
29 | func (ts *TrafficStatistics) AddConnStats(conn Conn) {
30 | in, out := conn.Stats()
31 | ts.mu.Lock()
32 | ts.inBytes += in
33 | ts.outBytes += out
34 | ts.mu.Unlock()
35 | }
36 |
37 | // Reset clears the statistics currently held.
38 | func (ts *TrafficStatistics) Reset() {
39 | ts.mu.Lock()
40 | ts.inBytes, ts.outBytes = 0, 0
41 | ts.mu.Unlock()
42 | }
43 |
44 | // String returns the in/out traffic on a gtcp server as a JSON string.
45 | func (ts *TrafficStatistics) String() (str string) {
46 | ts.mu.RLock()
47 | str = fmt.Sprintf(`{"in_bytes": %d, "out_bytes": %d}`, ts.inBytes, ts.outBytes)
48 | ts.mu.RUnlock()
49 | return
50 | }
51 |
--------------------------------------------------------------------------------
/statistics_test.go:
--------------------------------------------------------------------------------
1 | package gtcp_test
2 |
3 | import (
4 | "encoding/json"
5 | "sync"
6 | "testing"
7 |
8 | "github.com/cat2neat/gtcp"
9 | )
10 |
11 | func TestTrafficStatistics(t *testing.T) {
12 | t.Parallel()
13 | dc := &debugNetConn{}
14 | dc.ReadFunc = func(buf []byte) (int, error) {
15 | copy(buf, smashingStr)
16 | return len(smashingStr), nil
17 | }
18 | dc.WriteFunc = func(buf []byte) (int, error) {
19 | return len(buf), nil
20 | }
21 | c := gtcp.NewStatsConn(gtcp.NewBaseConn(dc))
22 | buf := make([]byte, 4)
23 | c.Read(buf)
24 | c.Write([]byte(smashingStr))
25 |
26 | const max = 64
27 | ts := gtcp.TrafficStatistics{}
28 | wg := sync.WaitGroup{}
29 | for i := 0; i < max; i++ {
30 | wg.Add(1)
31 | go func() {
32 | ts.AddConnStats(c)
33 | wg.Done()
34 | }()
35 | }
36 | wg.Wait()
37 | decoded := struct {
38 | InBytes int `json:"in_bytes"`
39 | OutBytes int `json:"out_bytes"`
40 | }{}
41 | err := json.Unmarshal([]byte(ts.String()), &decoded)
42 | if err != nil {
43 | t.Errorf("gtcp_test: TrafficStatistics.String err: %+v\n", err)
44 | }
45 | if decoded.InBytes != 4*64 || decoded.OutBytes != 4*64 {
46 | t.Errorf("gtcp_test: TrafficStatistics.String raw: %s expected: 256, 256 actual: %d, %d\n",
47 | ts.String(),
48 | decoded.InBytes,
49 | decoded.OutBytes)
50 | }
51 | ts.Reset()
52 | err = json.Unmarshal([]byte(ts.String()), &decoded)
53 | if err != nil {
54 | t.Errorf("gtcp_test: TrafficStatistics.String err: %+v\n", err)
55 | }
56 | if decoded.InBytes != 0 || decoded.OutBytes != 0 {
57 | t.Errorf("gtcp_test: TrafficStatistics.String raw: %s expected: 0, 0 actual: %d, %d\n",
58 | ts.String(),
59 | decoded.InBytes,
60 | decoded.OutBytes)
61 | }
62 | }
63 |
--------------------------------------------------------------------------------
/tracker.go:
--------------------------------------------------------------------------------
1 | package gtcp
2 |
3 | import (
4 | "context"
5 | "sync"
6 | "time"
7 | )
8 |
9 | type (
10 | // ConnTracker is the interface that wraps operations to track active connections.
11 | ConnTracker interface {
12 | // AddConn adds Conn to active connections.
13 | AddConn(Conn)
14 | // DelConn deletes Conn from active connections.
15 | DelConn(Conn)
16 | // Close closes active connections in the same manner as net/http.Server.Close.
17 | // In short, force close.
18 | Close() error
19 | // Shutdown closes active connections in the same manner as net/http.Server.Shutdown.
20 | // In short, graceful shutdown.
21 | Shutdown(context.Context) error
22 | }
23 |
24 | // WGConnTracker implements ConnTracker with sync.WaitGroup.
25 | // Its Close implementation is semantically different from what ConnTracker.Close is meant to do.
26 | // Use MapConnTracker if you want to force close active connections.
27 | WGConnTracker struct {
28 | wg sync.WaitGroup
29 | }
30 |
31 | // MapConnTracker implements ConnTracker in the same manner as net/http.Server.
32 | MapConnTracker struct {
33 | mu sync.Mutex
34 | activeConn map[Conn]struct{}
35 | }
36 | )
37 |
38 | var (
39 | shutdownPollInterval = 500 * time.Millisecond
40 | )
41 |
42 | // AddConn adds Conn to active connections using sync.WaitGroup.Add.
43 | func (ct *WGConnTracker) AddConn(Conn) {
44 | ct.wg.Add(1)
45 | }
46 |
47 | // DelConn deletes Conn from active connections using sync.WaitGroup.Done.
48 | func (ct *WGConnTracker) DelConn(Conn) {
49 | ct.wg.Done()
50 | }
51 |
52 | // Close closes (actually, waits for things to get done) active connections using sync.WaitGroup.Wait.
53 | func (ct *WGConnTracker) Close() error {
54 | ct.wg.Wait()
55 | return nil
56 | }
57 |
58 | // Shutdown waits for active connections to get closed using sync.WaitGroup.Wait.
59 | func (ct *WGConnTracker) Shutdown(ctx context.Context) error {
60 | ct.wg.Wait()
61 | return nil
62 | }
63 |
64 | // NewMapConnTracker returns a new MapConnTracker as a ConnTracker.
65 | func NewMapConnTracker() ConnTracker {
66 | return &MapConnTracker{
67 | activeConn: make(map[Conn]struct{}),
68 | }
69 | }
70 |
71 | // AddConn adds Conn to active connections using a map.
72 | func (ct *MapConnTracker) AddConn(conn Conn) {
73 | ct.mu.Lock()
74 | ct.activeConn[conn] = struct{}{}
75 | ct.mu.Unlock()
76 | }
77 |
78 | // DelConn deletes Conn from active connections using a map.
79 | func (ct *MapConnTracker) DelConn(conn Conn) {
80 | ct.mu.Lock()
81 | delete(ct.activeConn, conn)
82 | ct.mu.Unlock()
83 | }
84 |
85 | // Close closes active connections forcefully.
86 | func (ct *MapConnTracker) Close() error {
87 | ct.mu.Lock()
88 | defer ct.mu.Unlock()
89 | for c := range ct.activeConn {
90 | c.Close()
91 | delete(ct.activeConn, c)
92 | }
93 | return nil
94 | }
95 |
96 | // Shutdown closes active connections in the same manner as net/http.Server.Shutdown.
97 | // It's useful when you use gtcp.Server.SetKeepAliveHandler
98 | // or use ConnHandler directly with gtcp.Conn.(SetIdle|IsIdle)
99 | // as Shutdown only tries to close idle connections.
100 | // If the provided context expires before the shutdown is complete,
101 | // then the context's error is returned.
102 | func (ct *MapConnTracker) Shutdown(ctx context.Context) error {
103 | ticker := time.NewTicker(shutdownPollInterval)
104 | defer ticker.Stop()
105 | for {
106 | if ct.closeIdleConns() {
107 | return nil
108 | }
109 | select {
110 | case <-ctx.Done():
111 | return ctx.Err()
112 | case <-ticker.C:
113 | }
114 | }
115 | }
116 |
117 | // closeIdleConns closes all idle connections and reports whether the
118 | // server is quiescent.
119 | func (ct *MapConnTracker) closeIdleConns() bool {
120 | ct.mu.Lock()
121 | defer ct.mu.Unlock()
122 | quiescent := true
123 | for c := range ct.activeConn {
124 | if !c.IsIdle() {
125 | quiescent = false
126 | continue
127 | }
128 | c.Close()
129 | delete(ct.activeConn, c)
130 | }
131 | return quiescent
132 | }
133 |
--------------------------------------------------------------------------------
/tracker_test.go:
--------------------------------------------------------------------------------
1 | package gtcp_test
2 |
3 | import (
4 | "context"
5 | "sync"
6 | "testing"
7 | "time"
8 |
9 | "github.com/cat2neat/gtcp"
10 | )
11 |
12 | func testConnTracker(ct gtcp.ConnTracker,
13 | connHandler func(gtcp.ConnTracker, gtcp.Conn),
14 | finishHandler func(gtcp.ConnTracker),
15 | t *testing.T) {
16 | const max = 64
17 | bcs := make([]gtcp.Conn, max)
18 | for i := 0; i < max/2; i++ {
19 | bcs[i] = gtcp.NewBaseConn(&debugNetConn{})
20 | ct.AddConn(bcs[i])
21 | }
22 | for i := 0; i < max/2; i++ {
23 | li := i
24 | go ct.DelConn(bcs[li])
25 | }
26 | var wg sync.WaitGroup
27 | for i := max / 2; i < max; i++ {
28 | bcs[i] = gtcp.NewBaseConn(&debugNetConn{})
29 | li := i
30 | wg.Add(1)
31 | go func() {
32 | ct.AddConn(bcs[li])
33 | wg.Done()
34 | }()
35 | }
36 | wg.Wait()
37 | for i := max / 2; i < max; i++ {
38 | li := i
39 | go connHandler(ct, bcs[li])
40 | }
41 | finishHandler(ct)
42 | }
43 |
44 | func TestWGConnTracker(t *testing.T) {
45 | t.Parallel()
46 | ct := &gtcp.WGConnTracker{}
47 | testConnTracker(ct,
48 | func(ct gtcp.ConnTracker, conn gtcp.Conn) {
49 | ct.DelConn(conn)
50 | },
51 | func(ct gtcp.ConnTracker) {
52 | ct.Close()
53 | },
54 | t)
55 | testConnTracker(ct,
56 | func(ct gtcp.ConnTracker, conn gtcp.Conn) {
57 | ct.DelConn(conn)
58 | },
59 | func(ct gtcp.ConnTracker) {
60 | ct.Shutdown(context.Background())
61 | },
62 | t)
63 | }
64 |
65 | func TestMapConnTracker(t *testing.T) {
66 | t.Parallel()
67 | ct := gtcp.NewMapConnTracker()
68 | testConnTracker(ct,
69 | func(ct gtcp.ConnTracker, conn gtcp.Conn) {
70 | ct.DelConn(conn)
71 | },
72 | func(ct gtcp.ConnTracker) {
73 | ct.Close()
74 | },
75 | t)
76 | testConnTracker(ct,
77 | func(ct gtcp.ConnTracker, conn gtcp.Conn) {
78 | conn.SetIdle(true)
79 | },
80 | func(ct gtcp.ConnTracker) {
81 | ct.Shutdown(context.Background())
82 | },
83 | t)
84 | testConnTracker(ct,
85 | func(ct gtcp.ConnTracker, conn gtcp.Conn) {
86 | conn.SetIdle(false)
87 | },
88 | func(ct gtcp.ConnTracker) {
89 | ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
90 | err := ct.Shutdown(ctx)
91 | if err != context.DeadlineExceeded {
92 | t.Errorf("gtcp_test: ConnTracker.Shutdown expected: %+v actual: %+v\n", context.DeadlineExceeded, err)
93 | }
94 | cancel()
95 | },
96 | t)
97 | }
98 |
--------------------------------------------------------------------------------
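MapConnTracker.Shutdown above only reaps connections whose IsIdle reports true, so a handler wired directly through ConnHandler has to maintain the idle flag itself (per the comment in tracker.go, SetKeepAliveHandler covers this case for you). A minimal sketch of such a handler follows; it is not part of the repository, uses only Conn methods seen elsewhere in it (SetIdle, Read, Write, Flush), and the buffer size and timeout are illustrative.

package main

import (
	"context"
	"time"

	"github.com/cat2neat/gtcp"
)

func main() {
	srv := &gtcp.Server{
		Addr:        ":1979",
		ConnTracker: gtcp.NewMapConnTracker(),
		ConnHandler: func(ctx context.Context, conn gtcp.Conn) {
			buf := make([]byte, 1024)
			for {
				conn.SetIdle(true) // idle while blocked on Read; Shutdown may close the conn here
				n, err := conn.Read(buf)
				if err != nil {
					return
				}
				conn.SetIdle(false) // busy while echoing the request back
				conn.Write(buf[:n])
				if err := conn.Flush(); err != nil {
					return
				}
			}
		},
	}
	go srv.ListenAndServe()
	// Graceful: returns once every tracked connection is idle, or with ctx's error on expiry.
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	srv.Shutdown(ctx)
}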