├── stateMachine.png
├── tcp_windows.go
├── .gitignore
├── main.go
├── logger.go
├── tcp_linux.go
├── LICENSE
├── timer.go
├── test.go
├── iperf_tcp.go
├── README.md
├── iperf_kcp.go
├── iperf_rudp.go
├── iperf_client.go
├── iperf_server.go
├── iperf.go
├── iperf_api_test.go
└── iperf_api.go

/stateMachine.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZezhongWang/iperf-go/HEAD/stateMachine.png
--------------------------------------------------------------------------------
/tcp_windows.go:
--------------------------------------------------------------------------------
1 | package main
2 | 
3 | // save_tcpInfo is a no-op on Windows, where TCP_INFO is not available.
4 | func save_tcpInfo(sp *iperf_stream, rp *iperf_interval_results) int {
5 |     return 0
6 | }
7 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # ignore .xml
2 | *.xml
3 | # Generated files
4 | bin/
5 | gen/
6 | out/
7 | # Gradle files
8 | .gradle/
9 | build/
10 | release/
11 | .idea/
12 | *.exe
13 | iperf-go
--------------------------------------------------------------------------------
/main.go:
--------------------------------------------------------------------------------
1 | package main
2 | 
3 | /*
4 | Potential pitfall: endianness has not been considered yet and may cause
5 | problems. Little-endian (following the system) is currently used everywhere.
6 | */
7 | 
8 | func main() {
9 |     test := new_iperf_test()
10 |     if test == nil {
11 |         log.Error("create new test error")
12 |         return
13 |     }
14 |     test.init()
15 | 
16 |     if rtn := test.parse_arguments(); rtn < 0 {
17 |         log.Errorf("parse arguments error: %v", rtn)
18 |     }
19 | 
20 |     if rtn := test.run_test(); rtn < 0 {
21 |         log.Errorf("run test failed: %v", rtn)
22 |     }
23 | 
24 |     test.free_test()
25 | }
26 | 
--------------------------------------------------------------------------------
/logger.go:
--------------------------------------------------------------------------------
1 | package main
2 | 
3 | import (
4 |     "github.com/op/go-logging"
5 |     "os"
6 | )
7 | 
8 | /*
9 | Log setting
10 | */
11 | 
12 | var log = logging.MustGetLogger("iperf")
13 | 
14 | // Example format string. Everything except the message has a custom color
15 | // which is dependent on the log level. Many fields have a custom output
16 | // formatting too, eg. the time returns the hour down to the millisecond.
17 | var format = logging.MustStringFormatter(
18 |     `%{color}%{time:15:04:05.000} %{shortfunc} ▶ %{level:.4s} %{id:03x}%{color:reset} %{message}`,
19 | )
20 | 
21 | func init() {
22 |     // log init
23 |     backend := logging.NewLogBackend(os.Stderr, "", 0)
24 |     backendFormatter := logging.NewBackendFormatter(backend, format)
25 |     logging.SetLevel(logging.ERROR, "iperf")
26 |     logging.SetBackend(backendFormatter)
27 | }
--------------------------------------------------------------------------------
/tcp_linux.go:
--------------------------------------------------------------------------------
1 | package main
2 | 
3 | import (
4 |     "fmt"
5 |     "golang.org/x/sys/unix"
6 |     "net"
7 | )
8 | 
9 | func save_tcpInfo(sp *iperf_stream, rp *iperf_interval_results) int {
10 |     if !has_tcpInfo() {
11 |         return -1
12 |     }
13 |     info := getTCPInfo(sp.conn)
14 |     rp.rtt = uint(info.Rtt)
15 |     rp.rto = uint(info.Rto)
16 |     rp.interval_retrans = uint(info.Total_retrans) // temporarily store the cumulative total; stats_callback converts it to a per-interval delta
17 |     //PrintTCPInfo(info)
18 |     return 0
19 | }
20 | 
21 | func getTCPInfo(conn net.Conn) *unix.TCPInfo {
22 |     file, err := conn.(*net.TCPConn).File()
23 |     if err != nil {
24 |         fmt.Printf("File err: %v", err)
25 |     }
26 |     fd := file.Fd()
27 |     info, err := unix.GetsockoptTCPInfo(int(fd), unix.SOL_TCP, unix.TCP_INFO)
28 |     if err != nil {
29 |         fmt.Printf("GetsockoptTCPInfo err: %v", err)
30 |     }
31 |     return info
32 | }
33 | 
34 | func PrintTCPInfo(info *unix.TCPInfo) {
35 |     fmt.Printf("TcpInfo: rcv_rtt:%v\trtt:%v\tretransmits:%v\trto:%v\tlost:%v\tretrans:%v\ttotal_retrans:%v\n",
36 |         info.Rcv_rtt /* RTT measured at the receiving side, in microseconds */, info.Rtt, info.Retransmits, info.Rto, info.Lost,
37 |         info.Retrans /* retransmitted but not yet acknowledged segments */, info.Total_retrans /* total retransmissions on this connection */)
38 | }
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 | 
3 | Copyright (c) 2019
4 | 
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | -------------------------------------------------------------------------------- /timer.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import "time" 4 | 5 | type ITimer struct{ 6 | timer *time.Timer 7 | done chan bool 8 | } 9 | 10 | type ITicker struct{ 11 | ticker *time.Ticker 12 | done chan bool 13 | } 14 | 15 | type TimerClientData struct{ 16 | p interface{} 17 | } 18 | type timerProc func(data TimerClientData, now time.Time) 19 | 20 | 21 | func timer_create(now time.Time, proc timerProc, data TimerClientData, dur uint /* in ms */) ITimer{ 22 | real_dur := time.Now().Sub(now) + time.Duration(dur)*time.Millisecond 23 | timer := time.NewTimer(real_dur) 24 | done := make(chan bool, 1) 25 | go func(){ 26 | defer timer.Stop() 27 | for { 28 | select { 29 | case <- done: 30 | log.Debugf("Timer recv done. dur: %v", dur) 31 | return 32 | case t := <- timer.C: 33 | proc(data, t) 34 | } 35 | } 36 | }() 37 | itimer := ITimer{timer:timer, done: done} 38 | return itimer 39 | } 40 | 41 | func ticker_create(now time.Time, proc timerProc, data TimerClientData, interval uint /* in ms */, max_times uint) ITicker{ 42 | ticker := time.NewTicker(time.Duration(interval) * time.Millisecond) 43 | done := make(chan bool, 1) 44 | go func(){ 45 | var cnt uint = 0 46 | defer ticker.Stop() 47 | for { 48 | select { 49 | case <- done: 50 | log.Debugf("Ticker recv done. interval:%v", interval) 51 | return 52 | case t := <- ticker.C: 53 | if cnt >= max_times{ 54 | return 55 | } 56 | proc(data, t) 57 | cnt ++ 58 | } 59 | } 60 | }() 61 | iticker := ITicker{ticker:ticker, done: done} 62 | return iticker 63 | } 64 | -------------------------------------------------------------------------------- /test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "net" 7 | "os" 8 | "reflect" 9 | "time" 10 | ) 11 | 12 | const SIZE = 128*1024 13 | 14 | func handleConn(c net.Conn) { 15 | totalBytes := 0 16 | buffer := make([]byte, SIZE) 17 | //ctrl_chan := make(chan uint, 5) 18 | for i := 0; i< 10; i++{ 19 | n, err := c.Read(buffer) 20 | totalBytes += n 21 | fmt.Printf("server read totalBytes = %v n = %v\n", totalBytes, n) 22 | if err != nil { 23 | if err != io.EOF { 24 | fmt.Printf("Read error: %s", err) 25 | } 26 | break 27 | } 28 | n, err = c.Write(buffer) 29 | fmt.Printf("server echo n = %v\n", n) 30 | } 31 | fmt.Printf("Server stop\n") 32 | c.Close() 33 | //for { 34 | // time.Sleep(1) 35 | // fmt.Printf("do something\n") 36 | //} 37 | } 38 | 39 | 40 | func server(){ 41 | Address := "127.0.0.1:9999" 42 | Addr, err := net.ResolveTCPAddr("tcp", Address) 43 | if err != nil { 44 | fmt.Printf("err = %v",err) 45 | } 46 | 47 | listener, err := net.ListenTCP("tcp", Addr) 48 | if err != nil { 49 | fmt.Printf("err = %v",err) 50 | } 51 | defer listener.Close() 52 | 53 | //server loop 54 | for { 55 | conn, err := listener.Accept() 56 | if err != nil { 57 | continue 58 | } 59 | go handleConn(conn) 60 | return 61 | } 62 | } 63 | 64 | func client(){ 65 | tcpAddr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:9999") 66 | if err != nil { 67 | fmt.Printf("err = %v",err) 68 | } 69 | conn, err := net.DialTCP("tcp", nil, tcpAddr) 70 | if err != nil { 71 | fmt.Printf("err = %v",err) 72 | } 73 | defer conn.Close() 74 | buffer := make([]byte, SIZE) 75 | total_size := 0 76 | //if err := conn.SetWriteBuffer(SIZE); err != nil{ 77 | // fmt.Printf("err = %v",err) 78 | //} 79 | 80 | for i := 0 ; i < 10 ; i++ { 
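// The loop body below measures an application-level RTT: write one
// 128 KB block, then time how long the server's echo takes to arrive.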
81 | n, err := conn.Write(buffer) 82 | total_size += n 83 | fmt.Printf("client write total_size = %v n = %v\n", total_size, n) 84 | start := time.Now() 85 | if serr, ok := err.(*net.OpError); ok{ 86 | fmt.Printf("write serr = %T %+v %v\n", serr, serr, reflect.TypeOf(serr)) 87 | fmt.Printf("aaaa = %T %+v %v\n", serr.Err, serr.Err, reflect.TypeOf(serr.Err)) 88 | fmt.Printf("aaaa = %T %+v %v\n", serr.Err.(*os.SyscallError).Err, serr.Err.(*os.SyscallError).Err, reflect.TypeOf(serr.Err.(*os.SyscallError).Err)) 89 | } 90 | 91 | n, err = conn.Read(buffer) 92 | fmt.Printf("client recv echo n = %v\n", n) 93 | end := time.Now() 94 | if err != nil { 95 | fmt.Printf("write err = %T %+v %v\n", err, err, reflect.TypeOf(err)) 96 | } 97 | fmt.Printf("RTT = %v start = %v end = %v\n", end.Sub(start).Nanoseconds(), start.Nanosecond(), end.Nanosecond()) 98 | 99 | //info := getTCP_Info(conn) 100 | //fmt.Printf("Rtt:%v\tRecv_RTT:%v\tSend_ss:%v\tRcv_ss:%v\tSnd_mss:%v\nAck_recv:%v\nAck_send:%v\ndata_recv:%v\ndata_sent:%v\n", 101 | // info.Rtt, info.Rcv_rtt, info.Snd_ssthresh, info.Rcv_ssthresh, info.Snd_mss, info.Last_ack_recv, info.Last_ack_sent, info.Last_data_recv, info.Last_data_sent) 102 | } 103 | fmt.Printf("Client stop\n") 104 | } 105 | // 106 | //func getTCP_Info(conn net.Conn) *unix.TCPInfo{ 107 | // file, err:= conn.(*net.TCPConn).File() 108 | // if err != nil { 109 | // fmt.Printf("File err :%v", err) 110 | // } 111 | // fd := file.Fd() 112 | // info, err := unix.GetsockoptTCPInfo(int(fd), unix.SOL_TCP, unix.TCP_INFO) 113 | // return info 114 | //} 115 | // 116 | //func main(){ 117 | // go server() 118 | // go client() 119 | // time.Sleep(time.Second *10) 120 | //} -------------------------------------------------------------------------------- /iperf_tcp.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "io" 5 | "net" 6 | "os" 7 | "runtime" 8 | "strconv" 9 | "time" 10 | ) 11 | 12 | type tcp_proto struct{ 13 | } 14 | 15 | func (tcp *tcp_proto) name() string{ 16 | return TCP_NAME 17 | } 18 | 19 | func (tcp *tcp_proto) accept(test *iperf_test) (net.Conn, error){ 20 | log.Debugf("Enter TCP accept") 21 | conn, err := test.proto_listener.Accept() 22 | if err != nil{ 23 | return nil, err 24 | } 25 | return conn, err 26 | } 27 | 28 | func (tcp *tcp_proto) listen(test *iperf_test) (net.Listener, error){ 29 | log.Debugf("Enter TCP listen") 30 | // continue use the formal listener 31 | return test.listener, nil 32 | } 33 | 34 | func (tcp *tcp_proto) connect(test *iperf_test) (net.Conn, error){ 35 | log.Debugf("Enter TCP connect") 36 | tcpAddr, err := net.ResolveTCPAddr("tcp4", test.addr + ":" + strconv.Itoa(int(test.port))) 37 | if err != nil { 38 | return nil, err 39 | } 40 | conn, err := net.DialTCP("tcp", nil, tcpAddr) 41 | if err != nil { 42 | return nil, err 43 | } 44 | conn.SetDeadline(time.Now().Add(time.Duration(test.duration + 5)*time.Second)) 45 | return conn, nil 46 | } 47 | 48 | func (tcp *tcp_proto) send(sp *iperf_stream) int{ 49 | // write is blocking 50 | n, err := sp.conn.(*net.TCPConn).Write(sp.buffer) 51 | if err != nil { 52 | if serr, ok := err.(*net.OpError); ok{ 53 | log.Debugf("tcp conn already close = %v", serr) 54 | return -1 55 | } else if err == os.ErrClosed || err == io.ErrClosedPipe{ 56 | log.Debugf("send tcp socket close.") 57 | return -1 58 | } 59 | log.Errorf("tcp write err = %T %v",err, err) 60 | return -2 61 | } 62 | if n < 0 { 63 | return n 64 | } 65 | sp.result.bytes_sent += uint64(n) 66 | 
sp.result.bytes_sent_this_interval += uint64(n)
67 |     //log.Debugf("tcp send %v bytes of total %v", n, sp.result.bytes_sent)
68 |     return n
69 | }
70 | 
71 | func (tcp *tcp_proto) recv(sp *iperf_stream) int {
72 |     // recv is blocking
73 |     n, err := sp.conn.(*net.TCPConn).Read(sp.buffer)
74 | 
75 |     if err != nil {
76 |         if serr, ok := err.(*net.OpError); ok {
77 |             log.Debugf("tcp conn already close = %v", serr)
78 |             return -1
79 |         } else if err == io.EOF || err == os.ErrClosed || err == io.ErrClosedPipe {
80 |             log.Debugf("recv tcp socket close. EOF")
81 |             return -1
82 |         }
83 |         log.Errorf("tcp recv err = %T %v", err, err)
84 |         return -2
85 |     }
86 |     if n < 0 {
87 |         return n
88 |     }
89 |     if sp.test.state == TEST_RUNNING {
90 |         sp.result.bytes_received += uint64(n)
91 |         sp.result.bytes_received_this_interval += uint64(n)
92 |     }
93 |     //log.Debugf("tcp recv %v bytes of total %v", n, sp.result.bytes_received)
94 |     return n
95 | }
96 | 
97 | func (tcp *tcp_proto) init(test *iperf_test) int {
98 |     if test.no_delay {
99 |         for _, sp := range test.streams {
100 |             err := sp.conn.(*net.TCPConn).SetNoDelay(test.no_delay)
101 |             if err != nil {
102 |                 return -1
103 |             }
104 |         }
105 |     }
106 |     return 0
107 | }
108 | 
109 | func (tcp *tcp_proto) stats_callback(test *iperf_test, sp *iperf_stream, temp_result *iperf_interval_results) int {
110 |     if test.proto.name() == TCP_NAME && has_tcpInfo() { // only linux has tcp info
111 |         rp := sp.result
112 |         save_tcpInfo(sp, temp_result)
113 |         total_retrans := temp_result.interval_retrans // cumulative total stored by save_tcpInfo
114 |         temp_result.interval_retrans = total_retrans - rp.stream_prev_total_retrans
115 |         rp.stream_retrans += temp_result.interval_retrans
116 |         rp.stream_prev_total_retrans = total_retrans
117 |         if rp.stream_min_rtt == 0 || temp_result.rtt < rp.stream_min_rtt {
118 |             rp.stream_min_rtt = temp_result.rtt
119 |         }
120 |         if rp.stream_max_rtt == 0 || temp_result.rtt > rp.stream_max_rtt {
121 |             rp.stream_max_rtt = temp_result.rtt
122 |         }
123 |         rp.stream_sum_rtt += temp_result.rtt
124 |         rp.stream_cnt_rtt++
125 |     }
126 |     return 0
127 | }
128 | 
129 | func (tcp *tcp_proto) teardown(test *iperf_test) int {
130 |     return 0
131 | }
132 | 
133 | func has_tcpInfo() bool {
134 |     switch runtime.GOOS {
135 |     case "windows":
136 |         return false
137 |     case "linux":
138 |         return true
139 |     default:
140 |         return false
141 |     }
142 | }
143 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # iperf-go
2 | 
3 | Established network-testing tools such as NS2 (Network Simulator) have a steep learning curve and support protocols not written in C/C++ poorly, while lightweight tools such as iperf3 leave little architectural room for extending the set of supported protocols: the latest version still handles only TCP, UDP, and SCTP. Application-layer protocols, however, number in the hundreds, and some developers need to benchmark custom ones. iperf-go fills this gap: **by implementing a very simple interface, it can measure the network speed of user-defined protocols**.
4 | 
5 | The implementation refers to the C source code of iperf3 and is written in Go.
6 | 
7 | ## Usage
8 | 
9 | ### TCP speed test
10 | 
11 | Server side
12 | 
13 |     ./iperf-go -s
14 | 
15 | On the client, start a TCP speed test with the default parameters
16 | 
17 |     ./iperf-go -c
18 |     ./iperf-go -c -R // -R means the server sends, i.e. measure downlink
19 | 
20 | ### KCP speed test
21 | 
22 | [KCP project code](https://github.com/xtaci/kcp-go)
23 | 
24 | Server side
25 | 
26 |     ./iperf-go -s
27 | 
28 | On the client, start a KCP speed test
29 | 
30 |     ./iperf-go -c -proto kcp
31 |     ./iperf-go -c -proto kcp -sw 512 -rw 512 // set the send/receive window sizes
32 | 
33 | ### Other parameters
34 | For more detailed parameters, see `./iperf-go -h`
35 | 
36 | > ./iperf-go -h
37 | Usage of ./iperf-go:
38 | -D no delay option
39 | -P uint
40 | The number of simultaneous connections (default 1)
41 | -R reverse mode. client receive, server send
42 | -b uint
43 | bandwidth limit. (Mb/s)
44 | -c string
45 | client side (default "127.0.0.1")
46 | -d uint
47 | duration (s) (default 10)
48 | -debug
49 | debug mode
50 | -f uint
51 | flush interval for rudp (ms) (default 10)
52 | -fr uint
53 | rudp fast resend strategy. 0 indicate turn off fast resend
54 | -h this help
55 | -i uint
56 | test interval (ms) (default 1000)
57 | -info
58 | info mode
59 | -l uint
60 | send/read block size (default 4096)
61 | -nc
62 | no congestion control or BBR (default true)
63 | -p uint
64 | connect/listen port (default 5201)
65 | -proto string
66 | protocol under test (default "tcp")
67 | -rb uint
68 | read buffer size (Kb) (default 4096)
69 | -rw uint
70 | rudp receive window size (default 512)
71 | -s server side
72 | -sw uint
73 | rudp send window size (default 10)
74 | -wb uint
75 | write buffer size (Kb) (default 4096)
76 | 
77 | ### Custom protocol speed test
78 | 
79 | To benchmark an application-layer protocol of your own, you only need to implement a few simple interfaces (a minimal sketch follows this section).
80 | 
81 |     type protocol interface {
82 |         // name returns the protocol name
83 |         name() string
84 |         accept(test *iperf_test) (net.Conn, error)
85 |         listen(test *iperf_test) (net.Listener, error)
86 |         connect(test *iperf_test) (net.Conn, error)
87 |         send(test *iperf_stream) int
88 |         recv(test *iperf_stream) int
89 |         // init will be called before send/recv data
90 |         init(test *iperf_test) int
91 |         // teardown will be called after the test finishes
92 |         teardown(test *iperf_test) int
93 |         // stats_callback is invoked periodically; gather protocol-specific statistics here
94 |         stats_callback(test *iperf_test, sp *iperf_stream, temp_result *iperf_interval_results) int
95 |     }
96 | 
97 | Once this interface is implemented you get the basic bandwidth figures; RTT, packet loss, and any other custom statistics have to be fetched from the protocol inside `stats_callback`.
98 | 
99 | For a concrete implementation, see the existing `iperf_rudp.go`.
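As an illustration, here is a minimal sketch of what such an implementation could look like. It is not part of the repository: `dummy_proto` is a hypothetical protocol that simply delegates to plain TCP, error handling is reduced to the bare minimum, and the surrounding types (`iperf_test`, `iperf_stream`, `TEST_RUNNING`) are the ones defined in `iperf.go`. The file would live in package main next to the other `iperf_*.go` files, using only `net` and `strconv` from the standard library.

    // dummy_proto is a hypothetical example protocol that satisfies the
    // protocol interface by delegating everything to plain TCP.
    type dummy_proto struct{}

    func (d *dummy_proto) name() string { return "dummy" }

    func (d *dummy_proto) listen(test *iperf_test) (net.Listener, error) {
        return net.Listen("tcp", ":"+strconv.Itoa(int(test.port)))
    }

    func (d *dummy_proto) accept(test *iperf_test) (net.Conn, error) {
        return test.proto_listener.Accept()
    }

    func (d *dummy_proto) connect(test *iperf_test) (net.Conn, error) {
        return net.Dial("tcp", test.addr+":"+strconv.Itoa(int(test.port)))
    }

    // send writes one block and accounts for it; -1 tells the caller
    // that the stream is finished.
    func (d *dummy_proto) send(sp *iperf_stream) int {
        n, err := sp.conn.Write(sp.buffer)
        if err != nil {
            return -1
        }
        sp.result.bytes_sent += uint64(n)
        sp.result.bytes_sent_this_interval += uint64(n)
        return n
    }

    func (d *dummy_proto) recv(sp *iperf_stream) int {
        n, err := sp.conn.Read(sp.buffer)
        if err != nil {
            return -1
        }
        if sp.test.state == TEST_RUNNING {
            sp.result.bytes_received += uint64(n)
            sp.result.bytes_received_this_interval += uint64(n)
        }
        return n
    }

    // Nothing to configure or tear down for plain TCP.
    func (d *dummy_proto) init(test *iperf_test) int     { return 0 }
    func (d *dummy_proto) teardown(test *iperf_test) int { return 0 }

    // stats_callback is the place to copy RTT, retransmission or loss
    // counters from the protocol into temp_result every interval.
    func (d *dummy_proto) stats_callback(test *iperf_test, sp *iperf_stream, temp_result *iperf_interval_results) int {
        return 0
    }

Beyond implementing the interface, you would presumably also add the new name to `PROTOCOL_LIST` in `iperf.go` and register the implementation wherever `set_protocol` looks protocols up (in `iperf_api.go`, which is not shown here), so that `-proto dummy` resolves to it.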
100 | 
101 | ## Sample output
102 | 
103 |     Server listening on 5201
104 |     Accept connection from client: 221.4.34.225:22567
105 |     [ ID] Interval Transfer Bandwidth RTT Retrans Retrans(%)
106 |     [ 0] 0.00-1.00 sec 1.50 MB 12.00 Mb/s 172.6ms 0 0.00%
107 |     [ 0] 1.00-2.00 sec 3.12 MB 25.00 Mb/s 169.1ms 0 0.00%
108 |     [ 0] 2.00-3.00 sec 2.75 MB 22.00 Mb/s 205.3ms 378 19.14%
109 |     [ 0] 3.00-4.00 sec 2.00 MB 16.00 Mb/s 164.8ms 1490 103.73%
110 |     [ 0] 4.00-5.00 sec 4.75 MB 38.00 Mb/s 162.9ms 873 25.59%
111 |     [ 0] 5.00-6.00 sec 1.25 MB 10.00 Mb/s 163.3ms 0 0.00%
112 |     [ 0] 6.00-7.00 sec 2.50 MB 20.00 Mb/s 163.3ms 0 0.00%
113 |     [ 0] 7.00-8.00 sec 2.62 MB 21.00 Mb/s 163.5ms 6 0.32%
114 |     [ 0] 8.00-9.00 sec 2.38 MB 19.00 Mb/s 162.7ms 0 0.00%
115 |     [ 0] 9.00-10.19 sec 2.50 MB 20.00 Mb/s 163.1ms 0 0.00%
116 |     - - - - - - - - - - - - - - - - SUMMARY - - - - - - - - - - - - - - - -
117 |     [ ID] Interval Transfer Bandwidth RTT Retrans Retrans(%)
118 |     [ 0] 0.00-10.19 sec 25.50 MB 20.40 Mb/s 169.1ms 2747 0.15% [SENDER]
119 | 
120 | ## Binary download
121 | 
122 | release: https://github.com/ZezhongWang/iperf-go/releases
123 | 
124 | ## Internal state machine
125 | 
126 | ![stateMachine](stateMachine.png)
127 | 
--------------------------------------------------------------------------------
/iperf_kcp.go:
--------------------------------------------------------------------------------
1 | package main
2 | 
3 | import (
4 |     KCP "github.com/xtaci/kcp-go"
5 |     "encoding/binary"
6 |     "io"
7 |     "net"
8 |     "os"
9 |     "strconv"
10 |     "time"
11 | )
12 | 
13 | 
14 | type kcp_proto struct {
15 | }
16 | 
17 | func (kcp *kcp_proto) name() string {
18 |     return KCP_NAME
19 | }
20 | 
21 | func (kcp *kcp_proto) accept(test *iperf_test) (net.Conn, error) {
22 |     log.Debugf("Enter KCP accept")
23 |     conn, err := test.proto_listener.Accept()
24 |     if err != nil {
25 |         return nil, err
26 |     }
27 |     buf := make([]byte, 4)
28 |     n, err := conn.Read(buf)
29 |     signal := binary.LittleEndian.Uint32(buf[:])
30 |     if err != nil || n != 4 || signal != ACCEPT_SIGNAL {
31 |         log.Errorf("KCP Receive Unexpected signal")
32 |     }
33 |     log.Debugf("KCP accept succeed. signal = %v", signal)
34 |     return conn, nil
35 | }
36 | 
37 | func (kcp *kcp_proto) listen(test *iperf_test) (net.Listener, error) {
38 |     listener, err := KCP.ListenWithOptions(":"+strconv.Itoa(int(test.port)), nil, int(test.setting.data_shards), int(test.setting.parity_shards))
39 |     if err != nil {
40 |         return nil, err
41 |     }
42 |     // all incoming conns share the same underlying packet conn, so the buffer should be large
43 |     listener.SetReadBuffer(int(test.setting.read_buf_size))
44 |     listener.SetWriteBuffer(int(test.setting.write_buf_size))
45 |     return listener, nil
46 | }
47 | 
48 | func (kcp *kcp_proto) connect(test *iperf_test) (net.Conn, error) {
49 |     conn, err := KCP.DialWithOptions(test.addr+":"+strconv.Itoa(int(test.port)), nil, int(test.setting.data_shards), int(test.setting.parity_shards))
50 |     if err != nil {
51 |         return nil, err
52 |     }
53 |     buf := make([]byte, 4)
54 |     binary.LittleEndian.PutUint32(buf, ACCEPT_SIGNAL)
55 |     n, err := conn.Write(buf)
56 |     if err != nil || n != 4 {
57 |         log.Errorf("KCP send accept signal failed")
58 |     }
59 |     log.Debugf("KCP connect succeed.")
60 |     return conn, nil
61 | }
62 | 
63 | func (kcp *kcp_proto) send(sp *iperf_stream) int {
64 |     n, err := sp.conn.(*KCP.UDPSession).Write(sp.buffer)
65 |     if err != nil {
66 |         if serr, ok := err.(*net.OpError); ok {
67 |             log.Debugf("kcp conn already close = %v", serr)
68 |             return -1
69 |         } else if err.Error() == "broken pipe" {
70 |             log.Debugf("kcp conn already close = %v", err.Error())
71 |             return -1
72 |         } else if err == os.ErrClosed || err == io.ErrClosedPipe {
73 |             log.Debugf("send kcp socket close.")
74 |             return -1
75 |         }
76 |         log.Errorf("kcp write err = %T %v", err, err)
77 |         return -2
78 |     }
79 |     if n < 0 {
80 |         log.Errorf("kcp write err. 
n = %v" ,n) 81 | return n 82 | } 83 | sp.result.bytes_sent += uint64(n) 84 | sp.result.bytes_sent_this_interval += uint64(n) 85 | //log.Debugf("KCP send %v bytes of total %v", n, sp.result.bytes_sent) 86 | return n 87 | } 88 | 89 | func (kcp *kcp_proto) recv(sp *iperf_stream) int{ 90 | // recv is blocking 91 | n, err := sp.conn.(*KCP.UDPSession).Read(sp.buffer) 92 | 93 | if err != nil { 94 | if serr, ok := err.(*net.OpError); ok{ 95 | log.Debugf("kcp conn already close = %v", serr) 96 | return -1 97 | } else if err.Error() == "broken pipe"{ 98 | log.Debugf("kcp conn already close = %v", err.Error()) 99 | return -1 100 | } else if err == io.EOF || err == os.ErrClosed || err == io.ErrClosedPipe{ 101 | log.Debugf("recv kcp socket close. EOF") 102 | return -1 103 | } 104 | log.Errorf("kcp recv err = %T %v",err, err) 105 | return -2 106 | } 107 | if n < 0 { 108 | return n 109 | } 110 | if sp.test.state == TEST_RUNNING { 111 | sp.result.bytes_received += uint64(n) 112 | sp.result.bytes_received_this_interval += uint64(n) 113 | } 114 | //log.Debugf("KCP recv %v bytes of total %v", n, sp.result.bytes_received) 115 | return n 116 | } 117 | 118 | func (kcp *kcp_proto) init(test *iperf_test) int{ 119 | for _, sp := range test.streams { 120 | sp.conn.(*KCP.UDPSession).SetReadBuffer(int(test.setting.read_buf_size)) 121 | sp.conn.(*KCP.UDPSession).SetWriteBuffer(int(test.setting.write_buf_size)) 122 | sp.conn.(*KCP.UDPSession).SetWindowSize(int(test.setting.snd_wnd), int(test.setting.rcv_wnd)) 123 | sp.conn.(*KCP.UDPSession).SetStreamMode(true) 124 | sp.conn.(*KCP.UDPSession).SetDSCP(46) 125 | sp.conn.(*KCP.UDPSession).SetMtu(1400) 126 | sp.conn.(*KCP.UDPSession).SetACKNoDelay(false) 127 | sp.conn.(*KCP.UDPSession).SetDeadline(time.Now().Add(time.Minute)) 128 | var no_delay, resend, nc int 129 | if test.no_delay { 130 | no_delay = 1 131 | } else { 132 | no_delay = 0 133 | } 134 | if test.setting.no_cong { 135 | nc = 1 136 | } else { 137 | nc = 0 138 | } 139 | resend = int(test.setting.fast_resend) 140 | sp.conn.(*KCP.UDPSession).SetNoDelay(no_delay, int(test.setting.flush_interval), resend, nc) 141 | } 142 | return 0 143 | } 144 | 145 | func (kcp *kcp_proto) stats_callback(test *iperf_test, sp *iperf_stream, temp_result *iperf_interval_results) int { 146 | rp := sp.result 147 | total_retrans := uint(KCP.DefaultSnmp.RetransSegs) 148 | total_lost := uint(KCP.DefaultSnmp.LostSegs) 149 | total_early_retrans := uint(KCP.DefaultSnmp.EarlyRetransSegs) 150 | total_fast_retrans := uint(KCP.DefaultSnmp.FastRetransSegs) 151 | total_recovers := uint(KCP.DefaultSnmp.FECRecovered) 152 | total_in_pkts := uint(KCP.DefaultSnmp.InPkts) 153 | total_in_segs := uint(KCP.DefaultSnmp.InSegs) 154 | total_out_pkts := uint(KCP.DefaultSnmp.OutPkts) 155 | total_out_segs := uint(KCP.DefaultSnmp.OutSegs) 156 | // retrans 157 | temp_result.interval_retrans = total_retrans - rp.stream_prev_total_retrans 158 | rp.stream_retrans += temp_result.interval_retrans 159 | rp.stream_prev_total_retrans = total_retrans 160 | // lost 161 | temp_result.interval_lost = total_lost - rp.stream_prev_total_lost 162 | rp.stream_lost += temp_result.interval_lost 163 | rp.stream_prev_total_lost = total_lost 164 | // early retrans 165 | temp_result.interval_early_retrans = total_early_retrans - rp.stream_prev_total_early_retrans 166 | rp.stream_early_retrans += temp_result.interval_early_retrans 167 | rp.stream_prev_total_early_retrans = total_early_retrans 168 | // fast retrans 169 | temp_result.interval_fast_retrans = total_fast_retrans - 
rp.stream_prev_total_fast_retrans 170 | rp.stream_fast_retrans += temp_result.interval_fast_retrans 171 | rp.stream_prev_total_fast_retrans = total_fast_retrans 172 | // recover 173 | rp.stream_recovers = total_recovers 174 | // packets receive 175 | rp.stream_in_pkts = total_in_pkts 176 | rp.stream_out_pkts = total_out_pkts 177 | // segs receive 178 | rp.stream_in_segs = total_in_segs 179 | rp.stream_out_segs = total_out_segs 180 | 181 | temp_result.rtt = sp.conn.(*KCP.UDPSession).GetRTT() * 1000 // ms to micro sec 182 | if rp.stream_min_rtt == 0 || temp_result.rtt < rp.stream_min_rtt { 183 | rp.stream_min_rtt = temp_result.rtt 184 | } 185 | if rp.stream_max_rtt == 0 || temp_result.rtt > rp.stream_max_rtt { 186 | rp.stream_max_rtt = temp_result.rtt 187 | } 188 | rp.stream_sum_rtt += temp_result.rtt 189 | rp.stream_cnt_rtt ++ 190 | return 0 191 | } 192 | 193 | func (kcp *kcp_proto) teardown(test *iperf_test) int{ 194 | return 0 195 | } -------------------------------------------------------------------------------- /iperf_rudp.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | RUDP "../rudp-go" 5 | "encoding/binary" 6 | "fmt" 7 | "github.com/op/go-logging" 8 | "io" 9 | "net" 10 | "os" 11 | "strconv" 12 | ) 13 | 14 | 15 | type rudp_proto struct{ 16 | } 17 | 18 | func (rudp *rudp_proto) name() string{ 19 | return RUDP_NAME 20 | } 21 | 22 | func (rudp *rudp_proto) accept(test *iperf_test) (net.Conn, error){ 23 | log.Debugf("Enter RUDP accept") 24 | conn, err := test.proto_listener.Accept() 25 | if err != nil{ 26 | return nil, err 27 | } 28 | buf := make([]byte, 4) 29 | n, err := conn.Read(buf) 30 | signal := binary.LittleEndian.Uint32(buf[:]) 31 | if err != nil || n != 4 || signal != ACCEPT_SIGNAL{ 32 | log.Errorf("RUDP Receive Unexpected signal") 33 | } 34 | log.Debugf("RUDP accept succeed. 
signal = %v", signal)
35 |     return conn, nil
36 | }
37 | 
38 | func (rudp *rudp_proto) listen(test *iperf_test) (net.Listener, error) {
39 |     listener, err := RUDP.ListenWithOptions(":"+strconv.Itoa(int(test.port)), int(test.setting.data_shards), int(test.setting.parity_shards))
40 |     if err != nil {
41 |         return nil, err
42 |     }
43 |     // all incoming conns share the same underlying packet conn, so the buffer should be large
44 |     listener.SetReadBuffer(int(test.setting.read_buf_size))
45 |     listener.SetWriteBuffer(int(test.setting.write_buf_size))
46 |     return listener, nil
47 | }
48 | 
49 | func (rudp *rudp_proto) connect(test *iperf_test) (net.Conn, error) {
50 |     conn, err := RUDP.ConnectRUDP(test.addr+":"+strconv.Itoa(int(test.port)), int(test.setting.data_shards), int(test.setting.parity_shards))
51 |     if err != nil {
52 |         return nil, err
53 |     }
54 |     buf := make([]byte, 4)
55 |     binary.LittleEndian.PutUint32(buf, ACCEPT_SIGNAL)
56 |     n, err := conn.Write(buf)
57 |     if err != nil || n != 4 {
58 |         log.Errorf("RUDP send accept signal failed")
59 |     }
60 |     log.Debugf("RUDP connect succeed.")
61 |     return conn, nil
62 | }
63 | 
64 | func (rudp *rudp_proto) send(sp *iperf_stream) int {
65 |     n, err := sp.conn.(*RUDP.RUDPSession).Write(sp.buffer)
66 |     if err != nil {
67 |         if serr, ok := err.(*net.OpError); ok {
68 |             log.Debugf("rudp conn already close = %v", serr)
69 |             return -1
70 |         } else if err.Error() == "broken pipe" {
71 |             log.Debugf("rudp conn already close = %v", err.Error())
72 |             return -1
73 |         } else if err == os.ErrClosed || err == io.ErrClosedPipe {
74 |             log.Debugf("send rudp socket close.")
75 |             return -1
76 |         }
77 |         log.Errorf("rudp write err = %T %v", err, err)
78 |         return -2
79 |     }
80 |     if n < 0 {
81 |         log.Errorf("rudp write err. n = %v", n)
82 |         return n
83 |     }
84 |     sp.result.bytes_sent += uint64(n)
85 |     sp.result.bytes_sent_this_interval += uint64(n)
86 |     //log.Debugf("RUDP send %v bytes of total %v", n, sp.result.bytes_sent)
87 |     return n
88 | }
89 | 
90 | func (rudp *rudp_proto) recv(sp *iperf_stream) int {
91 |     // recv is blocking
92 |     n, err := sp.conn.(*RUDP.RUDPSession).Read(sp.buffer)
93 | 
94 |     if err != nil {
95 |         if serr, ok := err.(*net.OpError); ok {
96 |             log.Debugf("rudp conn already close = %v", serr)
97 |             return -1
98 |         } else if err.Error() == "broken pipe" {
99 |             log.Debugf("rudp conn already close = %v", err.Error())
100 |             return -1
101 |         } else if err == io.EOF || err == os.ErrClosed || err == io.ErrClosedPipe {
102 |             log.Debugf("recv rudp socket close. 
EOF") 103 | return -1 104 | } 105 | log.Errorf("rudp recv err = %T %v",err, err) 106 | return -2 107 | } 108 | if n < 0 { 109 | return n 110 | } 111 | if sp.test.state == TEST_RUNNING { 112 | sp.result.bytes_received += uint64(n) 113 | sp.result.bytes_received_this_interval += uint64(n) 114 | } 115 | //log.Debugf("RUDP recv %v bytes of total %v", n, sp.result.bytes_received) 116 | return n 117 | } 118 | 119 | func (rudp *rudp_proto) init(test *iperf_test) int{ 120 | for _, sp := range test.streams { 121 | sp.conn.(*RUDP.RUDPSession).SetReadBuffer(int(test.setting.read_buf_size)) 122 | sp.conn.(*RUDP.RUDPSession).SetWriteBuffer(int(test.setting.write_buf_size)) 123 | sp.conn.(*RUDP.RUDPSession).SetWindowSize(int(test.setting.snd_wnd), int(test.setting.rcv_wnd)) 124 | sp.conn.(*RUDP.RUDPSession).SetStreamMode(true) 125 | var no_delay, resend, nc int 126 | if test.no_delay { 127 | no_delay = 1 128 | } else { 129 | no_delay = 0 130 | } 131 | if test.setting.no_cong { 132 | nc = 1 133 | } else { 134 | nc = 0 135 | } 136 | resend = int(test.setting.fast_resend) 137 | sp.conn.(*RUDP.RUDPSession).SetNoDelay(no_delay, int(test.setting.flush_interval), resend, nc) 138 | } 139 | return 0 140 | } 141 | 142 | func (rudp *rudp_proto) stats_callback(test *iperf_test, sp *iperf_stream, temp_result *iperf_interval_results) int { 143 | rp := sp.result 144 | total_retrans := uint(RUDP.DefaultSnmp.RetransSegs) 145 | total_lost := uint(RUDP.DefaultSnmp.LostSegs) 146 | total_early_retrans := uint(RUDP.DefaultSnmp.EarlyRetransSegs) 147 | total_fast_retrans := uint(RUDP.DefaultSnmp.FastRetransSegs) 148 | total_recovers := uint(RUDP.DefaultSnmp.FECRecovered) 149 | total_in_pkts := uint(RUDP.DefaultSnmp.InPkts) 150 | total_in_segs := uint(RUDP.DefaultSnmp.InSegs) 151 | total_out_pkts := uint(RUDP.DefaultSnmp.OutPkts) 152 | total_out_segs := uint(RUDP.DefaultSnmp.OutSegs) 153 | // retrans 154 | temp_result.interval_retrans = total_retrans - rp.stream_prev_total_retrans 155 | rp.stream_retrans += temp_result.interval_retrans 156 | rp.stream_prev_total_retrans = total_retrans 157 | // lost 158 | temp_result.interval_lost = total_lost - rp.stream_prev_total_lost 159 | rp.stream_lost += temp_result.interval_lost 160 | rp.stream_prev_total_lost = total_lost 161 | // early retrans 162 | temp_result.interval_early_retrans = total_early_retrans - rp.stream_prev_total_early_retrans 163 | rp.stream_early_retrans += temp_result.interval_early_retrans 164 | rp.stream_prev_total_early_retrans = total_early_retrans 165 | // fast retrans 166 | temp_result.interval_fast_retrans = total_fast_retrans - rp.stream_prev_total_fast_retrans 167 | rp.stream_fast_retrans += temp_result.interval_fast_retrans 168 | rp.stream_prev_total_fast_retrans = total_fast_retrans 169 | // recover 170 | rp.stream_recovers = total_recovers 171 | // packets receive 172 | rp.stream_in_pkts = total_in_pkts 173 | rp.stream_out_pkts = total_out_pkts 174 | // segs receive 175 | rp.stream_in_segs = total_in_segs 176 | rp.stream_out_segs = total_out_segs 177 | 178 | temp_result.rto = sp.conn.(*RUDP.RUDPSession).GetRTO() * 1000 179 | temp_result.rtt = sp.conn.(*RUDP.RUDPSession).GetRTT() * 1000 // ms to micro sec 180 | if rp.stream_min_rtt == 0 || temp_result.rtt < rp.stream_min_rtt { 181 | rp.stream_min_rtt = temp_result.rtt 182 | } 183 | if rp.stream_max_rtt == 0 || temp_result.rtt > rp.stream_max_rtt { 184 | rp.stream_max_rtt = temp_result.rtt 185 | } 186 | rp.stream_sum_rtt += temp_result.rtt 187 | rp.stream_cnt_rtt ++ 188 | return 0 189 | } 190 | 191 | func 
(rudp *rudp_proto)teardown(test *iperf_test) int{ 192 | if logging.GetLevel("rudp") == logging.INFO || 193 | logging.GetLevel("rudp") == logging.DEBUG{ 194 | header := RUDP.DefaultSnmp.Header() 195 | slices := RUDP.DefaultSnmp.ToSlice() 196 | for k := range header{ 197 | fmt.Printf("%s: %v\t", header[k], slices[k]) 198 | } 199 | fmt.Printf("\n") 200 | if test.setting.no_cong == false { 201 | RUDP.PrintTracker() 202 | } 203 | } 204 | return 0 205 | } -------------------------------------------------------------------------------- /iperf_client.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/binary" 5 | "fmt" 6 | "io" 7 | "net" 8 | "os" 9 | "strconv" 10 | "time" 11 | ) 12 | 13 | func (test *iperf_test) create_streams() int { 14 | for i := uint(0) ; i < test.stream_num ; i ++ { 15 | conn, err := test.proto.connect(test) 16 | if err != nil { 17 | log.Errorf("Connect failed. err = %v", err) 18 | return -1 19 | } 20 | var sp *iperf_stream 21 | if test.mode == IPERF_SENDER { 22 | sp = test.new_stream(conn, SENDER_STREAM) 23 | } else { 24 | sp = test.new_stream(conn, RECEIVER_STREAM) 25 | } 26 | test.streams = append(test.streams, sp) 27 | } 28 | return 0 29 | } 30 | 31 | func (test *iperf_test) create_client_timer() int { 32 | now := time.Now() 33 | cd := TimerClientData{p: test} 34 | test.timer = timer_create(now, client_timer_proc, cd, test.duration * 1000 ) // convert sec to ms 35 | times := test.duration * 1000 / test.interval 36 | test.stats_ticker = ticker_create(now, client_stats_ticker_proc, cd, test.interval, times - 1) 37 | test.report_ticker = ticker_create(now, client_report_ticker_proc, cd, test.interval, times - 1) 38 | if test.timer.timer == nil || test.stats_ticker.ticker == nil || test.report_ticker.ticker == nil { 39 | log.Error("timer create failed.") 40 | } 41 | return 0 42 | } 43 | 44 | func client_timer_proc(data TimerClientData, now time.Time){ 45 | log.Debugf("Enter client_timer_proc") 46 | test := data.p.(*iperf_test) 47 | test.timer.done <- true 48 | test.done = true // will end send/recv in iperf_send/iperf_recv, and then triggered TEST_END 49 | test.timer.timer = nil 50 | } 51 | 52 | func client_stats_ticker_proc(data TimerClientData, now time.Time){ 53 | test := data.p.(*iperf_test) 54 | if test.done{ 55 | return 56 | } 57 | if test.stats_callback != nil{ 58 | test.stats_callback(test) 59 | } 60 | } 61 | 62 | func client_report_ticker_proc(data TimerClientData, now time.Time){ 63 | test := data.p.(*iperf_test) 64 | if test.done{ 65 | return 66 | } 67 | if test.reporter_callback != nil{ 68 | test.reporter_callback(test) 69 | } 70 | } 71 | 72 | func (test *iperf_test) create_client_omit_timer() int { 73 | // undo, depend on which kind of timer 74 | return 0 75 | } 76 | 77 | func send_ticker_proc(data TimerClientData, now time.Time){ 78 | sp := data.p.(*iperf_stream) 79 | sp.test.check_throttle(sp, now) 80 | } 81 | 82 | func (test *iperf_test) client_end(){ 83 | log.Debugf("Enter client_end") 84 | for _, sp := range test.streams{ 85 | sp.conn.Close() 86 | } 87 | if test.reporter_callback != nil { // call only after exchange_result finish 88 | test.reporter_callback(test) 89 | } 90 | test.proto.teardown(test) 91 | if test.set_send_state(IPERF_DONE) < 0 { 92 | log.Errorf("set_send_state failed") 93 | } 94 | 95 | log.Infof("Client Enter IPerf Done...") 96 | if test.ctrl_conn != nil { 97 | test.ctrl_conn.Close() 98 | } 99 | } 100 | 101 | func (test *iperf_test) handleClientCtrlMsg() { 102 | 
buf := make([]byte, 4)
103 |     for {
104 |         if n, err := test.ctrl_conn.Read(buf); err == nil {
105 |             state := binary.LittleEndian.Uint32(buf[:])
106 |             log.Debugf("Client Ctrl conn receive n = %v state = [%v]", n, state)
107 |             //state, err := strconv.Atoi(string(buf[:n]))
108 | 
109 |             //if err != nil {
110 |             //    log.Errorf("Convert string to int failed. s = %v", string(buf[:n]))
111 |             //    return
112 |             //}
113 |             test.state = uint(state)
114 |             log.Infof("Client Enter %v state...", test.state)
115 |         } else {
116 |             if serr, ok := err.(*net.OpError); ok {
117 |                 log.Infof("Client control connection close. err = %T %v", serr, serr)
118 |                 test.ctrl_conn.Close()
119 |             } else if err == os.ErrClosed || err == io.ErrClosedPipe || err == io.EOF {
120 |                 log.Infof("Client control connection close. err = %T %v", err, err)
121 |             } else {
122 |                 log.Errorf("ctrl_conn read failed. err=%T, %v", err, err)
123 |                 test.ctrl_conn.Close()
124 |             }
125 |             return
126 |         }
127 | 
128 |         switch test.state {
129 |         case IPERF_EXCHANGE_PARAMS:
130 |             if rtn := test.exchange_params(); rtn < 0 {
131 |                 log.Errorf("exchange_params failed. rtn = %v", rtn)
132 |                 return
133 |             }
134 |         case IPERF_CREATE_STREAM:
135 |             if rtn := test.create_streams(); rtn < 0 {
136 |                 log.Errorf("create_streams failed. rtn = %v", rtn)
137 |                 return
138 |             }
139 |         case TEST_START:
140 |             // handle test start
141 |             if rtn := test.init_test(); rtn < 0 {
142 |                 log.Errorf("init_test failed. rtn = %v", rtn)
143 |                 return
144 |             }
145 |             if rtn := test.create_client_timer(); rtn < 0 {
146 |                 log.Errorf("create_client_timer failed. rtn = %v", rtn)
147 |                 return
148 |             }
149 |             if rtn := test.create_client_omit_timer(); rtn < 0 {
150 |                 log.Errorf("create_client_omit_timer failed. rtn = %v", rtn)
151 |                 return
152 |             }
153 |             if test.mode == IPERF_SENDER {
154 |                 if rtn := test.create_sender_ticker(); rtn < 0 {
155 |                     log.Errorf("create_sender_ticker failed. rtn = %v", rtn)
156 |                     return
157 |                 }
158 |             }
159 |         case TEST_RUNNING:
160 |             test.ctrl_chan <- TEST_RUNNING
161 |             break
162 |         case IPERF_EXCHANGE_RESULT:
163 |             if rtn := test.exchange_results(); rtn < 0 {
164 |                 log.Errorf("exchange_results failed. rtn = %v", rtn)
165 |                 return
166 |             }
167 |         case IPERF_DISPLAY_RESULT:
168 |             test.client_end()
169 |         case IPERF_DONE:
170 |             break
171 |         case SERVER_TERMINATE:
172 |             old_state := test.state
173 |             test.state = IPERF_DISPLAY_RESULT
174 |             test.reporter_callback(test)
175 |             test.state = old_state
176 |         default:
177 |             log.Errorf("Unexpected situation with state = %v.", test.state)
178 |             return
179 |         }
180 |     }
181 | }
182 | 
183 | func (test *iperf_test) ConnectServer() int {
184 |     tcpAddr, err := net.ResolveTCPAddr("tcp4", test.addr+":"+strconv.Itoa(int(test.port)))
185 |     if err != nil {
186 |         log.Errorf("Resolve TCP Addr failed. err = %v, addr = %v", err, test.addr+":"+strconv.Itoa(int(test.port)))
187 |         return -1
188 |     }
189 |     conn, err := net.DialTCP("tcp", nil, tcpAddr)
190 |     if err != nil {
191 |         log.Errorf("Connect TCP Addr failed. 
err = %v, addr = %v", err, test.addr+":"+strconv.Itoa(int(test.port)))
192 |         return -1
193 |     }
194 |     test.ctrl_conn = conn
195 |     fmt.Printf("Connect to server %v succeed.\n", test.addr+":"+strconv.Itoa(int(test.port)))
196 |     return 0
197 | }
198 | func (test *iperf_test) run_client() int {
199 | 
200 |     rtn := test.ConnectServer()
201 |     if rtn < 0 {
202 |         log.Errorf("ConnectServer failed")
203 |         return -1
204 |     }
205 | 
206 |     go test.handleClientCtrlMsg()
207 | 
208 |     is_iperf_done := false
209 |     var test_end_num uint = 0
210 |     for !is_iperf_done {
211 |         select {
212 |         case state := <-test.ctrl_chan:
213 |             if state == TEST_RUNNING {
214 |                 // set non-block for non-UDP test. unfinished
215 |                 // Regular mode. Client sends.
216 |                 log.Info("Client enter Test Running state...")
217 |                 for i, sp := range test.streams {
218 |                     if sp.role == SENDER_STREAM {
219 |                         go sp.iperf_send(test)
220 |                         log.Infof("Client Stream %v start sending.", i)
221 |                     } else {
222 |                         go sp.iperf_recv(test)
223 |                         log.Infof("Client Stream %v start receiving.", i)
224 |                     }
225 |                 }
226 |                 log.Info("Create all streams finish...")
227 |             } else if state == TEST_END {
228 |                 test_end_num++
229 |                 if test_end_num < test.stream_num || test_end_num == test.stream_num+1 { // redundant TEST_END signal generated by set_send_state
230 |                     continue
231 |                 } else if test_end_num > test.stream_num+1 {
232 |                     log.Errorf("Receive more TEST_END signal than expected")
233 |                     return -1
234 |                 }
235 |                 log.Infof("Client all Stream closed.")
236 |                 // test_end_num == test.stream_num. all the streams sent the TEST_END signal
237 |                 test.done = true
238 |                 if test.stats_callback != nil {
239 |                     test.stats_callback(test)
240 |                 }
241 |                 if test.set_send_state(TEST_END) < 0 {
242 |                     log.Errorf("set_send_state failed. %v", TEST_END)
243 |                     return -1
244 |                 }
245 |                 log.Info("Client Enter Test End State.")
246 |             } else if state == IPERF_DONE {
247 |                 is_iperf_done = true
248 |             } else {
249 |                 log.Debugf("Channel Unhandled state [%v]", state)
250 |             }
251 |         }
252 |     }
253 |     return 0
254 | }
--------------------------------------------------------------------------------
/iperf_server.go:
--------------------------------------------------------------------------------
1 | package main
2 | 
3 | import (
4 |     "encoding/binary"
5 |     "fmt"
6 |     "io"
7 |     "net"
8 |     "os"
9 |     "strconv"
10 |     "time"
11 | )
12 | 
13 | func (test *iperf_test) server_listen() int {
14 |     listen_addr := ":"
15 |     listen_addr += strconv.Itoa(int(test.port))
16 |     var err error
17 |     test.listener, err = net.Listen("tcp", listen_addr)
18 |     if err != nil {
19 |         return -1
20 |     }
21 |     fmt.Printf("Server listening on %v\n", test.port)
22 |     return 0
23 | }
24 | 
25 | func (test *iperf_test) handleServerCtrlMsg() {
26 |     buf := make([]byte, 4) // only for ctrl state
27 |     for {
28 |         if n, err := test.ctrl_conn.Read(buf); err == nil {
29 |             state := binary.LittleEndian.Uint32(buf[:])
30 |             log.Debugf("Ctrl conn receive n = %v state = [%v]", n, state)
31 |             //if err != nil {
32 |             //    log.Errorf("Convert string to int failed. s = %v", string(buf[:n]))
33 |             //    return
34 |             //}
35 |             test.state = uint(state)
36 |         } else {
37 |             if serr, ok := err.(*net.OpError); ok {
38 |                 log.Infof("Client control connection close. err = %T %v", serr, serr)
39 |                 test.ctrl_conn.Close()
40 |             } else if err == os.ErrClosed || err == io.ErrClosedPipe || err == io.EOF {
41 |                 log.Infof("Client control connection close. err = %T %v", err, err)
42 |             } else {
43 |                 log.Errorf("ctrl_conn read failed. 
err=%T, %v", err, err) 44 | test.ctrl_conn.Close() 45 | } 46 | return 47 | } 48 | 49 | switch test.state { 50 | case TEST_START: 51 | break 52 | case TEST_END: 53 | log.Infof("Server Enter Test End state...") 54 | test.done = true 55 | if test.stats_callback != nil { 56 | test.stats_callback(test) 57 | } 58 | test.close_all_streams() 59 | 60 | /* exchange result mode */ 61 | if test.set_send_state(IPERF_EXCHANGE_RESULT) < 0 { 62 | log.Errorf("set_send_state error") 63 | return 64 | } 65 | log.Infof("Server Enter Exchange Result state...") 66 | if test.exchange_results() < 0{ 67 | log.Errorf("exchange result failed") 68 | return 69 | } 70 | 71 | /* display result mode */ 72 | if test.set_send_state(IPERF_DISPLAY_RESULT) < 0 { 73 | log.Errorf("set_send_state error") 74 | return 75 | } 76 | log.Infof("Server Enter Display Result state...") 77 | if test.reporter_callback != nil { // why call these again 78 | test.reporter_callback(test) 79 | } 80 | //if test.display_results() < 0 { 81 | // log.Errorf("display result failed") 82 | // return 83 | //} 84 | // on_test_finish undo 85 | case IPERF_DONE: 86 | test.state = IPERF_DONE 87 | log.Debugf("Server reach IPERF_DONE") 88 | test.ctrl_chan <- IPERF_DONE 89 | test.proto.teardown(test) 90 | return 91 | case CLIENT_TERMINATE: //not used yet 92 | old_state := test.state 93 | test.state = IPERF_DISPLAY_RESULT 94 | test.reporter_callback(test) 95 | test.state = old_state 96 | 97 | test.close_all_streams() 98 | log.Infof("Client is terminated.") 99 | test.state = IPERF_DONE 100 | break 101 | default: 102 | log.Errorf("Unexpected situation with state = %v.", test.state) 103 | return 104 | } 105 | } 106 | } 107 | 108 | func (test *iperf_test) create_server_timer() int { 109 | now := time.Now() 110 | cd := TimerClientData{p: test} 111 | test.timer = timer_create(now, server_timer_proc, cd, (test.duration + 5) * 1000) // convert sec to ms, add 5 sec to ensure client end first 112 | times := test.duration * 1000 / test.interval 113 | test.stats_ticker = ticker_create(now, server_stats_ticker_proc, cd, test.interval, times - 1) 114 | test.report_ticker = ticker_create(now, server_report_ticker_proc, cd, test.interval, times - 1) 115 | if test.timer.timer == nil || test.stats_ticker.ticker == nil || test.report_ticker.ticker == nil { 116 | log.Error("timer create failed.") 117 | } 118 | return 0 119 | } 120 | 121 | func server_timer_proc(data TimerClientData, now time.Time){ 122 | log.Debugf("Enter server_timer_proc") 123 | test := data.p.(*iperf_test) 124 | if test.done { 125 | return 126 | } 127 | test.done = true 128 | // close all streams 129 | for _, sp := range test.streams{ 130 | sp.conn.Close() 131 | } 132 | test.timer.done <- true 133 | //test.ctrl_conn.Close() // ctrl conn should be closed at last 134 | //log.Infof("Server exceed duration. 
Close control connection.")
135 | }
136 | 
137 | func server_stats_ticker_proc(data TimerClientData, now time.Time) {
138 |     test := data.p.(*iperf_test)
139 |     if test.done {
140 |         return
141 |     }
142 |     if test.stats_callback != nil {
143 |         test.stats_callback(test)
144 |     }
145 | }
146 | 
147 | func server_report_ticker_proc(data TimerClientData, now time.Time) {
148 |     test := data.p.(*iperf_test)
149 |     if test.done {
150 |         return
151 |     }
152 |     if test.reporter_callback != nil {
153 |         test.reporter_callback(test)
154 |     }
155 | }
156 | 
157 | func (test *iperf_test) create_server_omit_timer() int {
158 |     // undo, depend on which kind of timer
159 |     return 0
160 | }
161 | 
162 | 
163 | func (test *iperf_test) run_server() int {
164 |     log.Debugf("Enter run_server")
165 | 
166 |     if test.server_listen() < 0 {
167 |         log.Error("Listen failed")
168 |         return -1
169 |     }
170 |     test.state = IPERF_START
171 |     log.Info("Enter Iperf start state...")
172 |     // start
173 |     conn, err := test.listener.Accept()
174 |     if err != nil {
175 |         log.Error("Accept failed")
176 |         return -2
177 |     }
178 |     test.ctrl_conn = conn
179 |     fmt.Printf("Accept connection from client: %v\n", conn.RemoteAddr())
180 |     // exchange params
181 |     if test.set_send_state(IPERF_EXCHANGE_PARAMS) < 0 {
182 |         log.Error("set_send_state error.")
183 |         return -3
184 |     }
185 |     log.Info("Enter Exchange Params state...")
186 | 
187 |     if test.exchange_params() < 0 {
188 |         log.Error("exchange params failed.")
189 |         return -3
190 |     }
191 | 
192 |     go test.handleServerCtrlMsg() // coroutine handles control msg
193 | 
194 |     if test.is_server {
195 |         listener, err := test.proto.listen(test)
196 |         if err != nil {
197 |             log.Error("proto listen error.")
198 |             return -4
199 |         }
200 |         test.proto_listener = listener
201 |     }
202 | 
203 |     // create streams
204 |     if test.set_send_state(IPERF_CREATE_STREAM) < 0 {
205 |         log.Error("set_send_state error.")
206 |         return -3
207 |     }
208 |     log.Info("Enter Create Stream state...")
209 | 
210 |     is_iperf_done := false
211 |     for !is_iperf_done {
212 |         select {
213 |         case state := <-test.ctrl_chan:
214 |             log.Debugf("Ctrl channel receive state [%v]", state)
215 |             if state == IPERF_DONE {
216 |                 return 0
217 |             } else if state == IPERF_CREATE_STREAM {
218 |                 var stream_num uint = 0
219 |                 for stream_num < test.stream_num {
220 |                     proto_conn, err := test.proto.accept(test)
221 |                     if err != nil {
222 |                         log.Error("proto accept error.")
223 |                         return -4
224 |                     }
225 |                     stream_num++
226 |                     var sp *iperf_stream
227 |                     if test.mode == IPERF_SENDER {
228 |                         sp = test.new_stream(proto_conn, SENDER_STREAM)
229 |                     } else {
230 |                         sp = test.new_stream(proto_conn, RECEIVER_STREAM)
231 |                     }
232 | 
233 |                     if sp == nil {
234 |                         log.Error("Create new stream failed.")
235 |                         return -4
236 |                     }
237 |                     test.streams = append(test.streams, sp)
238 |                     log.Debugf("create new stream, stream_num = %v, target stream num = %v", stream_num, test.stream_num)
239 |                 }
240 |                 if stream_num == test.stream_num {
241 |                     if test.set_send_state(TEST_START) != 0 {
242 |                         log.Errorf("set_send_state error")
243 |                         return -5
244 |                     }
245 |                     log.Info("Enter Test Start state...")
246 |                     if test.init_test() < 0 {
247 |                         log.Errorf("Init test failed.")
248 |                         return -5
249 |                     }
250 |                     if test.create_server_timer() < 0 {
251 |                         log.Errorf("Create Server timer failed.")
252 |                         return -6
253 |                     }
254 |                     if test.create_server_omit_timer() < 0 {
255 |                         log.Errorf("Create Server Omit timer failed.")
256 |                         return -7
257 |                     }
258 |                     if test.mode == IPERF_SENDER {
259 |                         if rtn := test.create_sender_ticker(); rtn < 0 {
260 | 
log.Errorf("create_sender_ticker failed. rtn = %v", rtn) 261 | return -7 262 | } 263 | } 264 | if test.set_send_state(TEST_RUNNING) != 0{ 265 | log.Errorf("set_send_state error") 266 | return -8 267 | } 268 | } 269 | } else if state == TEST_RUNNING{ 270 | // Regular mode. Server receives. 271 | log.Info("Enter Test Running state...") 272 | for i, sp := range test.streams{ 273 | if sp.role == SENDER_STREAM { 274 | go sp.iperf_send(test) 275 | log.Infof("Server Stream %v start sending.", i) 276 | } else { 277 | go sp.iperf_recv(test) 278 | log.Infof("Server Stream %v start receiving.", i) 279 | } 280 | } 281 | log.Info("Server all streams start...") 282 | } else if state == TEST_END { 283 | continue 284 | } else if state == IPERF_DONE{ 285 | is_iperf_done = true 286 | } else { 287 | log.Debugf("Channel Unhandle state [%v]", state) 288 | } 289 | } 290 | } 291 | log.Debugf("Server side done.") 292 | return 0 293 | } 294 | 295 | 296 | -------------------------------------------------------------------------------- /iperf.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "net" 6 | "time" 7 | ) 8 | 9 | var PROTOCOL_LIST = []string{"tcp", "udp", "rudp", "kcp"} 10 | 11 | const( 12 | IPERF_START = 1 13 | IPERF_DONE = 2 14 | IPERF_CREATE_STREAM = 3 15 | IPERF_EXCHANGE_PARAMS = 4 16 | IPERF_EXCHANGE_RESULT = 5 17 | IPERF_DISPLAY_RESULT = 6 18 | 19 | STREAM_CLOSE = 10 20 | 21 | SENDER_STREAM = 20 22 | RECEIVER_STREAM = 21 23 | 24 | TEST_START = 31 25 | TEST_RUNNING = 32 26 | TEST_RESULT_REQUEST = 33 27 | TEST_END = 34 28 | /* unexpected situation */ 29 | CLIENT_TERMINATE = 50 30 | SERVER_TERMINATE = 51 31 | 32 | IPERF_SENDER = true 33 | IPERF_RECEIVER = false 34 | ) 35 | 36 | const( 37 | TCP_NAME = "tcp" 38 | UDP_NAME = "udp" 39 | RUDP_NAME = "rudp" 40 | KCP_NAME = "kcp" 41 | ) 42 | 43 | const( 44 | DEFAULT_TCP_BLKSIZE = 128*1024 // default read/write block size 45 | DEFAULT_UDP_BLKSIZE = 1460 // default is dynamically set 46 | DEFAULT_RUDP_BLKSIZE = 4*1024 // default read/write block size 47 | TCP_MSS = 1460 // tcp mss size 48 | RUDP_MSS = 1376 // rudp mss size 49 | // rudp / kcp 50 | DEFAULT_WRITE_BUF_SIZE = 4*1024*1024 // rudp write buffer size 51 | DEFAULT_READ_BUF_SIZE = 4*1024*1024 // rudp read buffer size 52 | DEFAULT_FLUSH_INTERVAL = 10 // rudp flush interval 10 ms default 53 | MS_TO_NS = 1000000 54 | S_TO_NS = 1000000000 55 | MB_TO_B = 1024*1024 56 | KB_TO_B = 1024 57 | ACCEPT_SIGNAL = 1 58 | ) 59 | 60 | const( 61 | TCP_INTERVAL_HEADER = "[ ID] Interval Transfer Bandwidth RTT Retrans\n" 62 | TCP_RESULT_HEADER = "[ ID] Interval Transfer Bandwidth RTT Retrans Retrans(%%)\n" 63 | RUDP_INTERVAL_HEADER = "[ ID] Interval Transfer Bandwidth RTT Retrans Retrans(%%) Lost(%%) Early(%%) Fast(%%)\n" 64 | RUDP_RESULT_HEADER = "[ ID] Interval Transfer Bandwidth RTT Retrans Retrans(%%) Lost(%%) Early(%%) Fast(%%) Recover(%%) PktsLost(%%) SegsLost(%%)\n" 65 | TCP_REPORT_SINGLE_STREAM = "[ %v] %4.2f-%4.2f sec\t%5.2f MB\t%5.2f Mb/s\t%6.1fms\t%4v\n" 66 | RUDP_REPORT_SINGLE_STREAM = "[ %v] %4.2f-%4.2f sec\t%5.2f MB\t%5.2f Mb/s\t%6.1fms\t%4v\t%2.2f%%\t%2.2f%%\t%2.2f%%\t%2.2f%%\n" 67 | TCP_REPORT_SINGLE_RESULT = "[ %v] %4.2f-%4.2f sec\t%5.2f MB\t%5.2f Mb/s\t%6.1fms\t%4v\t%2.2f%%\t[%s]\n" 68 | RUDP_REPORT_SINGLE_RESULT = "[ %v] %4.2f-%4.2f sec\t%5.2f MB\t%5.2f Mb/s\t%6.1fms\t%4v\t%2.2f%%\t%2.2f%%\t%2.2f%%\t%2.2f%%\t%2.2f%%\t%2.2f%%\t%2.2f%%\t[%s]\n" 69 | REPORT_SUM_STREAM = "[SUM] %4.2f-%4.2f sec\t%5.2f MB\t%5.2f 
Mb/s\t%6.1fms\t%4v\t\n" 70 | REPORT_SEPERATOR = "- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n" 71 | SUMMARY_SEPERATOR = "- - - - - - - - - - - - - - - - SUMMARY - - - - - - - - - - - - - - - -\n" 72 | ) 73 | type iperf_test struct { 74 | is_server bool 75 | mode bool // true for sender. false for receiver 76 | reverse bool // server send? 77 | addr string 78 | port uint 79 | state uint 80 | duration uint // sec 81 | no_delay bool 82 | interval uint // ms 83 | proto protocol 84 | protocols []protocol 85 | 86 | 87 | /* stream */ 88 | 89 | listener net.Listener 90 | proto_listener net.Listener 91 | ctrl_conn net.Conn 92 | ctrl_chan chan uint 93 | setting *iperf_setting 94 | stream_num uint 95 | streams []*iperf_stream 96 | 97 | /* test statistics */ 98 | bytes_received uint64 99 | blocks_received uint64 100 | bytes_sent uint64 101 | blocks_sent uint64 102 | done bool 103 | 104 | /* timer */ 105 | timer ITimer 106 | //omit_timer ITimer // not used yet 107 | stats_ticker ITicker 108 | report_ticker ITicker 109 | chStats chan bool 110 | 111 | /* call back function */ 112 | 113 | stats_callback func(test *iperf_test) 114 | reporter_callback func(test *iperf_test) 115 | //on_new_stream on_new_stream_callback 116 | //on_test_start on_test_start_callback 117 | //on_connect on_connect_callback 118 | //on_test_finish on_test_finish_callback 119 | } 120 | 121 | // output_callback is a prototype which ought capture conn and call conn.Write 122 | //type on_new_stream_callback func(test *iperf_stream) 123 | //type on_test_start_callback func(test *iperf_test) 124 | //type on_connect_callback func(test *iperf_test) 125 | //type on_test_finish_callback func(test *iperf_test) 126 | 127 | 128 | type protocol interface { 129 | //name string 130 | name() string 131 | accept(test *iperf_test) (net.Conn, error) 132 | listen(test *iperf_test) (net.Listener, error) 133 | connect(test *iperf_test) (net.Conn, error) 134 | send(test *iperf_stream) int 135 | recv(test *iperf_stream) int 136 | // init will be called before send/recv data 137 | init(test *iperf_test) int 138 | // teardown will be called before send/recv data 139 | teardown(test *iperf_test) int 140 | // stats_callback will be invoked intervally, please get some other statistics in this function 141 | stats_callback(test *iperf_test, sp *iperf_stream, temp_result *iperf_interval_results) int 142 | } 143 | 144 | type iperf_stream struct{ 145 | role int //SENDER_STREAM or RECEIVE_STREAM 146 | test *iperf_test 147 | result *iperf_stream_results 148 | can_send bool 149 | conn net.Conn 150 | send_ticker ITicker 151 | 152 | buffer []byte //buffer to send 153 | 154 | rcv func(sp *iperf_stream) int // return recv size. -1 represent EOF. 155 | snd func(sp *iperf_stream) int // return send size. -1 represent socket close. 156 | 157 | } 158 | 159 | type iperf_setting struct{ 160 | blksize uint 161 | burst bool // burst & rate & pacing_time should be set at the same time 162 | rate uint // bit per second 163 | pacing_time uint // ms 164 | bytes uint64 165 | blocks uint64 166 | 167 | // rudp only 168 | snd_wnd uint 169 | rcv_wnd uint 170 | read_buf_size uint // bit 171 | write_buf_size uint // bit 172 | flush_interval uint // ms 173 | no_cong bool // bbr or not? 
174 | fast_resend uint 175 | data_shards uint // for fec 176 | parity_shards uint 177 | } 178 | 179 | // params to exchange 180 | // tips: all the members should be visible, or json decoder cannot encode it 181 | type stream_params struct{ 182 | ProtoName string 183 | Reverse bool 184 | Duration uint 185 | NoDelay bool 186 | Interval uint 187 | StreamNum uint 188 | Blksize uint 189 | SndWnd uint 190 | RcvWnd uint 191 | ReadBufSize uint 192 | WriteBufSize uint 193 | FlushInterval uint 194 | NoCong bool 195 | FastResend uint 196 | DataShards uint 197 | ParityShards uint 198 | Burst bool 199 | Rate uint 200 | PacingTime uint 201 | } 202 | 203 | func (p stream_params) String() string{ 204 | s := fmt.Sprintf("name:%v\treverse:%v\tdur:%v\tno_delay:%v\tinterval:%v\tstream_num:%v\tBlkSize:%v\tSndWnd:%v\tRcvWnd:%v\tNoCong:%v\tBurst:%v\tDataShards:%v\tParityShards:%v\t", 205 | p.ProtoName, p.Reverse, p.Duration, p.NoDelay, p.Interval, p.StreamNum, p.Blksize, p.SndWnd, p.RcvWnd, p.NoCong, p.Burst, p.DataShards, p.ParityShards) 206 | return s 207 | } 208 | 209 | type iperf_stream_results struct{ 210 | bytes_received uint64 211 | bytes_sent uint64 212 | bytes_received_this_interval uint64 213 | bytes_sent_this_interval uint64 214 | bytes_sent_omit uint64 215 | stream_retrans uint 216 | stream_prev_total_retrans uint 217 | stream_lost uint 218 | stream_prev_total_lost uint 219 | stream_early_retrans uint 220 | stream_prev_total_early_retrans uint 221 | stream_fast_retrans uint 222 | stream_prev_total_fast_retrans uint 223 | stream_recovers uint 224 | stream_in_segs uint 225 | stream_in_pkts uint 226 | stream_out_segs uint 227 | stream_out_pkts uint 228 | stream_max_rtt uint 229 | stream_min_rtt uint 230 | stream_sum_rtt uint // micro sec 231 | stream_cnt_rtt uint 232 | start_time time.Time 233 | end_time time.Time 234 | start_time_fixed time.Time 235 | interval_results []iperf_interval_results 236 | } 237 | 238 | type stream_results_array []stream_results_exchange 239 | 240 | // result to exchange 241 | // tips: all the members should be visible, or json decoder cannot encode it 242 | type stream_results_exchange struct{ 243 | Id uint 244 | Bytes uint64 245 | Retrans uint 246 | Jitter uint 247 | InPkts uint 248 | OutPkts uint 249 | InSegs uint 250 | OutSegs uint 251 | Recovered uint 252 | StartTime time.Time 253 | EndTime time.Time 254 | } 255 | 256 | func (r stream_results_exchange) String() string{ 257 | s := fmt.Sprintf("id:%v\tbytes:%v\tretrans:%v\tjitter:%v\tInPkts:%v\tOutPkts:%v\tInSegs:%v\tOutSegs:%v\tstart_time:%v\tend_time:%v\t", 258 | r.Id, r.Bytes, r.Retrans, r.Jitter, r.InPkts, r.OutPkts, r.InSegs, r.OutSegs, r.StartTime, r.EndTime) 259 | return s 260 | } 261 | 262 | type iperf_interval_results struct{ 263 | bytes_transfered uint64 264 | interval_start_time time.Time 265 | interval_end_time time.Time 266 | interval_dur time.Duration 267 | rtt uint // us 268 | rto uint // us 269 | interval_lost uint 270 | interval_early_retrans uint 271 | interval_fast_retrans uint 272 | interval_retrans uint // segs num 273 | /* for udp */ 274 | interval_packet_cnt uint 275 | omitted uint 276 | } -------------------------------------------------------------------------------- /iperf_api_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/binary" 5 | "github.com/op/go-logging" 6 | "testing" 7 | "time" 8 | //"github.com/gotestyourself/gotest.tools/assert" 9 | "gotest.tools/assert" 10 | //"github.com/stretchr/testify/assert" 
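// Aside (added note): both stream_params and stream_results_exchange in
// iperf.go above cross the control connection via encoding/json, which is why
// every field is exported (capitalized) — json.Marshal silently drops
// unexported fields. A hedged sketch of the round trip, using the types as
// defined above:
//
//	p := stream_params{ProtoName: "rudp", Duration: 10, StreamNum: 2}
//	raw, err := json.Marshal(&p) // -> {"ProtoName":"rudp","Duration":10,...}
//	if err != nil {
//		log.Errorf("marshal failed: %v", err)
//	}
//	var q stream_params
//	if err := json.Unmarshal(raw, &q); err == nil {
//		fmt.Println(q.ProtoName) // "rudp" survives the round trip
//	}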
11 | 12 | ) 13 | 14 | const portServer = 5021 15 | const addrServer = "127.0.0.1:5021" 16 | const addrClient = "127.0.0.1" 17 | var server_test, client_test *iperf_test 18 | 19 | func init(){ 20 | 21 | logging.SetLevel(logging.ERROR, "iperf") 22 | logging.SetLevel(logging.ERROR, "rudp") 23 | /* log settting */ 24 | 25 | server_test = new_iperf_test() 26 | client_test = new_iperf_test() 27 | server_test.init() 28 | client_test.init() 29 | 30 | server_test.is_server = true 31 | server_test.port = portServer 32 | 33 | client_test.is_server = false 34 | client_test.port = portServer 35 | client_test.addr = addrClient 36 | 37 | client_test.interval = 1000 // 1000 ms 38 | client_test.duration = 5 // 5 s for test 39 | client_test.stream_num = 1 // 1 stream only 40 | client_test.set_test_reverse(false) 41 | 42 | //TCPSetting() 43 | RUDPSetting() 44 | //KCPSetting() 45 | 46 | //client_test.setting.burst = true 47 | go server_test.run_server() 48 | time.Sleep(time.Second) 49 | } 50 | 51 | func TCPSetting(){ 52 | client_test.set_protocol(TCP_NAME) 53 | client_test.no_delay = true 54 | client_test.setting.blksize = DEFAULT_TCP_BLKSIZE 55 | client_test.setting.burst = false 56 | client_test.setting.rate = 1024*1024*1024*1024 // b/s 57 | client_test.setting.pacing_time = 100 //ms 58 | } 59 | 60 | func RUDPSetting(){ 61 | client_test.set_protocol(RUDP_NAME) 62 | client_test.no_delay = false 63 | client_test.setting.blksize = DEFAULT_RUDP_BLKSIZE 64 | client_test.setting.burst = true 65 | client_test.setting.no_cong = false // false for BBR control 66 | client_test.setting.snd_wnd = 10 67 | client_test.setting.rcv_wnd = 1024 68 | client_test.setting.read_buf_size = DEFAULT_READ_BUF_SIZE 69 | client_test.setting.write_buf_size = DEFAULT_WRITE_BUF_SIZE 70 | client_test.setting.flush_interval = DEFAULT_FLUSH_INTERVAL 71 | client_test.setting.data_shards = 3 72 | client_test.setting.parity_shards = 1 73 | } 74 | 75 | func KCPSetting(){ 76 | client_test.set_protocol(KCP_NAME) 77 | client_test.no_delay = false 78 | client_test.setting.blksize = DEFAULT_RUDP_BLKSIZE 79 | client_test.setting.burst = true 80 | client_test.setting.no_cong = true // false for BBR control 81 | client_test.setting.snd_wnd = 512 82 | client_test.setting.rcv_wnd = 1024 83 | client_test.setting.read_buf_size = DEFAULT_READ_BUF_SIZE 84 | client_test.setting.write_buf_size = DEFAULT_WRITE_BUF_SIZE 85 | client_test.setting.flush_interval = DEFAULT_FLUSH_INTERVAL 86 | } 87 | 88 | func RecvCheckState(t *testing.T, state int) int { 89 | buf := make([]byte, 4) 90 | if n, err := client_test.ctrl_conn.Read(buf); err == nil { 91 | s := binary.LittleEndian.Uint32(buf[:]) 92 | log.Debugf("Ctrl conn receive n = %v state = [%v]", n, s) 93 | //s, err := strconv.Atoi(string(buf[:n])) 94 | if s != uint32(state){ 95 | log.Errorf("recv state[%v] != expected state[%v]", s, state) 96 | t.FailNow() 97 | return -1 98 | } 99 | client_test.state = uint(state) 100 | log.Infof("Client Enter %v state", client_test.state) 101 | } 102 | return 0 103 | } 104 | 105 | func CreateStreams(t *testing.T) int{ 106 | if rtn := client_test.create_streams(); rtn < 0 { 107 | log.Errorf("create_streams failed. 
rtn = %v", rtn) 108 | return -1 109 | } 110 | // check client state 111 | assert.Equal(t, uint(len(client_test.streams)), client_test.stream_num) 112 | for _, sp := range(client_test.streams){ 113 | assert.Equal(t, sp.test, client_test) 114 | if client_test.mode == IPERF_SENDER { 115 | assert.Equal(t, sp.role, SENDER_STREAM) 116 | } else{ 117 | assert.Equal(t, sp.role, RECEIVER_STREAM) 118 | } 119 | assert.Assert(t, sp.result != nil) 120 | assert.Equal(t, sp.can_send, false) // set true after create_send_timer 121 | assert.Assert(t, sp.conn != nil) 122 | assert.Assert(t, sp.send_ticker.ticker == nil) // ticker haven't been created yet 123 | } 124 | time.Sleep(time.Millisecond * 10) // ensure server side has created all the streams 125 | // check server state 126 | assert.Equal(t, uint(len(server_test.streams)), client_test.stream_num) 127 | for _, sp := range(server_test.streams){ 128 | assert.Equal(t, sp.test, server_test) 129 | if server_test.mode == IPERF_SENDER { 130 | assert.Equal(t, sp.role, SENDER_STREAM) 131 | } else { 132 | assert.Equal(t, sp.role, RECEIVER_STREAM) 133 | } 134 | assert.Assert(t, sp.result != nil) 135 | if server_test.mode == IPERF_SENDER { 136 | assert.Equal(t, sp.can_send, true) 137 | if client_test.setting.burst == true { 138 | assert.Assert(t, sp.send_ticker.ticker == nil) 139 | } else { 140 | assert.Assert(t, sp.send_ticker.ticker != nil) 141 | } 142 | } else { 143 | assert.Equal(t, sp.can_send, false) 144 | assert.Assert(t, sp.send_ticker.ticker == nil) 145 | } 146 | assert.Assert(t, sp.conn != nil) 147 | 148 | } 149 | return 0 150 | } 151 | 152 | func handleTestStart(t *testing.T) int{ 153 | if rtn := client_test.init_test(); rtn < 0 { 154 | log.Errorf("init_test failed. rtn = %v", rtn) 155 | return -1 156 | } 157 | if rtn := client_test.create_client_timer(); rtn < 0 { 158 | log.Errorf("create_client_timer failed. rtn = %v", rtn) 159 | return -1 160 | } 161 | if rtn := client_test.create_client_omit_timer(); rtn < 0 { 162 | log.Errorf("create_client_omit_timer failed. rtn = %v", rtn) 163 | return -1 164 | } 165 | if client_test.mode == IPERF_SENDER{ 166 | if rtn := client_test.create_sender_ticker(); rtn < 0 { 167 | log.Errorf("create_client_send_timer failed. 
rtn = %v", rtn) 168 | return -1 169 | } 170 | } 171 | 172 | // check client 173 | for _, sp := range client_test.streams{ 174 | assert.Assert(t, sp.result.start_time.Before(time.Now().Add(time.Duration(time.Millisecond)))) 175 | assert.Assert(t, sp.test.timer.timer != nil) 176 | assert.Assert(t, sp.test.stats_ticker.ticker != nil) 177 | assert.Assert(t, sp.test.report_ticker.ticker != nil) 178 | 179 | if client_test.mode == IPERF_SENDER { 180 | assert.Equal(t, sp.can_send, true) 181 | if client_test.setting.burst == true { 182 | assert.Assert(t, sp.send_ticker.ticker == nil) 183 | } else { 184 | assert.Assert(t, sp.send_ticker.ticker != nil) 185 | } 186 | } else { 187 | assert.Equal(t, sp.can_send, false) 188 | assert.Assert(t, sp.send_ticker.ticker == nil) 189 | } 190 | } 191 | 192 | // check server, should finish test_start process and enter test_running now 193 | for _, sp := range server_test.streams{ 194 | assert.Assert(t, sp.result.start_time.Before(time.Now().Add(time.Duration(time.Millisecond)))) 195 | assert.Assert(t, sp.test.timer.timer != nil) 196 | assert.Assert(t, sp.test.stats_ticker.ticker != nil) 197 | assert.Assert(t, sp.test.report_ticker.ticker != nil) 198 | assert.Equal(t, sp.test.state, uint(TEST_RUNNING)) 199 | } 200 | 201 | return 0 202 | } 203 | 204 | func handleTestRunning(t *testing.T) int{ 205 | log.Info("Client enter Test Running state...") 206 | for i, sp := range client_test.streams{ 207 | if client_test.mode == IPERF_SENDER { 208 | go sp.iperf_send(client_test) 209 | log.Infof("Stream %v start sending.", i) 210 | } else { 211 | go sp.iperf_recv(client_test) 212 | log.Infof("Stream %v start receiving.", i) 213 | } 214 | } 215 | log.Info("Client all Stream start. Wait for finish...") 216 | // wait for send/write end (triggered by timer) 217 | //for { 218 | // if client_test.done { 219 | // time.Sleep(time.Millisecond) 220 | // break 221 | // } 222 | //} 223 | for i := 0; i < int(client_test.stream_num); i++ { 224 | s := <- client_test.ctrl_chan 225 | assert.Equal(t, s, uint(TEST_END)) 226 | } 227 | log.Infof("Client All Send Stream closed.") 228 | client_test.done = true 229 | if client_test.stats_callback != nil { 230 | client_test.stats_callback(client_test) 231 | } 232 | if client_test.set_send_state(TEST_END) < 0 { 233 | log.Errorf("set_send_state failed. 
%v", TEST_END) 234 | t.FailNow() 235 | } 236 | // check client 237 | assert.Equal(t, client_test.done, true) 238 | assert.Assert(t, client_test.timer.timer == nil) 239 | assert.Equal(t, client_test.state, uint(TEST_END)) 240 | var total_bytes uint64 241 | for _, sp := range client_test.streams { 242 | if client_test.mode == IPERF_SENDER{ 243 | total_bytes += sp.result.bytes_sent 244 | } else { 245 | total_bytes += sp.result.bytes_received 246 | } 247 | } 248 | if client_test.mode == IPERF_SENDER{ 249 | assert.Equal(t, client_test.bytes_sent, total_bytes) 250 | assert.Equal(t, client_test.bytes_received, uint64(0)) 251 | } else { 252 | assert.Equal(t, client_test.bytes_received, total_bytes) 253 | assert.Equal(t, client_test.bytes_sent, uint64(0)) 254 | } 255 | 256 | 257 | 258 | time.Sleep(time.Millisecond*10) // ensure server change state 259 | // check server 260 | assert.Equal(t, server_test.done, true) 261 | assert.Equal(t, server_test.state, uint(IPERF_EXCHANGE_RESULT)) 262 | absolute_bytes_diff := int64(server_test.bytes_received) - int64(client_test.bytes_sent) 263 | if absolute_bytes_diff < 0{ 264 | absolute_bytes_diff = 0 - absolute_bytes_diff 265 | } 266 | if float64(absolute_bytes_diff) / float64(client_test.bytes_sent) > 0.01 { // if bytes difference larger than 1% 267 | t.FailNow() 268 | } 269 | //assert.Equal(t, server_test.bytes_received, client_test.bytes_sent) 270 | //assert.Equal(t, server_test.blocks_received, client_test.blocks_sent) // block num not always same 271 | total_bytes = 0 272 | for _, sp := range server_test.streams { 273 | if server_test.mode == IPERF_SENDER{ 274 | total_bytes += sp.result.bytes_sent 275 | } else { 276 | total_bytes += sp.result.bytes_received 277 | } 278 | } 279 | if server_test.mode == IPERF_SENDER { 280 | assert.Equal(t, server_test.bytes_sent, total_bytes) 281 | assert.Equal(t, server_test.bytes_received, uint64(0)) 282 | } else { 283 | assert.Equal(t, server_test.bytes_received, total_bytes) 284 | assert.Equal(t, server_test.bytes_sent, uint64(0)) 285 | } 286 | return 0 287 | } 288 | 289 | func handleExchangeResult(t *testing.T) int{ 290 | if rtn := client_test.exchange_results(); rtn < 0 { 291 | log.Errorf("exchange_results failed. 
rtn = %v", rtn) 292 | return -1 293 | } 294 | // check client 295 | assert.Equal(t, client_test.done, true) 296 | for i, sp := range client_test.streams { 297 | ssp := server_test.streams[i] 298 | assert.Equal(t, sp.result.bytes_received, ssp.result.bytes_received) 299 | assert.Equal(t, sp.result.bytes_sent, ssp.result.bytes_sent) 300 | } 301 | // check server 302 | assert.Equal(t, server_test.state, uint(IPERF_DISPLAY_RESULT)) 303 | return 0 304 | } 305 | /* 306 | Test case can only be run one by one 307 | */ 308 | 309 | /* 310 | func TestCtrlConnect(t *testing.T){ 311 | if rtn := client_test.ConnectServer(); rtn < 0 { 312 | t.FailNow() 313 | } 314 | RecvCheckState(t, IPERF_EXCHANGE_PARAMS) 315 | if err := client_test.ctrl_conn.Close(); err != nil { 316 | log.Errorf("close ctrl_conn failed.") 317 | t.FailNow() 318 | } 319 | if err := server_test.ctrl_conn.Close(); err != nil { 320 | log.Errorf("close ctrl_conn failed.") 321 | t.FailNow() 322 | } 323 | } 324 | 325 | func TestExchangeParams(t *testing.T){ 326 | if rtn := client_test.ConnectServer(); rtn < 0 { 327 | t.FailNow() 328 | } 329 | RecvCheckState(t, IPERF_EXCHANGE_PARAMS) 330 | if rtn := client_test.exchange_params(); rtn < 0 { 331 | t.FailNow() 332 | } 333 | 334 | time.Sleep(time.Second) 335 | assert.Equal(t, server_test.proto.name(), client_test.proto.name()) 336 | assert.Equal(t, server_test.stream_num, client_test.stream_num) 337 | assert.Equal(t, server_test.duration, client_test.duration) 338 | assert.Equal(t, server_test.interval, client_test.interval) 339 | assert.Equal(t, server_test.no_delay, client_test.no_delay) 340 | } 341 | 342 | func TestCreateOneStream(t *testing.T){ 343 | // create only one stream 344 | if rtn := client_test.ConnectServer(); rtn < 0 { 345 | t.FailNow() 346 | } 347 | RecvCheckState(t, IPERF_EXCHANGE_PARAMS) 348 | if rtn := client_test.exchange_params(); rtn < 0 { 349 | t.FailNow() 350 | } 351 | RecvCheckState(t, IPERF_CREATE_STREAM) 352 | CreateStreams(t) 353 | } 354 | 355 | func TestCreateMultiStreams(t *testing.T){ 356 | // create multi streams 357 | if rtn := client_test.ConnectServer(); rtn < 0 { 358 | t.FailNow() 359 | } 360 | RecvCheckState(t, IPERF_EXCHANGE_PARAMS) 361 | client_test.stream_num = 5 // change stream_num before exchange params 362 | if rtn := client_test.exchange_params(); rtn < 0 { 363 | t.FailNow() 364 | } 365 | RecvCheckState(t, IPERF_CREATE_STREAM) 366 | if rtn := CreateStreams(t); rtn < 0{ 367 | t.FailNow() 368 | } 369 | } 370 | 371 | func TestTestStart(t *testing.T){ 372 | if rtn := client_test.ConnectServer(); rtn < 0 { 373 | t.FailNow() 374 | } 375 | RecvCheckState(t, IPERF_EXCHANGE_PARAMS) 376 | if rtn := client_test.exchange_params(); rtn < 0 { 377 | t.FailNow() 378 | } 379 | RecvCheckState(t, IPERF_CREATE_STREAM) 380 | if rtn := CreateStreams(t); rtn < 0{ 381 | t.FailNow() 382 | } 383 | RecvCheckState(t, TEST_START) 384 | if rtn := handleTestStart(t); rtn < 0{ 385 | t.FailNow() 386 | } 387 | RecvCheckState(t, TEST_RUNNING) 388 | } 389 | 390 | func TestTestRunning(t *testing.T){ 391 | if rtn := client_test.ConnectServer(); rtn < 0 { 392 | t.FailNow() 393 | } 394 | RecvCheckState(t, IPERF_EXCHANGE_PARAMS) 395 | client_test.stream_num = 2 396 | if rtn := client_test.exchange_params(); rtn < 0 { 397 | t.FailNow() 398 | } 399 | RecvCheckState(t, IPERF_CREATE_STREAM) 400 | if rtn := CreateStreams(t); rtn < 0{ 401 | t.FailNow() 402 | } 403 | RecvCheckState(t, TEST_START) 404 | if rtn := handleTestStart(t); rtn < 0{ 405 | t.FailNow() 406 | } 407 | RecvCheckState(t, 
TEST_RUNNING) 408 | if handleTestRunning(t) < 0{ 409 | t.FailNow() 410 | } 411 | RecvCheckState(t, IPERF_EXCHANGE_RESULT) 412 | } 413 | 414 | func TestExchangeResult(t *testing.T){ 415 | if rtn := client_test.ConnectServer(); rtn < 0 { 416 | t.FailNow() 417 | } 418 | RecvCheckState(t, IPERF_EXCHANGE_PARAMS) 419 | client_test.stream_num = 2 420 | if rtn := client_test.exchange_params(); rtn < 0 { 421 | t.FailNow() 422 | } 423 | RecvCheckState(t, IPERF_CREATE_STREAM) 424 | if rtn := CreateStreams(t); rtn < 0{ 425 | t.FailNow() 426 | } 427 | RecvCheckState(t, TEST_START) 428 | if rtn := handleTestStart(t); rtn < 0{ 429 | t.FailNow() 430 | } 431 | RecvCheckState(t, TEST_RUNNING) 432 | if handleTestRunning(t) < 0{ 433 | t.FailNow() 434 | } 435 | RecvCheckState(t, IPERF_EXCHANGE_RESULT) 436 | if handleExchangeResult(t) < 0 { 437 | t.FailNow() 438 | } 439 | RecvCheckState(t, IPERF_DISPLAY_RESULT) 440 | } 441 | */ 442 | 443 | func TestDisplayResult(t *testing.T){ 444 | if rtn := client_test.ConnectServer(); rtn < 0 { 445 | t.FailNow() 446 | } 447 | RecvCheckState(t, IPERF_EXCHANGE_PARAMS) 448 | //client_test.stream_num = 2 449 | if rtn := client_test.exchange_params(); rtn < 0 { 450 | t.FailNow() 451 | } 452 | RecvCheckState(t, IPERF_CREATE_STREAM) 453 | if rtn := CreateStreams(t); rtn < 0{ 454 | t.FailNow() 455 | } 456 | RecvCheckState(t, TEST_START) 457 | if rtn := handleTestStart(t); rtn < 0{ 458 | t.FailNow() 459 | } 460 | RecvCheckState(t, TEST_RUNNING) 461 | if handleTestRunning(t) < 0{ 462 | t.FailNow() 463 | } 464 | RecvCheckState(t, IPERF_EXCHANGE_RESULT) 465 | if handleExchangeResult(t) < 0 { 466 | t.FailNow() 467 | } 468 | RecvCheckState(t, IPERF_DISPLAY_RESULT) 469 | 470 | client_test.client_end() 471 | 472 | time.Sleep(time.Millisecond*10) // wait for server 473 | assert.Equal(t, client_test.state, uint(IPERF_DONE)) 474 | assert.Equal(t, server_test.state, uint(IPERF_DONE)) 475 | // check output with your own eyes 476 | 477 | time.Sleep(time.Second*5) // wait for server 478 | } 479 | -------------------------------------------------------------------------------- /iperf_api.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/binary" 5 | "encoding/json" 6 | "flag" 7 | "fmt" 8 | "github.com/op/go-logging" 9 | "net" 10 | "os" 11 | "strconv" 12 | "time" 13 | ) 14 | 15 | func new_iperf_test() (test*iperf_test){ 16 | test = new(iperf_test) 17 | test.ctrl_chan = make(chan uint, 5) 18 | test.setting = new(iperf_setting) 19 | test.reporter_callback = iperf_reporter_callback 20 | test.stats_callback = iperf_stats_callback 21 | test.chStats = make(chan bool, 1) 22 | return 23 | } 24 | 25 | func (test *iperf_test) set_protocol (proto_name string) int{ 26 | for _, proto := range test.protocols{ 27 | if proto_name == proto.name(){ 28 | test.proto = proto 29 | return 0 30 | } 31 | } 32 | return -1 33 | } 34 | 35 | func (test *iperf_test) set_send_state(state uint) int{ 36 | test.state = state 37 | test.ctrl_chan <- test.state 38 | bs := make([]byte, 4) 39 | binary.LittleEndian.PutUint32(bs, uint32(state)) 40 | //msg := fmt.Sprintf("%v", test.state) 41 | n, err := test.ctrl_conn.Write(bs) 42 | if err != nil { 43 | log.Errorf("Write state error. 
%v %v", n, err) 44 | return -1 45 | } 46 | log.Debugf("Set & send state = %v, n = %v", state, n) 47 | return 0 48 | } 49 | 50 | func (test *iperf_test) new_stream(conn net.Conn, sender_flag int) *iperf_stream{ 51 | sp := new(iperf_stream) 52 | sp.role = sender_flag 53 | sp.conn = conn 54 | sp.test = test 55 | 56 | // mark, set sp.buffer 57 | sp.result = new(iperf_stream_results) 58 | sp.snd = test.proto.send 59 | sp.rcv = test.proto.recv 60 | sp.buffer = make([]byte, test.setting.blksize) 61 | copy(sp.buffer[:], "hello world!") 62 | // initialize stream 63 | // set tos bit. undo 64 | return sp 65 | } 66 | 67 | func (test *iperf_test) close_all_streams() int{ 68 | var err error 69 | for _, sp := range test.streams{ 70 | err = sp.conn.Close() 71 | if err != nil { 72 | log.Errorf("Stream close failed, err = %v", err) 73 | return -1 74 | } 75 | } 76 | return 0 77 | } 78 | 79 | func (test *iperf_test) check_throttle(sp *iperf_stream, now time.Time) { 80 | if sp.test.done { 81 | return 82 | } 83 | dur := now.Sub(sp.result.start_time) 84 | sec := dur.Seconds() 85 | bits_per_second := float64(sp.result.bytes_sent * 8) / sec 86 | if bits_per_second < float64(sp.test.setting.rate) && sp.can_send == false{ 87 | sp.can_send = true 88 | log.Debugf("sp.can_send turn TRUE. bits_per_second = %6.2f MB/s Required = %6.2f MB/s", 89 | bits_per_second/MB_TO_B/8, float64(sp.test.setting.rate) / MB_TO_B / 8) 90 | } else if bits_per_second > float64(sp.test.setting.rate) && sp.can_send == true{ 91 | sp.can_send = false 92 | log.Debugf("sp.can_send turn FALSE. bits_per_second = %6.2f MB/s Required = %6.2f MB/s", 93 | bits_per_second/MB_TO_B/8, float64(sp.test.setting.rate) / MB_TO_B / 8) 94 | } 95 | } 96 | 97 | func (test *iperf_test) send_params() int { 98 | log.Debugf("Enter send_params") 99 | params := stream_params{ 100 | ProtoName: test.proto.name(), 101 | Reverse: test.reverse, 102 | Duration: test.duration, 103 | NoDelay: test.no_delay, 104 | Interval: test.interval, 105 | StreamNum: test.stream_num, 106 | Blksize: test.setting.blksize, 107 | SndWnd: test.setting.snd_wnd, 108 | RcvWnd: test.setting.rcv_wnd, 109 | ReadBufSize: test.setting.read_buf_size, 110 | WriteBufSize: test.setting.write_buf_size, 111 | FlushInterval: test.setting.flush_interval, 112 | NoCong: test.setting.no_cong, 113 | FastResend: test.setting.fast_resend, 114 | DataShards: test.setting.data_shards, 115 | ParityShards: test.setting.parity_shards, 116 | Burst: test.setting.burst, 117 | Rate: test.setting.rate, 118 | PacingTime: test.setting.pacing_time, 119 | } 120 | //encoder := json.NewEncoder(test.ctrl_conn) 121 | //err := encoder.Encode(params) 122 | 123 | bytes, err := json.Marshal(¶ms) 124 | if err != nil { 125 | log.Error("Encode params failed. %v", err) 126 | return -1 127 | } 128 | n, err := test.ctrl_conn.Write(bytes) 129 | if err != nil{ 130 | log.Error("Write failed. %v", err) 131 | return -1 132 | } 133 | log.Debugf("send params %v bytes: %v", n, params.String()) 134 | return 0 135 | } 136 | 137 | func (test *iperf_test) get_params() int { 138 | log.Debugf("Enter get_params") 139 | var params stream_params 140 | //encoder := json.NewDecoder(test.ctrl_conn) 141 | //err := encoder.Decode(¶ms) 142 | buf := make([]byte, 1024) 143 | n, err := test.ctrl_conn.Read(buf) 144 | if err != nil { 145 | log.Errorf("Read failed. %v", err) 146 | return -1 147 | } 148 | err = json.Unmarshal(buf[:n], ¶ms) 149 | if err != nil { 150 | log.Errorf("Decode failed. 
%v", err) 151 | return -1 152 | } 153 | log.Debugf("get params %v bytes: %v", n, params.String()) 154 | test.set_protocol(params.ProtoName) 155 | test.set_test_reverse(params.Reverse) 156 | test.duration = params.Duration 157 | test.no_delay = params.NoDelay 158 | test.interval = params.Interval 159 | test.stream_num = params.StreamNum 160 | test.setting.blksize = params.Blksize 161 | test.setting.burst = params.Burst 162 | test.setting.rate = params.Rate 163 | test.setting.pacing_time = params.PacingTime 164 | // rudp/kcp only 165 | test.setting.snd_wnd = params.SndWnd 166 | test.setting.rcv_wnd = params.RcvWnd 167 | test.setting.write_buf_size = params.WriteBufSize 168 | test.setting.read_buf_size = params.ReadBufSize 169 | test.setting.flush_interval = params.FlushInterval 170 | test.setting.no_cong = params.NoCong 171 | test.setting.fast_resend = params.FastResend 172 | test.setting.data_shards = params.DataShards 173 | test.setting.parity_shards = params.ParityShards 174 | return 0 175 | } 176 | 177 | func (test *iperf_test) exchange_params() int { 178 | if test.is_server == false { 179 | if test.send_params() < 0 { 180 | return -1 181 | } 182 | } else { 183 | if test.get_params() < 0 { 184 | return -1 185 | } 186 | } 187 | return 0 188 | } 189 | 190 | func (test *iperf_test) send_results() int { 191 | log.Debugf("Send Results") 192 | var results = make(stream_results_array, test.stream_num) 193 | for i, sp := range test.streams{ 194 | var bytes_transfer uint64 195 | if test.mode == IPERF_RECEIVER { 196 | bytes_transfer = sp.result.bytes_received 197 | } else { 198 | bytes_transfer = sp.result.bytes_sent 199 | } 200 | rp := sp.result 201 | sp_result := stream_results_exchange{ 202 | Id: uint(i), 203 | Bytes: bytes_transfer, 204 | Retrans: rp.stream_retrans, 205 | Jitter: 0, // current not used 206 | InPkts: rp.stream_in_pkts, 207 | OutPkts: rp.stream_out_pkts, 208 | InSegs: rp.stream_in_segs, 209 | OutSegs: rp.stream_out_segs, 210 | Recovered: rp.stream_recovers, 211 | StartTime: sp.result.start_time, 212 | EndTime: sp.result.end_time, 213 | } 214 | results[i] = sp_result 215 | } 216 | bytes, err := json.Marshal(&results) 217 | if err != nil { 218 | log.Error("Encode results failed. %v", err) 219 | return -1 220 | } 221 | n, err := test.ctrl_conn.Write(bytes) 222 | if err != nil{ 223 | log.Error("Write failed. %v", err) 224 | return -1 225 | } 226 | if test.is_server { 227 | log.Debugf("Server send results %v bytes: %v", n, results) 228 | } else { 229 | log.Debugf("Client send results %v bytes: %v", n, results) 230 | } 231 | 232 | return 0 233 | } 234 | 235 | func (test *iperf_test) get_results() int { 236 | log.Debugf("Enter get_results") 237 | var results = make(stream_results_array, test.stream_num) 238 | //encoder := json.NewDecoder(test.ctrl_conn) 239 | //err := encoder.Decode(¶ms) 240 | buf := make([]byte, 4*1024) 241 | n, err := test.ctrl_conn.Read(buf) 242 | if err != nil { 243 | log.Errorf("Read failed. %v", err) 244 | return -1 245 | } 246 | err = json.Unmarshal(buf[:n], &results) 247 | if err != nil { 248 | log.Errorf("Decode failed. 
%v", err) 249 | return -1 250 | } 251 | if test.is_server { 252 | log.Debugf("Server get results %v bytes: %v", n, results) 253 | } else { 254 | log.Debugf("Client get results %v bytes: %v", n, results) 255 | } 256 | 257 | 258 | for i, result := range results{ 259 | sp := test.streams[i] 260 | if test.mode == IPERF_RECEIVER { 261 | sp.result.bytes_sent = result.Bytes 262 | sp.result.stream_retrans = result.Retrans 263 | sp.result.stream_out_segs = result.OutSegs 264 | sp.result.stream_out_pkts = result.OutPkts 265 | } else { 266 | sp.result.bytes_received = result.Bytes 267 | //sp.jitter = result.jitter 268 | sp.result.stream_in_segs = result.InSegs 269 | sp.result.stream_in_pkts = result.InPkts 270 | sp.result.stream_recovers = result.Recovered 271 | } 272 | } 273 | return 0 274 | } 275 | 276 | func (test *iperf_test) exchange_results() int { 277 | if test.is_server == false { 278 | if test.send_results() < 0 { 279 | return -1 280 | } 281 | if test.get_results() < 0 { 282 | return -1 283 | } 284 | } else { 285 | // server 286 | if test.get_results() < 0 { 287 | return -1 288 | } 289 | if test.send_results() < 0 { 290 | return -1 291 | } 292 | } 293 | return 0 294 | } 295 | 296 | func (test *iperf_test) init_test() int{ 297 | test.proto.init(test) 298 | now := time.Now() 299 | for _, sp := range test.streams{ 300 | sp.result.start_time = now 301 | sp.result.start_time_fixed = now 302 | } 303 | return 0 304 | } 305 | 306 | /* 307 | main level interface 308 | */ 309 | func (test *iperf_test) init() { 310 | test.protocols = append(test.protocols, new(tcp_proto), new(rudp_proto), new(kcp_proto)) 311 | } 312 | 313 | func (test *iperf_test)parse_arguments() int { 314 | 315 | // command flag definition 316 | var help_flag = flag.Bool("h", false, "this help") 317 | var server_flag = flag.Bool("s", false, "server side") 318 | var client_flag = flag.String("c", "127.0.0.1", "client side") 319 | var reverse_flag = flag.Bool("R", false, "reverse mode. client receive, server send") 320 | var port_flag = flag.Uint("p", 5201, "connect/listen port") 321 | var protocol_flag = flag.String("proto", TCP_NAME, "protocol under test") 322 | var dur_flag = flag.Uint("d", 10, "duration (s)") 323 | var interval_flag = flag.Uint("i", 1000, "test interval (ms)") 324 | var parallel_flag = flag.Uint("P", 1, "The number of simultaneous connections") 325 | var blksize_flag = flag.Uint("l", 4*1024, "send/read block size") 326 | var bandwidth_flag = flag.String("b", "0", "bandwidth limit. (M/K), default MB/s") 327 | var debug_flag = flag.Bool("debug", false, "debug mode") 328 | var info_flag = flag.Bool("info", false, "info mode") 329 | var no_delay_flag = flag.Bool("D", false, "no delay option") 330 | // RUDP specific option 331 | var snd_wnd_flag = flag.Uint("sw", 10, "rudp send window size") 332 | var rcv_wnd_flag = flag.Uint("rw", 512, "rudp receive window size") 333 | var read_buffer_size_flag = flag.Uint("rb", 4*1024, "read buffer size (Kb)") 334 | var write_buffer_size_flag = flag.Uint("wb", 4*1024, "write buffer size (Kb)") 335 | var flush_interval_flag = flag.Uint("f", 10, "flush interval for rudp (ms)") 336 | var no_cong_flag = flag.Bool("nc", true, "no congestion control or BBR") 337 | var fast_resend_flag = flag.Uint("fr", 0, "rudp fast resend strategy. 
0 indicate turn off fast resend") 338 | var dataShards_flag = flag.Uint("data", 0, "rudp/kcp FEC dataShards option") 339 | var parityShards_flag = flag.Uint("parity", 0, "rudp/kcp FEC parityShards option") 340 | // parse argument 341 | flag.Parse() 342 | 343 | if *help_flag { 344 | flag.Usage() 345 | os.Exit(0) 346 | } 347 | // check valid 348 | flagset := make(map[string]bool) 349 | flag.Visit(func(f *flag.Flag) { flagset[f.Name]=true } ) 350 | 351 | if flagset["c"] == false{ 352 | if *server_flag == false{ 353 | return -1 354 | } 355 | } 356 | valid_protocol := false 357 | for _, proto := range PROTOCOL_LIST{ 358 | if *protocol_flag == proto { 359 | valid_protocol = true 360 | } 361 | } 362 | if valid_protocol == false{ 363 | return -2 364 | } 365 | //if flagset["nc"] == true{ 366 | // test.setting.no_cong = true 367 | //} else { 368 | // test.setting.no_cong = false 369 | //} 370 | 371 | // set block size 372 | if flagset["l"] == false{ 373 | if *protocol_flag == TCP_NAME { 374 | test.setting.blksize = DEFAULT_TCP_BLKSIZE 375 | } else if *protocol_flag == UDP_NAME { 376 | test.setting.blksize = DEFAULT_UDP_BLKSIZE 377 | } else if *protocol_flag == RUDP_NAME { 378 | test.setting.blksize = DEFAULT_RUDP_BLKSIZE 379 | } else if *protocol_flag == KCP_NAME { 380 | test.setting.blksize = DEFAULT_RUDP_BLKSIZE 381 | } 382 | } else { 383 | test.setting.blksize = *blksize_flag 384 | } 385 | 386 | if flagset["b"] == false{ 387 | test.setting.burst = true 388 | } else { 389 | test.setting.burst = false 390 | bw_str := *bandwidth_flag 391 | if string(bw_str[len(bw_str)-1]) == "M" { 392 | if n, err := strconv.Atoi(string(bw_str[:len(bw_str)-1])); err == nil{ 393 | test.setting.rate = uint(n * MB_TO_B * 8) 394 | } else { 395 | log.Errorf("Error bandwidth flag") 396 | } 397 | } else if string(bw_str[len(bw_str)-1]) == "K" { 398 | if n, err := strconv.Atoi(string(bw_str[:len(bw_str)-1])); err == nil{ 399 | test.setting.rate = uint(n * KB_TO_B * 8) 400 | } else { 401 | log.Errorf("Error bandwidth flag") 402 | } 403 | } else { 404 | if n, err := strconv.Atoi(bw_str); err == nil{ 405 | test.setting.rate = uint(n * MB_TO_B * 8) 406 | } else { 407 | log.Errorf("Error bandwidth flag") 408 | } 409 | } 410 | test.setting.pacing_time = 5 // 5ms pacing 411 | } 412 | 413 | if *debug_flag == true{ 414 | logging.SetLevel(logging.DEBUG, "iperf") 415 | logging.SetLevel(logging.DEBUG, "rudp") 416 | } else if *info_flag == true{ 417 | logging.SetLevel(logging.INFO, "iperf") 418 | logging.SetLevel(logging.INFO, "rudp") 419 | } else { 420 | logging.SetLevel(logging.ERROR, "iperf") 421 | logging.SetLevel(logging.ERROR, "rudp") 422 | } 423 | // pass to iperf_test 424 | if *server_flag == true{ 425 | test.is_server = true 426 | } else{ 427 | test.is_server = false 428 | var err error 429 | _, err = net.ResolveIPAddr("ip", *client_flag) 430 | if err != nil { 431 | return -3 432 | } 433 | test.addr = *client_flag 434 | } 435 | test.set_test_reverse(*reverse_flag) 436 | test.port = *port_flag 437 | test.state = 0 438 | test.interval = *interval_flag 439 | test.duration = *dur_flag // 10s 440 | test.stream_num = *parallel_flag 441 | // rudp only 442 | test.setting.snd_wnd = *snd_wnd_flag 443 | test.setting.rcv_wnd = *rcv_wnd_flag 444 | test.setting.read_buf_size = *read_buffer_size_flag * 1024 // Kb to b 445 | test.setting.write_buf_size = *write_buffer_size_flag * 1024 446 | test.setting.flush_interval = *flush_interval_flag 447 | test.setting.no_cong = *no_cong_flag 448 | test.setting.fast_resend = *fast_resend_flag 449 | 
test.setting.data_shards = *dataShards_flag 450 | test.setting.parity_shards = *parityShards_flag 451 | 452 | if test.interval > test.duration * 1000{ 453 | log.Errorf("interval must be smaller than duration") 454 | } 455 | test.no_delay = *no_delay_flag 456 | if test.is_server == false { 457 | test.set_protocol(*protocol_flag) 458 | } 459 | 460 | test.Print() 461 | return 0 462 | } 463 | 464 | func (test *iperf_test) run_test() int { 465 | // server 466 | if test.is_server == true { 467 | rtn := test.run_server() 468 | if rtn < 0 { 469 | log.Errorf("Run server failed. %v", rtn) 470 | return rtn 471 | } 472 | 473 | } else { 474 | // client 475 | rtn := test.run_client() 476 | if rtn < 0 { 477 | log.Errorf("Run client failed. %v", rtn) 478 | return rtn 479 | } 480 | } 481 | 482 | return 0 483 | } 484 | 485 | func (test *iperf_test) set_test_reverse(reverse bool) { 486 | test.reverse = reverse 487 | if reverse == true{ 488 | if test.is_server { 489 | test.mode = IPERF_SENDER 490 | } else { 491 | test.mode = IPERF_RECEIVER 492 | } 493 | } else { 494 | if test.is_server { 495 | test.mode = IPERF_RECEIVER 496 | } else { 497 | test.mode = IPERF_SENDER 498 | } 499 | } 500 | } 501 | 502 | func (test *iperf_test) free_test() int { 503 | return 0 504 | } 505 | 506 | func (test *iperf_test) Print() { 507 | if test.is_server { 508 | return 509 | } 510 | if test.proto == nil { 511 | log.Errorf("Protocol not set.") 512 | return 513 | } 514 | fmt.Printf("Iperf started:\n") 515 | if test.proto.name() == TCP_NAME{ 516 | fmt.Printf("addr:%v\tport:%v\tproto:%v\tinterval:%v\tduration:%v\tNoDelay:%v\tburst:%v\tBlockSize:%v\tStreamNum:%v\n", 517 | test.addr, test.port, test.proto.name(), test.interval, test.duration, test.no_delay, test.setting.burst, test.setting.blksize, test.stream_num) 518 | } else if test.proto.name() == RUDP_NAME{ 519 | fmt.Printf("addr:%v\tport:%v\tproto:%v\tinterval:%v\tduration:%v\tNoDelay:%v\tburst:%v\tBlockSize:%v\tStreamNum:%v\n" + 520 | "RUDP setting: sndWnd:%v\trcvWnd:%v\twriteBufSize:%vKb\treadBufSize:%vKb\tnoCongestion:%v\tflushInterval:%v\tdataShards:%v\tparityShards:%v\n", 521 | test.addr, test.port, test.proto.name(), test.interval, test.duration, test.no_delay, test.setting.burst, test.setting.blksize, test.stream_num, 522 | test.setting.snd_wnd, test.setting.rcv_wnd, test.setting.write_buf_size / 1024, test.setting.read_buf_size / 1024, test.setting.no_cong, 523 | test.setting.flush_interval, test.setting.data_shards, test.setting.parity_shards) 524 | } else if test.proto.name() == KCP_NAME{ 525 | fmt.Printf("addr:%v\tport:%v\tproto:%v\tinterval:%v\tduration:%v\tNoDelay:%v\tburst:%v\tBlockSize:%v\tStreamNum:%v\n" + 526 | "KCP setting: sndWnd:%v\trcvWnd:%v\twriteBufSize:%vKb\treadBufSize:%vKb\tnoCongestion:%v\tflushInterval:%v\tdataShards:%v\tparityShards:%v\n", 527 | test.addr, test.port, test.proto.name(), test.interval, test.duration, test.no_delay, test.setting.burst, test.setting.blksize, test.stream_num, 528 | test.setting.snd_wnd, test.setting.rcv_wnd, test.setting.write_buf_size / 1024, test.setting.read_buf_size / 1024, test.setting.no_cong, 529 | test.setting.flush_interval, test.setting.data_shards, test.setting.parity_shards) 530 | } 531 | } 532 | 533 | 534 | /* 535 | ---------------------------------------------------- 536 | ******************* iperf_stream ******************* 537 | ---------------------------------------------------- 538 | */ 539 | func (sp *iperf_stream) iperf_recv(test *iperf_test) { 540 | // receive loop for a single stream; runs until EOF, error, or test end 541 | for {
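// Note (added): sp.rcv follows the convention documented on iperf_stream in
// iperf.go — a positive return value is the number of bytes read, -1 means
// the peer closed the stream (EOF), and any other negative value is an
// error; the branches below handle exactly those three cases.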
542 | var n int 543 | if n = sp.rcv(sp); n < 0{ 544 | if n == -1 { 545 | log.Debugf("Stream Quit receiving") 546 | return 547 | } 548 | log.Errorf("Iperf streams receive failed. n = %v", n) 549 | return 550 | } 551 | if test.state == TEST_RUNNING { 552 | test.bytes_received += uint64(n) 553 | test.blocks_received += 1 554 | log.Debugf("Stream receive data %v bytes of total %v bytes", n, test.bytes_received) 555 | } 556 | if test.done { 557 | test.ctrl_chan <- TEST_END 558 | log.Debugf("Stream quit receiving. test done.") 559 | return 560 | } 561 | } 562 | } 563 | 564 | /* 565 | Called concurrently by multiple streams. Be careful which functions are invoked here. 566 | */ 567 | func (sp *iperf_stream) iperf_send(test *iperf_test) { 568 | // send loop for a single stream; runs until the test ends 569 | for{ 570 | if sp.can_send { 571 | var n int 572 | if n = sp.snd(sp); n < 0{ 573 | if n == -1 { 574 | log.Debugf("Iperf send stream closed.") 575 | return 576 | } 577 | log.Errorf("Iperf streams send failed. %v", n) 578 | return 579 | } 580 | test.bytes_sent += uint64(n) 581 | test.blocks_sent += 1 582 | log.Debugf("Stream sent data %v bytes of total %v bytes", n, test.bytes_sent) 583 | } 584 | if test.setting.burst == false { 585 | test.check_throttle(sp, time.Now()) 586 | } 587 | if (test.duration != 0 && test.done) || 588 | (test.setting.bytes != 0 && test.bytes_sent >= test.setting.bytes) || 589 | (test.setting.blocks != 0 && test.blocks_sent >= test.setting.blocks){ 590 | test.ctrl_chan <- TEST_END 591 | // end sending 592 | log.Debugf("Stream Quit sending") 593 | return 594 | } 595 | } 596 | } 597 | 598 | func (test *iperf_test) create_sender_ticker() int { 599 | for _, sp := range test.streams{ 600 | sp.can_send = true 601 | if test.setting.rate != 0 { 602 | if test.setting.pacing_time == 0 || test.setting.burst == true { 603 | log.Error("rate requires pacing_time to be set and burst to be off.") 604 | return -1 605 | } 606 | var cd TimerClientData 607 | cd.p = sp 608 | sp.send_ticker = ticker_create(time.Now(), send_ticker_proc, cd, test.setting.pacing_time, ^uint(0)) 609 | } 610 | } 611 | return 0 612 | } 613 | /* 614 | ---------------------------------------------------- 615 | ****************** call_back func ****************** 616 | ---------------------------------------------------- 617 | */ 618 | 619 | // Main report-printing callback.
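// It blocks on test.chStats first: iperf_stats_callback (end of this file)
// appends a fresh interval result for every stream and only then signals the
// channel, so statistics are always gathered before they are printed. The
// same handshake in miniature (self-contained sketch, not repo code):
//
//	chStats := make(chan bool, 1)
//	var results []int
//	go func() {                       // stats side
//		results = append(results, 42) // record an interval result
//		chStats <- true               // then wake the reporter
//	}()
//	<-chStats               // reporter side: wait for stats...
//	fmt.Println(results[0]) // ...then it is safe to read and print them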
620 | func iperf_reporter_callback(test *iperf_test){ 621 | <- test.chStats // only call this function after stats 622 | if test.state == TEST_RUNNING { 623 | log.Debugf("TEST_RUNNING report, role = %v, mode = %v, done = %v", test.is_server, test.mode, test.done) 624 | test.iperf_print_intermediate() 625 | } else if test.state == TEST_END || test.state == IPERF_DISPLAY_RESULT { 626 | log.Debugf("TEST_END report, role = %v, mode = %v, done = %v", test.is_server, test.mode, test.done) 627 | test.iperf_print_intermediate() 628 | test.iperf_print_results() 629 | } else { 630 | log.Errorf("Unexpected state = %v, role = %v", test.state, test.is_server) 631 | } 632 | } 633 | 634 | func (test *iperf_test)iperf_print_intermediate(){ 635 | var sum_bytes_transfer, sum_rtt uint64 636 | var sum_retrans uint 637 | var display_start_time, display_end_time float64 638 | for i, sp := range test.streams{ 639 | if i == 0 && len(sp.result.interval_results) == 1{ 640 | // first time to print result, print header 641 | if test.proto.name() == TCP_NAME { 642 | fmt.Printf(TCP_INTERVAL_HEADER) 643 | } else { 644 | fmt.Printf(RUDP_INTERVAL_HEADER) 645 | } 646 | } 647 | interval_seq := len(sp.result.interval_results) - 1 648 | rp := sp.result.interval_results[interval_seq] // get the last one 649 | supposed_start_time := time.Duration(uint(interval_seq) * test.interval) * time.Millisecond 650 | real_start_time := rp.interval_start_time.Sub(sp.result.start_time) 651 | real_end_time := rp.interval_end_time.Sub(sp.result.start_time) 652 | if dur_not_same(supposed_start_time, real_start_time) { 653 | log.Errorf("Start time differ from expected. supposed = %v, real = %v", 654 | supposed_start_time.Nanoseconds() / MS_TO_NS, real_start_time.Nanoseconds() / MS_TO_NS) 655 | //return 656 | } 657 | sum_bytes_transfer += rp.bytes_transfered 658 | sum_retrans += rp.interval_retrans 659 | sum_rtt += uint64(rp.rtt) 660 | display_start_time = float64(real_start_time.Nanoseconds())/ S_TO_NS 661 | display_end_time = float64(real_end_time.Nanoseconds())/ S_TO_NS 662 | display_bytes_transfer := float64(rp.bytes_transfered) / MB_TO_B 663 | display_bandwidth := display_bytes_transfer / float64(test.interval) * 1000 * 8 // Mb/s 664 | // output single stream interval report 665 | if test.proto.name() == TCP_NAME { 666 | //display_retrans_rate := float64(rp.interval_retrans) / (float64(rp.bytes_transfered) / TCP_MSS) * 100 667 | fmt.Printf(TCP_REPORT_SINGLE_STREAM, i, display_start_time, display_end_time, 668 | display_bytes_transfer, display_bandwidth, float64(rp.rtt)/1000, rp.interval_retrans) 669 | } else { 670 | total_segs := float64(rp.bytes_transfered) / RUDP_MSS + float64(rp.interval_retrans) 671 | display_retrans_rate := float64(rp.interval_retrans) / total_segs * 100 // to percentage 672 | display_lost_rate := float64(rp.interval_lost) / total_segs * 100 673 | display_early_retrans_rate := float64(rp.interval_early_retrans) / total_segs * 100 674 | display_fast_retrans_rate := float64(rp.interval_fast_retrans) / total_segs * 100 675 | fmt.Printf(RUDP_REPORT_SINGLE_STREAM, i, display_start_time, display_end_time, display_bytes_transfer, 676 | display_bandwidth, float64(rp.rtt)/1000, rp.interval_retrans, display_retrans_rate, 677 | display_lost_rate, display_early_retrans_rate, display_fast_retrans_rate) 678 | } 679 | } 680 | if test.stream_num > 1 { 681 | display_sum_bytes_transfer := float64(sum_bytes_transfer) / MB_TO_B 682 | display_bandwidth := display_sum_bytes_transfer / float64(test.interval) * 1000 * 8 683 | 
fmt.Printf(REPORT_SUM_STREAM, display_start_time, display_end_time, display_sum_bytes_transfer, 684 | display_bandwidth, float64(sum_rtt)/1000/float64(test.stream_num), sum_retrans) 685 | fmt.Printf(REPORT_SEPERATOR) 686 | } 687 | } 688 | 689 | func dur_not_same(d time.Duration, d2 time.Duration) bool { 690 | // if the deviation exceeds 100ms, there might be problems 691 | var diff_in_ms int = int(d.Nanoseconds() / MS_TO_NS - d2.Nanoseconds() / MS_TO_NS) 692 | if diff_in_ms < -100 || diff_in_ms > 100 { 693 | return true 694 | } 695 | return false 696 | } 697 | 698 | func (test *iperf_test)iperf_print_results(){ 699 | fmt.Printf(SUMMARY_SEPERATOR) 700 | if test.proto.name() == TCP_NAME { 701 | fmt.Printf(TCP_RESULT_HEADER) 702 | } else { 703 | fmt.Printf(RUDP_RESULT_HEADER) 704 | } 705 | 706 | if len(test.streams) <= 0 { 707 | log.Errorf("No streams available.") 708 | return 709 | } 710 | var sum_bytes_transfer uint64 711 | var sum_retrans uint 712 | var avg_rtt float64 713 | var display_start_time, display_end_time float64 714 | for i, sp := range test.streams{ 715 | display_start_time = float64(0) 716 | display_end_time = float64(sp.result.end_time.Sub(sp.result.start_time).Nanoseconds())/ S_TO_NS 717 | var display_bytes_transfer float64 718 | if test.mode == IPERF_RECEIVER { 719 | display_bytes_transfer = float64(sp.result.bytes_received) / MB_TO_B 720 | sum_bytes_transfer += sp.result.bytes_received 721 | } else { 722 | display_bytes_transfer = float64(sp.result.bytes_sent) / MB_TO_B 723 | sum_bytes_transfer += sp.result.bytes_sent 724 | } 725 | display_rtt := float64(sp.result.stream_sum_rtt) / float64(sp.result.stream_cnt_rtt) / 1000 726 | avg_rtt += display_rtt 727 | display_bandwidth := display_bytes_transfer / float64(test.duration) * 8 // Mb/s 728 | sum_retrans += sp.result.stream_retrans 729 | var role string 730 | if sp.role == SENDER_STREAM { 731 | role = "SENDER" 732 | } else { 733 | role = "RECEIVER" 734 | } 735 | // output single stream final report 736 | if test.proto.name() == TCP_NAME { 737 | total_segs := (display_bytes_transfer * MB_TO_B / TCP_MSS) + float64(sp.result.stream_retrans) 738 | display_retrans_rate := float64(sp.result.stream_retrans) / total_segs * 100 739 | fmt.Printf(TCP_REPORT_SINGLE_RESULT, i, display_start_time, display_end_time, display_bytes_transfer, 740 | display_bandwidth, display_rtt, sp.result.stream_retrans, display_retrans_rate, role) 741 | } else { 742 | total_segs := float64(sp.result.stream_out_segs) 743 | display_retrans_rate := float64(sp.result.stream_retrans) / total_segs * 100 744 | display_lost_rate := float64(sp.result.stream_lost) / total_segs * 100 745 | display_early_retrans_rate := float64(sp.result.stream_early_retrans) / total_segs * 100 746 | display_fast_retrans_rate := float64(sp.result.stream_fast_retrans) / total_segs * 100 747 | 748 | recover_rate := float64(sp.result.stream_recovers) / total_segs * 100 749 | pkts_lost_rate := (1 - float64(sp.result.stream_in_pkts) / float64(sp.result.stream_out_pkts)) * 100 750 | segs_lost_rate := (1 - float64(sp.result.stream_in_segs) / float64(sp.result.stream_out_segs)) * 100 751 | fmt.Printf(RUDP_REPORT_SINGLE_RESULT, i, display_start_time, display_end_time, display_bytes_transfer, 752 | display_bandwidth, display_rtt, sp.result.stream_retrans, display_retrans_rate, 753 | display_lost_rate, display_early_retrans_rate, display_fast_retrans_rate, 754 | recover_rate, pkts_lost_rate, segs_lost_rate, role) 755 | fmt.Printf("total_segs = %v, out_segs = %v, in_segs = %v, out_pkts =
%v, recovery = %v\n", 756 | total_segs, sp.result.stream_out_segs, sp.result.stream_in_segs, sp.result.stream_out_pkts, sp.result.stream_in_pkts, sp.result.stream_recovers) 757 | } 758 | } 759 | if test.stream_num > 1 { 760 | display_sum_bytes_transfer := float64(sum_bytes_transfer) / MB_TO_B 761 | display_bandwidth := display_sum_bytes_transfer / float64(test.duration) * 1000 * 8 762 | fmt.Printf(REPORT_SUM_STREAM, display_start_time, display_end_time, 763 | display_sum_bytes_transfer, display_bandwidth, avg_rtt / float64(test.stream_num), sum_retrans) 764 | } 765 | } 766 | 767 | // Gather statistics during a test. 768 | func iperf_stats_callback(test *iperf_test){ 769 | for _, sp := range test.streams{ 770 | temp_result := iperf_interval_results{} 771 | rp := sp.result 772 | if len(rp.interval_results) == 0 { 773 | // first interval 774 | temp_result.interval_start_time = rp.start_time 775 | } else { 776 | temp_result.interval_start_time = rp.end_time // rp.end_time contains timestamp of previous interval 777 | } 778 | rp.end_time = time.Now() 779 | temp_result.interval_end_time = rp.end_time 780 | temp_result.interval_dur = temp_result.interval_end_time.Sub(temp_result.interval_start_time) 781 | test.proto.stats_callback(test, sp, &temp_result) // write temp_result differ from proto to proto 782 | if test.mode == IPERF_RECEIVER { 783 | temp_result.bytes_transfered = rp.bytes_received_this_interval 784 | } else { 785 | temp_result.bytes_transfered = rp.bytes_sent_this_interval 786 | } 787 | rp.interval_results = append(rp.interval_results, temp_result) 788 | rp.bytes_sent_this_interval = 0 789 | rp.bytes_received_this_interval = 0 790 | } 791 | test.chStats <- true 792 | } --------------------------------------------------------------------------------