├── .gitattributes
├── docs
│   ├── _config.yml
│   ├── index.md
│   └── _layouts
│       └── default.html
├── scripts
│   ├── percentile.sh
│   ├── decode_statistics.sh
│   ├── framenum.sh
│   ├── dec_ratio_session.sh
│   ├── playtime_stat.py
│   ├── stall_sessions.py
│   ├── frchange_stat.py
│   ├── cond_prob.py
│   ├── stat.py
│   └── component_relation.py
├── README.md
├── sim
│   ├── flow_percentile_stat.py
│   ├── flowtype_stat.py
│   ├── delayed_stat.py
│   ├── environment.yaml
│   ├── param_optimize.py
│   ├── env.py
│   ├── traceloader.py
│   ├── README.md
│   ├── main.py
│   ├── stat.py
│   └── algorithm.py
└── .gitignore
/.gitattributes:
--------------------------------------------------------------------------------
1 | # Auto detect text files and perform LF normalization
2 | * text=auto
3 |
--------------------------------------------------------------------------------
/docs/_config.yml:
--------------------------------------------------------------------------------
1 | theme: jekyll-theme-cayman
2 |
3 | title: AFR
4 | description: Enabling High Quality Real-Time Communications with Adaptive Frame-Rate
--------------------------------------------------------------------------------
/scripts/percentile.sh:
--------------------------------------------------------------------------------
1 | # Print approximate percentiles (p50, p90, p95, p99, p99.9, p99.99) of column 3 of the log file given as $1
2 | for i in {0.5,0.9,0.95,0.99,0.999,0.9999}
3 | do
4 | awk '{print $3}' "$1" | sort -n | awk '{all[NR] = $0} END{print all[int(NR*"'${i}'" - 0.5)]}'
5 | done
--------------------------------------------------------------------------------
/scripts/decode_statistics.sh:
--------------------------------------------------------------------------------
1 | # For each log in directory $1, histogram the values of column 5 (decoding time) into $2/<log>
2 | for i in `ls ${1}`
3 | do
4 | cat ${1}/${i} | awk '{res[$5]++;} END{for (i in res){print i,res[i]}}' > ${2}/${i}
5 | # dec=`cat ${1}/${i} | awk '{sum += $5;} END{print sum/NR}'`
6 | # echo ${i}, ${dec} >> ${1}/../delayed_frames_decode.log
7 | done
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # AFR
2 | This repo contains all the code necessary for the implementation of the AFR paper. Specifically,
3 | - `scripts/` contains the evaluation and statistics scripts for the paper.
4 | - `sim/` contains the code for the frame-rate adaptation simulator. We also provide a sample trace in the `sim/` folder.
5 |
6 | In general, when new traces are collected, the scripts in the `scripts/` folder are needed to preprocess and parse the traces. The simulator in `sim/` can then be executed to run the simulation and output the log files. Finally, the scripts in `scripts/` are needed again to calculate the desired statistics from the output logs. An example pipeline is sketched below.
7 |
8 | Please refer to the README files in each folder for detailed usage.
9 |
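10 | For example, one pass over newly collected traces could look like the following. This is only a sketch: the paths and the settings name are illustrative, following the defaults and examples used in the scripts and in `sim/README.md`.
11 |
12 | ```
13 | # 1. Preprocess/parse the collected traces with the statistics scripts
14 | bash scripts/decode_statistics.sh online_data/raw_logs online_data/decode_stats
15 |
16 | # 2. Run the simulator over the preprocessed traces
17 | cd sim
18 | python main.py --trace ../online_data/cut --log ../online_data/logs \
19 |     --action ../online_data/actions --result ../online_data/results \
20 |     --algorithm afr --mode release
21 |
22 | # 3. Calculate the statistics from the output logs
23 | cd ../scripts
24 | python stall_sessions.py --log ../online_data/logs --settings afr_0.002_0.033_0.250 --delay-thres 100
25 | ```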
--------------------------------------------------------------------------------
/sim/flow_percentile_stat.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 |
4 | SAMPLING_TILE = 100 # in 0.01%
5 |
6 | if __name__ == "__main__":
7 | parser = argparse.ArgumentParser()
8 | parser.add_argument('--percentile')
9 | parser.add_argument('--filter', type=str)
10 | parser.add_argument('--result', type=str)
11 |
12 | args = parser.parse_args()
13 |
14 | with open(os.path.join(args.result, args.filter + '-' + str(args.percentile) + '.log'), 'w') as fout:
15 | for fname in os.listdir(args.result):
16 | if (args.filter + '.log') not in fname:
17 | continue
18 | with open(os.path.join(args.result, fname), 'r') as f:
19 | while True:
20 | line = f.readline().split()
21 | if not line:
22 | break
23 | if args.percentile == 'Avg':
24 | if line[0] == args.percentile:
25 | fout.write(fname + ' ' + line[1] + '\n')
26 | break
27 | else:
28 | if int(line[0]) >= int(args.percentile) * SAMPLING_TILE:
29 | fout.write(fname + ' ' + line[1] + '\n')
30 | break
31 |
--------------------------------------------------------------------------------
/scripts/framenum.sh:
--------------------------------------------------------------------------------
1 | cd ../online_data/actions
2 | for filter in w22w w32w w30w w20w
3 | do
4 | echo ${filter}
5 | cd 60fps
6 | cat ../../cut_${filter}_60fps_flowlist.log | xargs -i wc -l {} > ../../results/${filter}/framenum-60fps.log
7 | cat ../../results/${filter}/native/stall_sessions_100.log | awk '{if($3>0.05){print $1}}' | xargs -i wc -l {} > ../../results/${filter}/framenum-60fps-stutter.log
8 | cd ../afr_0.002_0.033_0.250
9 | cat ../../cut_${filter}_60fps_flowlist.log | xargs -i wc -l {} > ../../results/${filter}/framenum-afr.log
10 | cat ../../results/${filter}/native/stall_sessions_100.log | awk '{if($3>0.05){print $1}}' | xargs -i wc -l {} > ../../results/${filter}/framenum-afr-stutter.log
11 | cd ..
12 | paste -d" " ../results/${filter}/framenum-60fps.log ../results/${filter}/framenum-afr.log | awk '{print $2,$1,$3,($1-$3)/$3}' > ../results/${filter}/framenum-60fps-afr.log
13 | paste -d" " ../results/${filter}/framenum-60fps-stutter.log ../results/${filter}/framenum-afr-stutter.log | awk '{print $2,$1,$3,($1-$3)/$3}' > ../results/${filter}/framenum-60fps-afr-stutter.log
14 | done
15 |
16 | # cat ../online_data/cut_w30w_flowlist.log | sort | xargs -i wc -l ../online_data/logs/afr_0.002_0.033_0.250/{} > w30w_afr_0.002_0.033_0.250_wc.log
17 | # cat ../online_data/cut_w30w_flowlist.log | sort | xargs -i wc -l ../online_data/logs/native/{} > w30w_native_wc.log
18 |
--------------------------------------------------------------------------------
/sim/flowtype_stat.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | import subprocess as sp
4 | import numpy as np
5 |
6 | parser = argparse.ArgumentParser()
7 | parser.add_argument('--filter', type=str, default='w2w1',
8 | help='devType (UNKNOWN, DESKTOP, LAPTOP, PHONE, PAD, STB, TV), \n' +
9 | 'netType (NONE, MOBILE, ETHERNET, WIFI, OTHER), \n' +
10 | 'clientType (WinPC, IOS, MacPC, Android), \n' +
11 | 'decodeType (SOFTWARE, HARDWARE)')
12 | parser.add_argument('--flowinfo', type=str, default='../online_data/flowinfo.log')
13 | parser.add_argument('--flowlist', type=str, required=True)  # required: fnames below is only defined from the flow list
14 | args = parser.parse_args()
15 |
16 | devType = args.filter[0]
17 | netType = args.filter[1]
18 | clientType = args.filter[2]
19 | decodeType = args.filter[3]
20 |
21 | filtered_sid = []
22 | with open(args.flowinfo, 'r') as f:
23 | while True:
24 | line = f.readline().split()
25 | if not line:
26 | break
27 | devCond = devType == 'w' or devType == line[2]
28 | netCond = netType == 'w' or netType == line[4]
29 | clientCond = clientType == 'w' or clientType == line[6]
30 | decodeCond = decodeType == 'w' or decodeType == line[8]
31 | if devCond and netCond and clientCond and decodeCond:
32 | filtered_sid.append(line[0])
33 |
34 | if args.flowlist:
35 | fnames = np.loadtxt(args.flowlist, dtype=str).tolist()
36 |
37 | fnames_new = []
38 | for fname in fnames:
39 | sid = fname.split('_')[0]
40 | if sid in filtered_sid:
41 | fnames_new.append(fname)
42 | print(len(fnames_new), '/', len(fnames))
--------------------------------------------------------------------------------
/scripts/dec_ratio_session.sh:
--------------------------------------------------------------------------------
1 | cd ../online_data
2 | dec_a=6
3 | dec_b=12
4 | dec_c=18
5 | dec_d=24
6 | dec_e=30
7 |
8 | for dec_th in ${dec_a} ${dec_b} ${dec_c} ${dec_d} ${dec_e}
9 | do
10 | rm dec_dist/sess_${dec_th}ms_unsorted.tmp
11 | done
12 | cat cut_wc5w_flowlist.log | xargs -i awk '{
13 | if ($5 > '"${dec_a}"') {
14 | cnt_a++
15 | }
16 | if ($5 > '"${dec_b}"') {
17 | cnt_b++
18 | }
19 | if ($5 > '"${dec_c}"') {
20 | cnt_c++
21 | }
22 | if ($5 > '"${dec_d}"') {
23 | cnt_d++
24 | }
25 | if ($5 > '"${dec_e}"') {
26 | cnt_e++
27 | }
28 | } END {
29 | print FILENAME, cnt_a/NR >> "dec_dist/sess_"'${dec_a}'"ms_unsorted.tmp"
30 | print FILENAME, cnt_b/NR >> "dec_dist/sess_"'${dec_b}'"ms_unsorted.tmp"
31 | print FILENAME, cnt_c/NR >> "dec_dist/sess_"'${dec_c}'"ms_unsorted.tmp"
32 | print FILENAME, cnt_d/NR >> "dec_dist/sess_"'${dec_d}'"ms_unsorted.tmp"
33 | print FILENAME, cnt_e/NR >> "dec_dist/sess_"'${dec_e}'"ms_unsorted.tmp"
34 | }' cut/{}
35 |
36 | for dec_th in ${dec_a} ${dec_b} ${dec_c} ${dec_d} ${dec_e}
37 | do
38 | awk '{print $2}' dec_dist/sess_${dec_th}ms_unsorted.tmp | sort -gk1 > dec_dist/sess_${dec_th}ms.tmp
39 | awk -vstep=$(awk 'END{printf("%.4f\n", NR/10000)}' dec_dist/sess_${dec_th}ms.tmp | bc) 'BEGIN {
40 | cnt = 1
41 | } {
42 | sum += $1
43 | if (NR >= int(cnt*step)) {
44 | print cnt, $0
45 | cnt = cnt + 1 + int(NR-cnt*step)
46 | }
47 | } END {
48 | printf("Avg %.3f\n", sum/NR)
49 | }' dec_dist/sess_${dec_th}ms.tmp > dec_dist/sess_${dec_th}ms.log
50 | rm dec_dist/sess_${dec_th}ms.tmp
51 | done
--------------------------------------------------------------------------------
/docs/index.md:
--------------------------------------------------------------------------------
1 |
2 | ## Abstract
3 |
4 | Emerging high quality real-time communication (RTC) applications stream ultra-high-definition (UHD) videos with high frame rate (HFR). They use edge computing, which enables high bandwidth and low latency streaming. Our measurements, from the cloud gaming platform of one of the largest gaming companies, show that, in this setting, the client-side decoder is often the cause of high latency that hurts the user experience. We therefore propose an Adaptive Frame Rate (AFR) controller that helps achieve ultra-low latency by coordinating the frame rate with network fluctuations and decoder capacity. AFR's design addresses two key challenges: (1) queue measurements do not provide timely feedback for the control loop, and (2) multiple factors control the decoder queue, and different actions must be taken depending on why the queue accumulates. Trace-driven simulations and large-scale deployments in the wild demonstrate that AFR can reduce the tail queuing delay by up to 7.4× and the stuttering events measured by end-to-end delay by 34% on average. AFR has been deployed in production in our cloud gaming service for over one year.
5 |
6 | ## Paper
7 |
8 | ### Enabling High Quality Real-Time Communications with Adaptive Frame-Rate
9 |
10 | Zili Meng, Tingfeng Wang, Yixin Shen, Bo Wang, Mingwei Xu, Rui Han, Honghao Liu, Venkat Arun, Hongxin Hu, Xue Wei.
In Proceedings of USENIX NSDI 2023
[[PDF]](https://zilimeng.com/papers/afr-nsdi23.pdf)
11 |
12 | ### Citation
13 |
14 | ```
15 | @inproceedings{meng2023enabling,
16 | title={Enabling High Quality Real-Time Communications with Adaptive Frame-Rate},
17 | author={Meng, Zili and Wang, Tingfeng and Shen, Yixin and Wang, Bo and Xu, Mingwei and Han, Rui and Liu, Honghao and Arun, Venkat and Hu, Hongxin and Wei, Xue},
18 | booktitle={Proc. USENIX NSDI},
19 | year={2023}
20 | }
21 | ```
22 |
23 | ## Code
24 |
25 | [GitHub](https://github.com/transys-project/afr/)
26 |
27 | ## Supporters
28 |
29 | The research is supported by the National Natural Science Foundation of China (No. 62002196, 61832013, and
30 | 62221003) and the Tsinghua-Tencent Collaborative Grant.
31 |
32 | ## Contact
33 | For any questions, please send an email to [zilim@ieee.org](mailto:zilim@ieee.org).
34 |
35 |
--------------------------------------------------------------------------------
/scripts/playtime_stat.py:
--------------------------------------------------------------------------------
1 | import os
2 | import datetime
3 | import argparse
4 |
5 | parser = argparse.ArgumentParser()
6 | parser.add_argument('--logdir', type=str, default='../online_data/cut')
7 | parser.add_argument('--filter', type=str, default='wwww',
8 | help='devType (UNKNOWN, DESKTOP, LAPTOP, PHONE, PAD, STB, TV), \n' +
9 | 'netType (NONE, MOBILE, ETHERNET, WIFI, OTHER), \n' +
10 | 'clientType (WinPC, IOS, MacPC, Android), \n' +
11 | 'decodeType (SOFTWARE, HARDWARE)')
12 | parser.add_argument('--flowinfo', type=str, default='../online_data/flowinfo.log')
13 | args = parser.parse_args()
14 |
15 | devType = args.filter[0]
16 | netType = args.filter[1]
17 | clientType = args.filter[2]
18 | decodeType = args.filter[3]
19 |
20 | filtered_sid = []
21 | with open(args.flowinfo, 'r') as f:
22 | while True:
23 | line = f.readline().split()
24 | if not line:
25 | break
26 | devCond = devType == 'w' or devType == line[2]
27 | netCond = netType == 'w' or netType == line[4]
28 | clientCond = clientType == 'w' or clientType == line[6]
29 | decodeCond = decodeType == 'w' or decodeType == line[8]
30 | if devCond and netCond and clientCond and decodeCond:
31 | filtered_sid.append(line[0])
32 |
33 | fnames = os.listdir(args.logdir)
34 | fnames_new = []
35 | for fname in fnames:
36 | sid = fname.split('_')[0]
37 | if sid in filtered_sid:
38 | fnames_new.append(fname)
39 | print(len(fnames_new), '/', len(fnames))
40 | fnames = fnames_new
41 |
42 | playtime = datetime.timedelta(0)
43 | for fname in fnames:
44 | with open(os.path.join(args.logdir, fname), 'rb') as f:
45 | first_line = f.readline()
46 | off = -50
47 | while True:
48 | f.seek(off, 2)
49 | lines = f.readlines()
50 | if len(lines) >= 2:
51 | last_line = lines[-1]
52 | break
53 | off *= 2
54 |
55 | try:
56 | start_time = datetime.datetime.strptime(first_line.split()[0].decode() + ' ' + first_line.split()[1].decode(), "%Y/%m/%d %H:%M:%S:%f")
57 | end_time = datetime.datetime.strptime(last_line.split()[0].decode() + ' ' + last_line.split()[1].decode(), "%Y/%m/%d %H:%M:%S:%f")
58 | playtime += end_time - start_time
59 | except ValueError as e:
60 | print(fname, e)
61 | print(playtime)
62 |
--------------------------------------------------------------------------------
/sim/delayed_stat.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | import subprocess as sp
4 | import numpy as np
5 |
6 | parser = argparse.ArgumentParser()
7 | parser.add_argument('--log', type=str)
8 | parser.add_argument('--result', type=str)
9 | parser.add_argument('--settings', type=str)
10 | parser.add_argument('--filter', type=str, default='w2w1',
11 | help='devType (UNKNOWN, DESKTOP, LAPTOP, PHONE, PAD, STB, TV), \n' +
12 | 'netType (NONE, MOBILE, ETHERNET, WIFI, OTHER), \n' +
13 | 'clientType (WinPC, IOS, MacPC, Android), \n' +
14 | 'decodeType (SOFTWARE, HARDWARE)')
15 | parser.add_argument('--condition', type=str)
16 | parser.add_argument('--flowinfo', type=str, default='../online_data/flowinfo.log')
17 | parser.add_argument('--flowlist', type=str)
18 | args = parser.parse_args()
19 |
20 | devType = args.filter[0]
21 | netType = args.filter[1]
22 | clientType = args.filter[2]
23 | decodeType = args.filter[3]
24 |
25 | filtered_sid = []
26 | with open(args.flowinfo, 'r') as f:
27 | while True:
28 | line = f.readline().split()
29 | if not line:
30 | break
31 | devCond = devType == 'w' or devType == line[2]
32 | netCond = netType == 'w' or netType == line[4]
33 | clientCond = clientType == 'w' or clientType == line[6]
34 | decodeCond = decodeType == 'w' or decodeType == line[8]
35 | if devCond and netCond and clientCond and decodeCond:
36 | filtered_sid.append(line[0])
37 |
38 | if args.flowlist:
39 | fnames = np.loadtxt(args.flowlist, dtype=str).tolist()
40 | prefix = os.path.join(args.result, args.filter + '_' + os.path.split(args.flowlist)[-1], args.settings)
41 | else:
42 | fnames = os.listdir(os.path.join(args.log, args.settings))
43 | prefix = os.path.join(args.result, args.filter, args.settings)
44 |
45 | fnames_new = []
46 | for fname in fnames:
47 | sid = fname.split('_')[0]
48 | if sid in filtered_sid:
49 | fnames_new.append(fname)
50 | print(len(fnames_new), '/', len(fnames))
51 | fnames = fnames_new
52 |
53 | with open('stat.tmp', 'w') as f:
54 | for fname in fnames:
55 | f.write(os.path.join(args.log, args.settings, fname) + '\n')
56 |
57 | if not os.path.exists(prefix):
58 | os.makedirs(prefix)
59 |
60 | sp_stat = sp.Popen("cat stat.tmp | xargs awk \'{if($" + args.condition[0] +
61 | ">" + args.condition[1:] + "){print FILENAME,$0}}\' > " +
62 | os.path.join(prefix, args.condition + "-frames.log"), shell=True)
63 | sp_stat.wait()
64 |
65 | sp_clean = sp.Popen('rm stat.tmp', shell=True)
66 | sp_clean.wait()
--------------------------------------------------------------------------------
/scripts/stall_sessions.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 | import argparse
4 | import datetime
5 | import subprocess as sp
6 | from multiprocessing import Pool
7 |
8 |
9 | def stall_stats(params):
10 | fname, args = params
11 | awk_proc = sp.Popen("awk 'BEGIN{stall=0} {if($5>" + str(args.delay_thres) + "){stall++}} END{print NR,stall/NR}' " +
12 | os.path.join(args.log, args.settings, fname), shell=True, stdout=sp.PIPE)
13 | out, err = awk_proc.communicate()
14 | awk_proc.wait()
15 | return [fname, out]
16 |
17 |
18 | if __name__ == '__main__':
19 | starttime = datetime.datetime.now()
20 | parser = argparse.ArgumentParser()
21 | parser.add_argument('--log', type=str, default='../online_data/logs')
22 | parser.add_argument('--result', type=str, default='../online_data/results')
23 | parser.add_argument('--settings', type=str)
24 | parser.add_argument('--delay-thres', type=int, default=100)
25 | parser.add_argument('--filter', type=str, default='wwww',
26 | help='devType (UNKNOWN, DESKTOP, LAPTOP, PHONE, PAD, STB, TV), \n' +
27 | 'netType (NONE, MOBILE, ETHERNET, WIFI, OTHER), \n' +
28 | 'clientType (WinPC, IOS, MacPC, Android), \n' +
29 | 'decodeType (SOFTWARE, HARDWARE)')
30 | parser.add_argument('--flowinfo', type=str, default='../online_data/flowinfo.log')
31 | parser.add_argument('--worker', type=int)
32 | args = parser.parse_args()
33 |
34 | devType = args.filter[0]
35 | netType = args.filter[1]
36 | clientType = args.filter[2]
37 | decodeType = args.filter[3]
38 |
39 | filtered_sid = []
40 | with open(args.flowinfo, 'r') as f:
41 | while True:
42 | line = f.readline().split()
43 | if not line:
44 | break
45 | devCond = devType == 'w' or devType == line[2]
46 | netCond = netType == 'w' or netType == line[4]
47 | clientCond = clientType == 'w' or clientType == line[6]
48 | decodeCond = decodeType == 'w' or decodeType == line[8]
49 | if devCond and netCond and clientCond and decodeCond:
50 | filtered_sid.append(line[0])
51 |
52 | fnames = os.listdir(os.path.join(args.log, args.settings))
53 | fnames_new = []
54 | for fname in fnames:
55 | sid = fname.split('_')[0]
56 | if sid in filtered_sid:
57 | fnames_new.append(fname)
58 | print(len(fnames_new), '/', len(fnames))
59 | fnames = fnames_new
60 |
61 | if args.worker:
62 | pool = Pool(args.worker)
63 | else:
64 | pool = Pool()
65 | results = pool.map(stall_stats, [(fname, args) for fname in fnames])
66 | pool.close()
67 | pool.join()
68 |     result_dir = os.path.join(args.result, args.filter, args.settings)
69 |     os.makedirs(result_dir, exist_ok=True)  # make sure the output directory exists
70 |     with open(os.path.join(result_dir,
71 |               'stall_sessions_' + str(args.delay_thres) + '.log'), 'w') as f:
72 |         for result in results:
73 |             fname = result[0]
74 |             out = str(result[1], encoding='utf-8')
75 |             if not out.endswith('\n'):
76 |                 out += '\n'
77 |             f.write("%s %s" % (fname, out))
78 |     endtime = datetime.datetime.now()
79 |     print(f'{args.settings} TotalTime: {(endtime - starttime).total_seconds()/60:.2f} minutes')
80 |
--------------------------------------------------------------------------------
/docs/_layouts/default.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | {% if site.google_analytics %}
6 |
7 |
13 | {% endif %}
14 |
15 |
16 | {% seo %}
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
49 |
50 |
51 | {{ content }}
52 |
53 |
54 |
55 |
--------------------------------------------------------------------------------
/scripts/frchange_stat.py:
--------------------------------------------------------------------------------
1 | import os
2 | import argparse
3 | import datetime
4 | import subprocess as sp
5 | from multiprocessing import Pool
6 |
7 | def frchange_stat(params):
8 | fname, args = params
9 | awk_proc = sp.Popen("awk '{if($1!=last){idx++;last=$1}} END{print NR/idx}' " +
10 | os.path.join(args.action, args.settings, fname), shell=True, stdout=sp.PIPE)
11 | out, err = awk_proc.communicate()
12 | awk_proc.wait()
13 | out = float(str(out, encoding='utf-8').split()[0])
14 | return [fname, out]
15 |
16 | # fname, args = params
17 | # with open(os.path.join(args.action, args.settings, fname), 'r') as f:
18 | # last_line = int(f.readline().split()[0])
19 | # cnt = 0
20 | # cnt_sum = 0
21 | # cnt_idx = 0
22 | # while True:
23 | # cur_line = f.readline()
24 | # if not cur_line:
25 | # break
26 | # cur_line = int(cur_line.split()[0])
27 | # if last_line == cur_line:
28 | # cnt += 1
29 | # else:
30 | # cnt_sum += cnt
31 | # cnt_idx += 1
32 | # cnt = 0
33 | # last_line = cur_line
34 | # return [fname, cnt_sum / cnt_idx]
35 |
36 |
37 | if __name__ == '__main__':
38 | starttime = datetime.datetime.now()
39 | parser = argparse.ArgumentParser()
40 | parser.add_argument('--action', type=str, default='../online_data/actions')
41 | parser.add_argument('--result', type=str, default='../online_data/results')
42 | parser.add_argument('--settings', type=str)
43 | parser.add_argument('--filter', type=str, default='wwww',
44 | help='devType (UNKNOWN, DESKTOP, LAPTOP, PHONE, PAD, STB, TV), \n' +
45 | 'netType (NONE, MOBILE, ETHERNET, WIFI, OTHER), \n' +
46 | 'clientType (WinPC, IOS, MacPC, Android), \n' +
47 | 'decodeType (SOFTWARE, HARDWARE)')
48 | parser.add_argument('--flowinfo', type=str, default='../online_data/flowinfo.log')
49 | parser.add_argument('--worker', type=int)
50 | args = parser.parse_args()
51 |
52 | devType = args.filter[0]
53 | netType = args.filter[1]
54 | clientType = args.filter[2]
55 | decodeType = args.filter[3]
56 |
57 | filtered_sid = []
58 | with open(args.flowinfo, 'r') as f:
59 | while True:
60 | line = f.readline().split()
61 | if not line:
62 | break
63 | devCond = devType == 'w' or devType == line[2]
64 | netCond = netType == 'w' or netType == line[4]
65 | clientCond = clientType == 'w' or clientType == line[6]
66 | decodeCond = decodeType == 'w' or decodeType == line[8]
67 | if devCond and netCond and clientCond and decodeCond:
68 | filtered_sid.append(line[0])
69 |
70 | fnames = os.listdir(os.path.join(args.action, args.settings))
71 | fnames_new = []
72 | for fname in fnames:
73 | sid = fname.split('_')[0]
74 | if sid in filtered_sid:
75 | fnames_new.append(fname)
76 | print(len(fnames_new), '/', len(fnames))
77 | fnames = fnames_new
78 |
79 | if args.worker:
80 | pool = Pool(args.worker)
81 | else:
82 | pool = Pool()
83 | results = pool.map(frchange_stat, [(fname, args) for fname in fnames])
84 | pool.close()
85 | pool.join()
86 |     result_dir = os.path.join(args.result, args.filter, args.settings)
87 |     os.makedirs(result_dir, exist_ok=True)  # make sure the output directory exists
88 |     with open(os.path.join(result_dir, 'frchange.log'), 'w') as f:
89 |         for result in results:
90 |             f.write("%s %d\n" % (result[0], result[1]))
91 |     endtime = datetime.datetime.now()
92 |     print(f'{args.settings} TotalTime: {(endtime - starttime).total_seconds()/60:.2f} minutes')
--------------------------------------------------------------------------------
/sim/environment.yaml:
--------------------------------------------------------------------------------
1 | name: afr
2 | channels:
3 | - defaults
4 | dependencies:
5 | - _libgcc_mutex=0.1=main
6 | - backcall=0.2.0=py_0
7 | - blas=1.0=mkl
8 | - ca-certificates=2021.7.5=h06a4308_1
9 | - certifi=2021.5.30=py38h06a4308_0
10 | - cffi=1.14.0=py38he30daa8_1
11 | - chardet=3.0.4=py38_1003
12 | - conda=4.10.3=py38h06a4308_0
13 | - conda-package-handling=1.6.1=py38h7b6447c_0
14 | - cryptography=2.9.2=py38h1ba5d50_0
15 | - cycler=0.10.0=py38_0
16 | - dbus=1.13.18=hb2f20db_0
17 | - decorator=4.4.2=py_0
18 | - expat=2.2.10=he6710b0_2
19 | - fontconfig=2.13.0=h9420a91_0
20 | - freetype=2.10.4=h5ab3b9f_0
21 | - glib=2.66.1=h92f7085_0
22 | - gst-plugins-base=1.14.0=hbbd80ab_1
23 | - gstreamer=1.14.0=hb31296c_0
24 | - icu=58.2=he6710b0_3
25 | - idna=2.9=py_1
26 | - intel-openmp=2020.2=254
27 | - ipykernel=5.3.4=py38h5ca1d4c_0
28 | - ipython=7.19.0=py38hb070fc8_0
29 | - ipython_genutils=0.2.0=pyhd3eb1b0_1
30 | - jedi=0.18.0=py38h06a4308_0
31 | - jpeg=9b=h024ee3a_2
32 | - jupyter_client=6.1.7=py_0
33 | - jupyter_core=4.7.0=py38h06a4308_0
34 | - kiwisolver=1.3.0=py38h2531618_0
35 | - lcms2=2.11=h396b838_0
36 | - ld_impl_linux-64=2.33.1=h53a641e_7
37 | - libedit=3.1.20181209=hc058e9b_0
38 | - libffi=3.3=he6710b0_1
39 | - libgcc-ng=9.1.0=hdf63c60_0
40 | - libgfortran-ng=7.3.0=hdf63c60_0
41 | - libpng=1.6.37=hbc83047_0
42 | - libsodium=1.0.18=h7b6447c_0
43 | - libstdcxx-ng=9.1.0=hdf63c60_0
44 | - libtiff=4.1.0=h2733197_1
45 | - libuuid=1.0.3=h1bed415_2
46 | - libxcb=1.14=h7b6447c_0
47 | - libxml2=2.9.10=hb55368b_3
48 | - lz4-c=1.9.2=heb0550a_3
49 | - matplotlib=3.3.2=0
50 | - matplotlib-base=3.3.2=py38h817c723_0
51 | - mkl=2020.2=256
52 | - mkl-service=2.3.0=py38he904b0f_0
53 | - mkl_fft=1.2.0=py38h23d657b_0
54 | - mkl_random=1.1.1=py38h0573a6f_0
55 | - ncurses=6.2=he6710b0_1
56 | - numpy=1.19.2=py38h54aff64_0
57 | - numpy-base=1.19.2=py38hfa32c7d_0
58 | - olefile=0.46=py_0
59 | - openssl=1.1.1k=h27cfd23_0
60 | - pandas=1.1.3=py38he6710b0_0
61 | - parso=0.7.0=py_0
62 | - pcre=8.44=he6710b0_0
63 | - pexpect=4.8.0=pyhd3eb1b0_3
64 | - pickleshare=0.7.5=pyhd3eb1b0_1003
65 | - pillow=8.0.1=py38he98fc37_0
66 | - pip=20.0.2=py38_3
67 | - prompt-toolkit=3.0.8=py_0
68 | - ptyprocess=0.7.0=pyhd3eb1b0_2
69 | - pycosat=0.6.3=py38h7b6447c_1
70 | - pycparser=2.20=py_0
71 | - pygments=2.7.3=pyhd3eb1b0_0
72 | - pyopenssl=19.1.0=py38_0
73 | - pyparsing=2.4.7=py_0
74 | - pyqt=5.9.2=py38h05f1152_4
75 | - pysocks=1.7.1=py38_0
76 | - python=3.8.3=hcff3b4d_0
77 | - python-dateutil=2.8.1=py_0
78 | - pytz=2020.1=py_0
79 | - pyzmq=20.0.0=py38h2531618_1
80 | - qt=5.9.7=h5867ecd_1
81 | - readline=8.0=h7b6447c_0
82 | - requests=2.23.0=py38_0
83 | - ruamel_yaml=0.15.87=py38h7b6447c_0
84 | - scipy=1.6.2=py38h91f5cce_0
85 | - seaborn=0.11.0=py_0
86 | - setuptools=46.4.0=py38_0
87 | - sip=4.19.13=py38he6710b0_0
88 | - six=1.14.0=py38_0
89 | - sqlite=3.31.1=h62c20be_1
90 | - tk=8.6.8=hbc83047_0
91 | - tornado=6.0.4=py38h7b6447c_1
92 | - tqdm=4.46.0=py_0
93 | - traitlets=5.0.5=py_0
94 | - urllib3=1.25.8=py38_0
95 | - wcwidth=0.2.5=py_0
96 | - wheel=0.34.2=py38_0
97 | - xz=5.2.5=h7b6447c_0
98 | - yaml=0.1.7=had09818_2
99 | - zeromq=4.3.3=he6710b0_3
100 | - zlib=1.2.11=h7b6447c_3
101 | - zstd=1.4.5=h9ceee32_0
102 | - pip:
103 | - aiohttp==3.7.2
104 | - aiohttp-cors==0.7.0
105 | - aioredis==1.3.1
106 | - async-timeout==3.0.1
107 | - atari-py==0.2.6
108 | - attrs==20.3.0
109 | - beautifulsoup4==4.9.3
110 | - blessings==1.7
111 | - cachetools==4.1.1
112 | - click==7.1.2
113 | - cloudpickle==1.6.0
114 | - colorama==0.4.4
115 | - colorful==0.5.4
116 | - dm-tree==0.1.5
117 | - dtw-python==1.1.10
118 | - fastdtw==0.3.4
119 | - filelock==3.0.12
120 | - flask==1.1.2
121 | - future==0.18.2
122 | - google==3.0.0
123 | - google-api-core==1.23.0
124 | - google-auth==1.23.0
125 | - googleapis-common-protos==1.52.0
126 | - gpustat==0.6.0
127 | - grpcio==1.33.2
128 | - gym==0.17.3
129 | - h11==0.11.0
130 | - hiredis==1.1.0
131 | - itsdangerous==1.1.0
132 | - jinja2==2.11.2
133 | - jsonschema==3.2.0
134 | - lz4==3.1.0
135 | - markupsafe==1.1.1
136 | - msgpack==1.0.0
137 | - multidict==5.0.0
138 | - nvidia-ml-py3==7.352.0
139 | - opencensus==0.7.11
140 | - opencensus-context==0.1.2
141 | - opencv-python==4.4.0.46
142 | - opencv-python-headless==4.3.0.36
143 | - prometheus-client==0.8.0
144 | - protobuf==3.13.0
145 | - psutil==5.7.3
146 | - py-spy==0.3.3
147 | - pyasn1==0.4.8
148 | - pyasn1-modules==0.2.8
149 | - pydantic==1.7.2
150 | - pyglet==1.5.0
151 | - pyrsistent==0.17.3
152 | - pyyaml==5.3.1
153 | - ray==1.0.0
154 | - redis==3.4.1
155 | - rsa==4.6
156 | - soupsieve==2.0.1
157 | - tabulate==0.8.7
158 | - tensorboardx==2.1
159 | - typing-extensions==3.7.4.3
160 | - uvicorn==0.12.2
161 | - werkzeug==1.0.1
162 | - yarl==1.6.2
163 | prefix: /home/hadoop/miniconda3
164 |
--------------------------------------------------------------------------------
/sim/param_optimize.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 |
4 |
5 | def argument_parser():
6 | parser = argparse.ArgumentParser()
7 | parser.add_argument('-i',
8 | '--interarrival_percentile',
9 | type=int,
10 | default=5000)
11 | parser.add_argument('-q', '--queuing_percentile', type=int, default=9900)
12 | parser.add_argument('-c', '--config', choices=['run', 'stat', 'all'], default='stat')
13 | args = parser.parse_args()
14 | return args
15 |
16 |
17 | def pause_len_run(len_params, delete_log=True):
18 | for i, j in len_params:
19 | setting_name = f'pause_len_{i}_{j}'
20 | os.system(
21 | f'python main.py --trace ../online_data/cut --log ../online_data/log --action ../online_data/action --result ../online_data/result --algorithm pause --mode release --pause_dimension len --pause_stop_threshold {i} --pause_activate_threshold {j} --random_sample 2000'
22 | )
23 | os.system(
24 | f'python stat.py --log ../online_data/log --result ../online_data/result --filter w20w --queuing --interarrival --smooth --total --settings {setting_name} --flowinfo ../online_data/flowinfo.log'
25 | )
26 | if delete_log:
27 | os.system(
28 | f'rm -rf ../online_data/log/{setting_name} ../online_data/action/{setting_name}'
29 | )
30 | print('qlen done!')
31 |
32 |
33 | def pause_wait_run(wait_params, delete_log=True):
34 | for i, j in wait_params:
35 | setting_name = f'pause_wait_{i}_{j}'
36 | os.system(
37 | f'python main.py --trace ../online_data/cut --log ../online_data/log --action ../online_data/action --result ../online_data/result --algorithm pause --mode release --pause_dimension wait --pause_stop_threshold {i} --pause_activate_threshold {j} --random_sample 2000'
38 | )
39 | os.system(
40 | f'python stat.py --log ../online_data/log --result ../online_data/result --filter w20w --queuing --interarrival --smooth --total --settings {setting_name} --flowinfo ../online_data/flowinfo.log'
41 | )
42 | if delete_log:
43 | os.system(
44 | f'rm -rf ../online_data/log/{setting_name} ../online_data/action/{setting_name}'
45 | )
46 | print('qwait done!')
47 |
48 |
49 | def pause_len_stat(len_params,
50 | interarrival_percentile=5000,
51 | queuing_percentile=9900):
52 | for i, j in len_params:
53 | setting_name = f'pause_len_{i}_{j}'
54 | with open(
55 | os.path.join('../online_data/result/w20w', setting_name,
56 | 'interarrival_all.log')) as f:
57 | buf = f.readlines()
58 | interarrival = buf[interarrival_percentile - 1].replace(
59 | '\n', '').split(' ')[1]
60 | with open(
61 | os.path.join('../online_data/result/w20w', setting_name,
62 | 'queuing_all.log')) as f:
63 | buf = f.readlines()
64 | queuing = buf[queuing_percentile - 1].replace('\n',
65 | '').split(' ')[1]
66 | yield i, j, interarrival, queuing
67 |
68 |
69 | def pause_wait_stat(wait_params,
70 | interarrival_percentile=5000,
71 | queuing_percentile=9900):
72 | for i, j in wait_params:
73 | setting_name = f'pause_wait_{i}_{j}'
74 | with open(
75 | os.path.join('../online_data/result/w20w', setting_name,
76 | 'interarrival_all.log')) as f:
77 | buf = f.readlines()
78 | interarrival = buf[interarrival_percentile - 1].replace(
79 | '\n', '').split(' ')[1]
80 | with open(
81 | os.path.join('../online_data/result/w20w', setting_name,
82 | 'queuing_all.log')) as f:
83 | buf = f.readlines()
84 | queuing = buf[queuing_percentile - 1].replace('\n',
85 | '').split(' ')[1]
86 | yield i, j, interarrival, queuing
87 |
88 |
89 | if __name__ == '__main__':
90 | args = argument_parser()
91 | len_params = []
92 | for i in range(1, 9):
93 | for j in range(i, 9):
94 | len_params.append((i, j))
95 |
96 | wait_params = []
97 | for i in range(10, 120, 10):
98 | for j in range(i, 120, 10):
99 | wait_params.append((i, j))
100 |
101 | # Run
102 | if args.config == 'run' or args.config == 'all':
103 | print('pause len run')
104 | pause_len_run(len_params)
105 | print('pause wait run')
106 | pause_wait_run(wait_params)
107 |
108 | # Stat
109 | if args.config == 'stat' or args.config == 'all':
110 | print('pause len stat')
111 | for i, j, interarrival, queueing in pause_len_stat(
112 | len_params, args.interarrival_percentile, args.queuing_percentile):
113 | print(i, j, interarrival, queueing)
114 | print('pause wait stat')
115 | for i, j, interarrival, queueing in pause_wait_stat(
116 | wait_params, args.interarrival_percentile,
117 | args.queuing_percentile):
118 | print(i, j, interarrival, queueing)
119 |
--------------------------------------------------------------------------------
/sim/env.py:
--------------------------------------------------------------------------------
1 | import os
2 | import traceloader
3 | import numpy as np
4 |
5 | class Environment:
6 | def __init__(self, args, fname):
7 | self.time = 0
8 | self.queue = []
9 | self.maxsize = args.max_queue_length # max pre-decoder queue length
10 | self.decoder_release = 0
11 | self.timeslicing_release = 0
12 | # self.arrival_interval_stash = 0
13 | self.fout = open(os.path.join(args.log, args.settings, fname), 'w')
14 | self.floss = open(os.path.join(args.result, args.settings, 'frameloss.log'), 'a')
15 | self.fname = fname
16 |
17 | def step(self, line):
18 | # if self.time > 1350:
19 | # print('debug')
20 | self.arrival_interval = []
21 | self.service_interval = []
22 |
23 | # line = [arrival timestamp, decoding time, netts, timeslicing time]
24 | next_timestamp = line[0]
25 | qwait = 0
26 | # Those pause cmds within same encoding cycle (16.6ms in 60fps) will only activate the latest one. \
27 | # So we just need to activate the latest pause cmd within original encoding cycle.
28 | # if line[1] == traceloader.PAUSE_FLAG:
29 | # self.arrival_interval_stash += next_timestamp - self.time
30 | # else:
31 | # self.arrival_interval.append(next_timestamp - self.time + self.arrival_interval_stash)
32 | # self.arrival_interval_stash = 0
33 | self.arrival_interval.append(next_timestamp - self.time)
34 |
35 | # dequeue
36 | while len(self.queue) > 0 and self.time + self.decoder_release <= next_timestamp:
37 | self.time += self.decoder_release
38 | self.decoder_release = 0
39 | if self.time + self.timeslicing_release <= next_timestamp: # wait for timeslicing
40 | self.time += self.timeslicing_release
41 | dequeue_line = self.queue.pop(0)
42 | self.decoder_release = dequeue_line[1]
43 | self.service_interval.append(self.decoder_release)
44 | if len(self.queue) > 0:
45 | self.timeslicing_release = self.queue[0][3]
46 | else:
47 | self.timeslicing_release = 0
48 |
49 | self.fout.write(
50 | "%.2f" % (dequeue_line[0]) + '\t' + # arrival timestamp
51 | "%.2f" % (dequeue_line[2]) + '\t' + # net transfer time
52 | "%.2f" % (self.time - dequeue_line[0]) +'\t' + # queuing time
53 | "%.2f" % (dequeue_line[1]) + '\t' + # decoding time
54 | "%.2f" % (dequeue_line[2] + self.time - dequeue_line[0] + dequeue_line[1]) + '\n'
55 | )
56 | qwait = self.time - dequeue_line[0]
57 | else: # store timeslicing intervals
58 | break
59 | if self.decoder_release <= next_timestamp - self.time:
60 | self.timeslicing_release -= min(next_timestamp - self.time - self.decoder_release, self.timeslicing_release)
61 | self.decoder_release = 0
62 | else:
63 | self.decoder_release -= next_timestamp - self.time
64 |
65 | qlen = len(self.queue)
66 | qwait = max(qwait, next_timestamp - self.queue[0][0] if len(self.queue) > 0 else 0)
67 | # check if we need to enqueue
68 | if line[1] == traceloader.PAUSE_FLAG:
69 | # self.fout.write(
70 | # "%s" %("This frames is under pause, should not be enqueued! ") +
71 | # "%.2f" % (line[0]) + '\t' + # arrival timestamp
72 | # "%.2f" % (line[2]) + '\t' + # net transfer time
73 | # "%.2f" % (line[3]) + '\t' + # queuing time
74 | # "%.2f" % (line[1]) + '\t' + # decoding time
75 | # "%.2f" % (line[2] + line[1]) + '\n'
76 | # )
77 | pass
78 | elif len(self.queue) == 0 and self.decoder_release == 0 and line[3] == 0:
79 | # queue is empty and decoder is available --> no need to queue
80 | self.decoder_release = line[1]
81 | self.service_interval.append(self.decoder_release)
82 | self.fout.write(
83 | "%.2f" % (line[0]) + '\t' + # arrival timestamp
84 | "%.2f" % (line[2]) + '\t' + # net transfer time
85 | "%.2f" % (line[3]) + '\t' + # queuing time
86 | "%.2f" % (line[1]) + '\t' + # decoding time
87 | "%.2f" % (line[2] + line[1]) + '\n'
88 | )
89 | qwait = line[3]
90 | else:
91 | # enqueue
92 | if len(self.queue) == 0:
93 | self.timeslicing_release = line[3]
94 |
95 | if len(self.queue) < self.maxsize:
96 | self.queue.append(line)
97 | else:
98 | # queue overflows, frame loss happens
99 | self.floss.write(
100 | self.fname + '\t' +
101 | "%.2f" % (line[0]) + '\t' + # arrival timestamp
102 | "%.2f" % (line[2]) + '\t' + # net transfer time
103 | "%.2f" % (line[1]) + '\t' + # decoding time
104 | "%.2f" % (line[2] + line[1]) + '\n'
105 | )
106 |
107 | assert(self.timeslicing_release >= 0)
108 | self.time = next_timestamp
109 | return qlen, qwait, self.arrival_interval, self.service_interval, line[2]
110 |
111 | def set_ewm_factor(self, new_ewm_factor):
112 | self.ewm_factor = new_ewm_factor
113 |
114 | def __del__(self):
115 | self.fout.close()
116 | self.floss.close()
117 |
--------------------------------------------------------------------------------
/scripts/cond_prob.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 | import argparse
4 | from multiprocessing import Pool
5 |
6 | total_k_list = [2, 3, 4, 6, 8, 12, 16]
7 | n_k_list = [2, 3, 4, 6, 8, 12, 16] # network
8 | d_k_list = [2, 3, 4, 6, 8, 12, 16, 24, 32] # decode
9 | q_k_list = [2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64] # queue
10 | n_pos = 5
11 | d_pos = 4
12 | q_pos = 3
13 |
14 | def cond_single_trace(param):
15 | fname, args = param
16 |
17 | with open(os.path.join(args.trace, fname), 'r') as f:
18 | if args.value:
19 | results = np.zeros((3, 3))
20 | else:
21 | n_results = np.zeros((len(total_k_list) + 1, len(n_k_list) + 1), dtype=int)
22 | d_results = np.zeros((len(total_k_list) + 1, len(d_k_list) + 1), dtype=int)
23 | q_results = np.zeros((len(total_k_list) + 1, len(q_k_list) + 1), dtype=int)
24 |
25 | for strline in f.readlines():
26 | numline = strline.split()
27 | total_cur = int(numline[5]) + int(numline[4]) + int(numline[3])
28 | n_cur = int(numline[n_pos])
29 | d_cur = int(numline[d_pos])
30 | q_cur = int(numline[q_pos])
31 |
32 | if args.value:
33 | total_val = 100
34 | com_val = 50
35 | results += np.array([[n_cur > com_val and total_cur > total_val, n_cur > com_val, total_cur > total_val],
36 | [d_cur > com_val and total_cur > total_val, d_cur > com_val, total_cur > total_val],
37 | [q_cur > com_val and total_cur > total_val, q_cur > com_val, total_cur > total_val]])
38 |
39 | else:
40 | total_avg = 22.72
41 | n_avg = 15.48
42 | d_avg = 2.83
43 | q_avg = 0.96
44 |
45 | for total_k_idx in range(len(total_k_list)):
46 | total_k = total_k_list[total_k_idx]
47 | if total_cur > total_k * total_avg:
48 | n_results[total_k_idx, -1] += 1
49 | d_results[total_k_idx, -1] += 1
50 | q_results[total_k_idx, -1] += 1
51 |
52 | for n_k_idx in range(len(n_k_list)):
53 | n_k = n_k_list[n_k_idx]
54 | if n_cur > n_k * n_avg:
55 | n_results[-1, n_k_idx] += 1
56 | for d_k_idx in range(len(d_k_list)):
57 | d_k = d_k_list[d_k_idx]
58 | if d_cur > d_k * d_avg:
59 | d_results[-1, d_k_idx] += 1
60 | for q_k_idx in range(len(q_k_list)):
61 | q_k = q_k_list[q_k_idx]
62 | if q_cur > q_k * q_avg:
63 | q_results[-1, q_k_idx] += 1
64 |
65 | for total_k_idx in range(len(total_k_list)):
66 | total_k = total_k_list[total_k_idx]
67 | for n_k_idx in range(len(n_k_list)):
68 | n_k = n_k_list[n_k_idx]
69 | if total_cur > total_k * total_avg and n_cur > n_k * n_avg:
70 | n_results[total_k_idx, n_k_idx] += 1
71 | for d_k_idx in range(len(d_k_list)):
72 | d_k = d_k_list[d_k_idx]
73 | if total_cur > total_k * total_avg and d_cur > d_k * d_avg:
74 | d_results[total_k_idx, d_k_idx] += 1
75 | for q_k_idx in range(len(q_k_list)):
76 | q_k = q_k_list[q_k_idx]
77 | if total_cur > total_k * total_avg and q_cur > q_k * q_avg:
78 | q_results[total_k_idx, q_k_idx] += 1
79 | if args.value:
80 | return results
81 | else:
82 | return n_results, d_results, q_results
83 |
84 |
85 | if __name__ == '__main__':
86 | parser = argparse.ArgumentParser()
87 | parser.add_argument('--trace', type=str, default='../sim/test-trace')
88 | parser.add_argument('--result', type=str, default='../sim/test-result')
89 | parser.add_argument('--filter', type=str, default='wwww',
90 | help='devType (UNKNOWN, DESKTOP, LAPTOP, PHONE, PAD, STB, TV), \n' +
91 | 'netType (NONE, MOBILE, ETHERNET, WIFI, OTHER), \n' +
92 | 'clientType (WinPC, IOS, MacPC, Android), \n' +
93 | 'decodeType (SOFTWARE, HARDWARE)')
94 | parser.add_argument('--value', action='store_true')
95 | parser.add_argument('--flowinfo', type=str, default='../online_data/flowinfo.log')
96 | parser.add_argument('--worker', type=int)
97 | args = parser.parse_args()
98 |
99 | devType = args.filter[0]
100 | netType = args.filter[1]
101 | clientType = args.filter[2]
102 | decodeType = args.filter[3]
103 |
104 | filtered_sid = []
105 | with open(args.flowinfo, 'r') as f:
106 | while True:
107 | line = f.readline().split()
108 | if not line:
109 | break
110 | devCond = devType == 'w' or devType == line[2]
111 | netCond = netType == 'w' or netType == line[4]
112 | clientCond = clientType == 'w' or clientType == line[6]
113 | decodeCond = decodeType == 'w' or decodeType == line[8]
114 | if devCond and netCond and clientCond and decodeCond:
115 | filtered_sid.append(line[0])
116 |
117 | if not os.path.exists(os.path.join(args.result, args.filter, 'stat')):
118 | os.makedirs(os.path.join(args.result, args.filter, 'stat'))
119 |
120 | fnames = os.listdir(args.trace)
121 | fnames_new = []
122 | for fname in fnames:
123 | sid = fname.split('_')[0]
124 | if sid in filtered_sid:
125 | fnames_new.append(fname)
126 | print(len(fnames_new), '/', len(fnames))
127 | fnames = fnames_new
128 |
129 | if args.worker:
130 | pool = Pool(args.worker)
131 | else:
132 | pool = Pool()
133 | results = pool.map(cond_single_trace, [(fname, args) for fname in fnames])
134 | pool.close()
135 | pool.join()
136 |
137 | if args.value:
138 | np.savetxt(os.path.join(args.result, args.filter, 'stat', 'cond_prob_value_50_100.log'), sum(results), fmt="%d")
139 | else:
140 | n_result = sum(n_results for n_results, _, _ in results)
141 | d_result = sum(d_results for _, d_results, _ in results)
142 | q_result = sum(q_results for _, _, q_results in results)
143 | np.savetxt(os.path.join(args.result, args.filter, 'stat', 'cond_prob_n.log'), n_result, fmt="%d")
144 | np.savetxt(os.path.join(args.result, args.filter, 'stat', 'cond_prob_d.log'), d_result, fmt="%d")
145 | np.savetxt(os.path.join(args.result, args.filter, 'stat', 'cond_prob_q.log'), q_result, fmt="%d")
146 |
--------------------------------------------------------------------------------
/sim/traceloader.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import algorithm
3 |
4 | PAUSE_FLAG = -1
5 |
6 | class TraceLoader:
7 | def __init__(self, trace_path, min_trace_length):
8 | self.f = open(trace_path, 'r')
9 | self.trace_path = trace_path
10 | self.line_queue = []
11 | self.queue_max = min_trace_length
12 | self.arrv_ewma = 0
13 | self.net_rto = 0.050
14 |
15 | self.lastrawline = ['inf' for _ in range(10)]
16 | while len(self.line_queue) < self.queue_max:
17 | rawline = self.f.readline().split()
18 | if not rawline:
19 | raise Exception("File too short")
20 | self.line_queue.append(np.array([float(rawline[6]), float(rawline[4]), float(rawline[5]), 0]))
21 | # arrival timestamp, decoding time, rtt, timeslicing waiting time
22 |
23 | # multiple frames inside the decoder
24 | dequeue_ts_diff = (float(rawline[6]) + float(rawline[3])) - (float(self.lastrawline[6]) + float(self.lastrawline[3]))
25 | if self.lastrawline[0] != 'inf' and dequeue_ts_diff < float(self.lastrawline[4]):
26 | self.line_queue[-2][1] -= (float(self.lastrawline[4]) - dequeue_ts_diff)
27 |
28 | if self.lastrawline[0] != 'inf':
29 | self.arrv_ewma += min((int(rawline[6]) - int(self.lastrawline[6])) / 1000, self.net_rto)
30 |
31 | # CPU time slicing delay counted in decoding delay
32 | timeslice_diff = float(rawline[6]) + float(rawline[3]) - \
33 | max(int(rawline[6]), float(self.lastrawline[6]) + float(self.lastrawline[3]) + float(self.lastrawline[4]))
34 | if timeslice_diff > 0:
35 | self.line_queue[-1][3] = timeslice_diff
36 |
37 | self.lastrawline = rawline
38 |
39 | self.arrv_ewma /= (self.queue_max - 1)
40 | self.arrv_ewma = max(1 / 60, self.arrv_ewma)
41 |
42 | self.timebase = self.line_queue[0][0] # the timeline in the traceloader
43 | for line in self.line_queue:
44 | line[0] -= self.timebase
45 | self.ptr = -1
46 | self.trace_end = 0
47 | self.target_arrv = 1 / 60
48 | self.arrv_staged = []
49 | self.lastline = [0, 0, 0, 0]
50 |
51 | def load_line(self, target_arrv):
52 | # first pop up a slowdown value from those staged values.
53 | # the slowdown_staged stores the list of [slowdown value, effective time (by network delay)]
54 | # the most recent effective slowdown factor will be used
55 | if len(self.arrv_staged) > 0:
56 | update_idx = -1
57 | for idx in range(len(self.arrv_staged)):
58 | # check if its effective time is earlier than the current time
59 | if self.line_queue[1][0] >= self.arrv_staged[idx][1]:
60 | update_idx = idx
61 | if update_idx >= 0:
62 | for _ in range(update_idx + 1):
63 | self.target_arrv = self.arrv_staged[0][0]
64 | self.arrv_staged.pop(0)
65 |
66 | # calculate the interpolation position between frames
67 | # the line_queue maintain several frames for slowdown
68 | # let self.slowdown == -1 to indicate if we should pause
69 | try:
70 | self.ptr += abs(self.target_arrv) / self.arrv_ewma
71 | except ZeroDivisionError: # sometimes logs parsed from servers have format issues, print it out
72 | print("ZeroDivisionError", self.trace_path)
73 | line = self.lastline
74 | self.trace_end = True
75 | return line, self.trace_end
76 |
77 | self.ptr = max(0, min(self.ptr, len(self.line_queue) - 2))
78 | idx_low = int(np.floor(self.ptr))
79 | idx_high = idx_low + 1
80 | ratio_low = idx_high - self.ptr
81 | ratio_high = self.ptr - idx_low
82 | line = ratio_low * self.line_queue[idx_low] + ratio_high * self.line_queue[idx_high]
83 |
84 | # if we pause, then we change decode_time to -1 to inform env.step() that this is a pause-frame
85 | if self.target_arrv < 0:
86 | line[1] = PAUSE_FLAG # putting a flag to mark this image will not be encoded, so it won't enqueue to the subsequent buffers.
87 |
88 | # ptr adjustment and new line read in
89 | while self.ptr >= 1:
90 | self.line_queue.pop(0)
91 | self.ptr -= 1
92 |
93 | try:
94 | while len(self.line_queue) < self.queue_max:
95 | rawline = self.f.readline().split()
96 | if not rawline:
97 | self.trace_end = 1
98 | break
99 |
100 | if int(rawline[6]) < int(self.lastrawline[6]):
101 | continue
102 |
103 | self.line_queue.append(np.array([float(rawline[6]) - self.timebase, float(rawline[4]), float(rawline[5]), 0]))
104 | # arrival timestamp, decoding time, rtt, timeslicing waiting time
105 |
106 | # multiple frames inside the decoder
107 | dequeue_ts_diff = (float(rawline[6]) + float(rawline[3])) - (float(self.lastrawline[6]) + float(self.lastrawline[3]))
108 | if dequeue_ts_diff < float(self.lastrawline[4]):
109 | self.line_queue[-2][1] -= (float(self.lastrawline[4]) - dequeue_ts_diff)
110 |
111 | # CPU time slicing delay counted in decoding delay
112 | timeslice_diff = float(rawline[6]) + float(rawline[3]) - \
113 | max(int(rawline[6]), float(self.lastrawline[6]) + float(self.lastrawline[3]) + float(self.lastrawline[4]))
114 | if timeslice_diff > 0:
115 | self.line_queue[-1][3] = timeslice_diff
116 |
117 | self.arrv_ewma += 0.033 * (min(self.net_rto, (float(rawline[6]) - float(self.lastrawline[6])) / 1000) - self.arrv_ewma)
118 |
119 | self.lastrawline = rawline
120 |
121 | # Attn: the slowdown must be enqueued last.
122 | # This is because the frame-rate adaptation will not take effect until the encoder encodes the next frame:
123 | # the frame rates of both the gaming application and the encoder need to be adjusted, which takes approximately one frame.
124 | self.arrv_staged.append([target_arrv, self.lastline[0] + self.lastline[2]]) # should be averaged to approximate uplink delay (stalled downlink no influence on uplink)
125 | self.lastline = line
126 | except ValueError: # sometimes logs parsed from servers have format issues, print it out
127 | print(self.trace_path)
128 | line = self.lastline
129 | self.trace_end = True
130 | return line, self.trace_end
131 |
132 | def __del__(self):
133 | self.f.close()
134 |
--------------------------------------------------------------------------------
/scripts/stat.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import os
3 | import argparse
4 | import subprocess as sp
5 | from multiprocessing import Pool
6 |
7 |
8 | def stat(param):
9 | fname, args = param
10 |
11 | if args.queuing:
12 | fname_queuing = os.path.join(args.result, fname + '_queuing.tmp')
13 | f_queuing = open(fname_queuing, 'w')
14 | if args.interarrival:
15 | last_arrival = -1
16 | fname_interarrival = os.path.join(args.result, fname + '_interarrival.tmp')
17 | f_interarrival = open(fname_interarrival, 'w')
18 | if args.total:
19 | fname_total = os.path.join(args.result, fname + '_total.tmp')
20 | f_total = open(fname_total, 'w')
21 |
22 | with open(os.path.join(args.log, fname)) as f:
23 | while True:
24 | line = f.readline().split()
25 | if not line:
26 | break
27 | if args.queuing:
28 | queuing = float(line[3])
29 | f_queuing.write("%.2f\n" % queuing)
30 | if args.interarrival:
31 | if last_arrival < 0:
32 | # actually the time after decoding
33 | last_arrival = float(line[0]) + float(line[2]) + float(line[3])
34 | else:
35 | inter_arrival = float(line[0]) + float(line[2]) + float(line[3]) - last_arrival
36 | f_interarrival.write("%.2f\n" % inter_arrival)
37 | last_arrival = float(line[0]) + float(line[2]) + float(line[3])
38 | if args.total:
39 | total = float(line[4])
40 | f_total.write("%.2f\n" % total)
41 |
42 | # sort results to calculate cdf
43 | if args.queuing:
44 | f_queuing.close()
45 | sp_queuing = sp.Popen('sort -n ' + fname_queuing + ' -o ' + fname_queuing, shell=True)
46 | if args.interarrival:
47 | f_interarrival.close()
48 | sp_interarrival = sp.Popen('sort -n ' + fname_interarrival + ' -o ' + fname_interarrival, shell=True)
49 | if args.total:
50 | f_total.close()
51 | sp_total = sp.Popen('sort -n ' + fname_total + ' -o ' + fname_total, shell=True)
52 |
53 | if args.queuing:
54 | sp_queuing.wait()
55 | if args.interarrival:
56 | sp_interarrival.wait()
57 | if args.total:
58 | sp_total.wait()
59 |
60 | # calculate per-log cdf
61 | if args.queuing:
62 | sp_queuing = sp.Popen("awk -vstep=$(awk 'END{printf(\"%.4f\\n\", NR/10000)}' " + fname_queuing + " | bc) 'BEGIN{cnt=1} {sum+=$1;if (NR>=int(cnt*step)){print cnt,$0;cnt=cnt+1+int(NR-cnt*step)}} END{printf(\"Avg %.3f\\n\", sum/NR)}' " + fname_queuing + " > " + os.path.join(args.result, fname + '_queuing.log'), shell=True)
63 | if args.interarrival:
64 | sp_interarrival = sp.Popen("awk -vstep=$(awk 'END{printf(\"%.4f\\n\", NR/10000)}' " + fname_interarrival + " | bc) 'BEGIN{cnt=1} {sum+=$1;if (NR>=int(cnt*step)){print cnt,$0;cnt=cnt+1+int(NR-cnt*step)}} END{printf(\"Avg %.2f\\n\", sum/NR)}' " + fname_interarrival + " > " + os.path.join(args.result, fname + '_interarrival.log'), shell=True)
65 | if args.total:
66 | sp_total = sp.Popen("awk -vstep=$(awk 'END{printf(\"%.4f\\n\", NR/10000)}' " + fname_total + " | bc) 'BEGIN{cnt=1} {sum+=$1;if (NR>=int(cnt*step)){print cnt,$0;cnt=cnt+1+int(NR-cnt*step)}} END{printf(\"Avg %.2f\\n\", sum/NR)}' " + fname_total + " > " + os.path.join(args.result, fname + '_total.log'), shell=True)
67 |
68 | if args.queuing:
69 | sp_queuing.wait()
70 | if args.interarrival:
71 | sp_interarrival.wait()
72 | if args.total:
73 | sp_total.wait()
74 |
75 |
76 | if __name__ == "__main__":
77 | parser = argparse.ArgumentParser()
78 | parser.add_argument('--log', type=str)
79 | parser.add_argument('--result', type=str)
80 |
81 | parser.add_argument('--queuing', action='store_true')
82 | parser.add_argument('--interarrival', action='store_true')
83 | parser.add_argument('--total', action='store_true')
84 |
85 | parser.add_argument('--threads', type=int, help='Number of parallel threads')
86 | args = parser.parse_args()
87 |
88 | if not os.path.exists(args.result):
89 | os.mkdir(args.result)
90 |
91 | fnames = os.listdir(args.log)
92 | # RELEASE
93 | if args.threads:
94 | pool = Pool(args.threads)
95 | else:
96 | pool = Pool()
97 | pool.map(stat, [(fname, args) for fname in fnames])
98 | pool.close()
99 | pool.join()
100 |
101 | # DEBUG
102 | # for fname in fnames:
103 | # stat((fname, args))
104 |
105 | # Cont'd
106 | # cat and sort all-long results
107 | if args.queuing:
108 | fname_queuing_all = os.path.join(args.result, "queuing_all.tmp")
109 | sp_queuing = sp.Popen("cat " + os.path.join(args.result, "*_queuing.tmp") + " | sort -n -o " + fname_queuing_all, shell=True)
110 | if args.interarrival:
111 | fname_interarrival_all = os.path.join(args.result, "interarrival_all.tmp")
112 | sp_interarrival = sp.Popen("cat " + os.path.join(args.result, "*_interarrival.tmp") + " | sort -n -o " + fname_interarrival_all, shell=True)
113 | if args.total:
114 | fname_total_all = os.path.join(args.result, "total_all.tmp")
115 | sp_total = sp.Popen("cat " + os.path.join(args.result, "*_total.tmp") + " | sort -n -o " + fname_total_all, shell=True)
116 |
117 | if args.queuing:
118 | sp_queuing.wait()
119 | if args.interarrival:
120 | sp_interarrival.wait()
121 | if args.total:
122 | sp_total.wait()
123 |
124 | # calculate all-log cdf
125 | if args.queuing:
126 | sp_queuing = sp.Popen("awk -vstep=$(awk 'END{printf(\"%.4f\\n\", NR/10000)}' " + fname_queuing_all + " | bc) 'BEGIN{cnt=1} {sum+=$1;if (NR>=int(cnt*step)){print cnt,$0;cnt=cnt+1+int(NR-cnt*step)}} END{printf(\"Avg %.3f\\n\", sum/NR)}' " + fname_queuing_all + " > " + os.path.join(args.result, 'queuing_all.log'), shell=True)
127 | if args.interarrival:
128 | sp_interarrival = sp.Popen("awk -vstep=$(awk 'END{printf(\"%.4f\\n\", NR/10000)}' " + fname_interarrival_all + " | bc) 'BEGIN{cnt=1} {sum+=$1;if (NR>=int(cnt*step)){print cnt,$0;cnt=cnt+1+int(NR-cnt*step)}} END{printf(\"Avg %.2f\\n\", sum/NR)}' " + fname_interarrival_all + " > " + os.path.join(args.result, 'interarrival_all.log'), shell=True)
129 | if args.total:
130 | sp_total = sp.Popen("awk -vstep=$(awk 'END{printf(\"%.4f\\n\", NR/10000)}' " + fname_total_all + " | bc) 'BEGIN{cnt=1} {sum+=$1;if (NR>=int(cnt*step)){print cnt,$0;cnt=cnt+1+int(NR-cnt*step)}} END{printf(\"Avg %.2f\\n\", sum/NR)}' " + fname_total_all + " > " + os.path.join(args.result, 'total_all.log'), shell=True)
131 |
132 | if args.queuing:
133 | sp_queuing.wait()
134 | if args.interarrival:
135 | sp_interarrival.wait()
136 | if args.total:
137 | sp_total.wait()
138 |
139 | # clear up tmp files
140 | os.system('rm ' + os.path.join(args.result, '*.tmp'))
141 |
--------------------------------------------------------------------------------
/sim/README.md:
--------------------------------------------------------------------------------
1 | # AFR Simulator
2 |
3 | ## Structure of the simulator
4 |
5 | The files used in the simulator:
6 | ```
7 | .
8 | ├── algorithm.py
9 | ├── env.py
10 | ├── main.py
11 | └── traceloader.py
12 | ```
13 | Specifically,
14 | - `main.py` is the main loop of the simulator. It parses arguments, prepares input and output directories, starts processes, and dispatches traces to each process. It calls `env.py` to set up a simulation environment, `traceloader.py` to load traces into the simulator, and `algorithm.py` to simulate the algorithm based on the trace.
15 | - `env.py` sets up the simulation environment. It sets up the environment for each trace, and maintains the timeline and frame generation states. It works at the granularity of frames: upon a new frame arrival, it computes the waiting time, arrival interval, service interval, etc., and returns these results to `main.py`.
16 | - `algorithm.py` implements the AFR algorithm and several baseline algorithms. It takes the current network and queue states as input, and then calculates the target frame-rate. The actual return value of the algorithms is the *slowdown* factor, which is the ratio of the target frame-rate over the current frame-rate (details introduced below; a minimal sketch of the interface is given at the end of this file).
17 | - `traceloader.py` loads one line (one frame) from the trace file in each loop, according to the slowdown factor from `algorithm.py`. The slowdown factor is a scaling factor on the timeline of the trace file. It returns the information of the next frame to the environment.
18 |
19 | In summary, the main loop of the simulator is:
20 | ```
21 | while not trace_end:
22 | line, trace_end = traces.load_line(slowdown)
23 | qlen, qwait, arrv_intvl, serv_intvl, rtt = dec_env.step(line)
24 | slowdown = alg.predict(qlen, qwait, arrv_intvl, serv_intvl, rtt)
25 | ```
26 |
27 | ## Arguments of `main.py`
28 |
29 | Input and output settings
30 | | Args | Default value | Explanation | Mode: Local | Mode: Remote | Mode: Debug |
31 | |:---:|:---:|:---:|:---:|:---:|:---:|
32 | | `--trace` | `../traces` | The directory containing the traces. | `../online_data/cut` | `../traces` | `test-trace` |
33 | | `--log` | `../logs` | The directory *prefix* where resulting logs are generated (for performance analysis). The actual log directory is a sub-folder of this prefix, named after the experiment settings. | `../online_data/logs` | `../logs` | `test-log` |
34 | | `--action` | `../actions` | The directory *prefix* where frame-rate actions are generated (for frame-rate & smoothness analysis). | `../online_data/actions` | `../actions` | `test-action` |
35 | | `--result` | `../results` | The directory *prefix* where some (now deprecated) results are generated (for frame-loss analysis). | `../online_data/results` | `../results` | `test-result` |
36 |
37 | For other parameters, please refer to the help of each parameter in argparse. An example command to run AFR with the full dataset:
38 | ```
39 | python main.py --trace ../online_data/cut --log ../online_data/logs --action ../online_data/actions --result ../online_data/results --algorithm afr --mode release
40 | ```
41 | **Attention! Be careful when running the simulator with the full dataset: the output logs and results will overwrite previous outputs!**
42 |
43 | To monitor the simulation progress, you can clear the output folder before the simulation, and then compare the number of files in the output folder with that in the input trace folder via `ls {folder} | wc -l`.
44 |
45 | Since simulating the numerous traces takes a long time, you can debug the simulator with a small set of traces (located in `test-*/`). For example:
46 | ```
47 | python main.py --trace test-trace --log test-log --action test-action --result test-result --algorithm afr --mode debug
48 |
49 | # for the enumeration baselines
50 | python main.py --trace test-trace --log test-log --action test-action --result test-result --algorithm qlen --qlen_target_arri_seq 0123 --mode debug
51 | ```
52 | ## Trace format
53 | Here we provide a sample trace in the `test-trace` folder. Each line records the information of one video frame collected from our production environment, where the 7 columns are:
54 | * Date (not used)
55 | * Time (not used)
56 | * Flow ID (not used)
57 | * Queueing time (ms) -- the time the frame is queued in the decoder queue. Note that this value is **NOT** used for simulating the queuing delay. Instead, we keep this value because we find that sometimes the CPU scheduling delay can also contribute to the frame delays. To make the simulator more faithful, we use this (together with the timestamp) to estimate how long the CPU scheduling delay is. Implementation at [# CPU time slicing delay counted in decoding delay](https://github.com/transys-project/afr/blob/f758c4b63e9d41a44e3d258a5d85c187a03fc5de/sim/traceloader.py#L31-L35).
58 | * Decoding time (ms)
59 | * Network RTT (ms)
60 | * Arrival timestamp (ms)
61 |
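For illustration only, one trace line could be parsed in Python as follows (the function and field names here are ours, not part of the simulator):
```
# Illustrative only: split one trace line into the 7 columns described above.
def parse_trace_line(line):
    date, time, flow_id, queuing, decoding, rtt, arrival = line.split()
    return {
        'queuing_ms': float(queuing),    # decoder-queue time; only used to estimate CPU scheduling delay
        'decoding_ms': float(decoding),
        'rtt_ms': float(rtt),
        'arrival_ms': float(arrival),
    }
```
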
62 | ## How does `env.py` work?
63 | The main function of the environment is implemented in the `step()` function. It takes one frame as input, calculates the frames dequeued between the arrivals of two consecutive frames, and enqueues the newly arrived frame. The final queue states are then returned to the main loop.
64 |
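As a rough sketch (simplified from the actual implementation; the class and variable names below are illustrative), `step()` behaves like a single-server FIFO queue:
```
# Illustrative sketch, not the actual env.py implementation.
from collections import deque

class EnvSketch:
    def __init__(self):
        self.queue = deque()       # finish times of frames still in the system
        self.dec_free_at = 0.0     # when the decoder becomes idle

    def step(self, arrival_ms, decode_ms):
        # dequeue the frames that finished decoding before this arrival
        while self.queue and self.queue[0] <= arrival_ms:
            self.queue.popleft()
        # the new frame starts decoding once the decoder is free
        start = max(arrival_ms, self.dec_free_at)
        self.dec_free_at = start + decode_ms
        self.queue.append(self.dec_free_at)
        qlen = len(self.queue)
        qwait = start - arrival_ms  # waiting time in the pre-decoder queue
        return qlen, qwait
```
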
65 | ## How does `traceloader.py` work?
66 | The main function of the trace loader is the `load_line()` function. It reads the slowdown factor, enqueues it into a staging queue (to simulate the network delay), dequeues it according to the network delay, and calculates the frame information by linear interpolation between frames.
67 |
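The staging idea can be sketched as follows (illustrative only; the real `load_line()` additionally interpolates frame information and handles trace boundaries):
```
# Illustrative: a slowdown factor only takes effect after the network delay.
from collections import deque

class StagedSlowdown:
    def __init__(self):
        self.staged = deque()   # (effective_time_ms, slowdown) pairs
        self.current = 1.0

    def push(self, now_ms, rtt_ms, slowdown):
        # the sender only learns the new factor one network delay later
        self.staged.append((now_ms + rtt_ms, slowdown))

    def get(self, now_ms):
        # apply every staged factor whose network delay has elapsed
        while self.staged and self.staged[0][0] <= now_ms:
            _, self.current = self.staged.popleft()
        return self.current
```
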
68 | ## How does `algorithm.py` work?
69 | The algorithms implemented here are quite straightforward. Please refer to the inline comments for the usage of each algorithm.
70 |
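For orientation, the stationary controller (`hta()` in `algorithm.py`) follows a Kingman-style heavy-traffic approximation of the GI/G/1 waiting time: setting the expected queuing delay to `W0` and solving for the target interarrival time `T` gives
```
T = t_dec * (1 + ((c_a^2 + c_s^2) / 2) * t_dec / W0)
```
where `t_dec` is the EWMA of the decoding time, and `c_a^2`, `c_s^2` are the squared coefficients of variation (EWMV over squared EWMA) of the interarrival and service times maintained in `preprocess()`.
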
71 | ## Usage of `stat.py`
72 | Besides the main components of the simulator, we also need to analyze the statistics of the logs. `stat.py` takes the following arguments:
73 |
74 | |Argument|Explanation|
75 | |:---:|:---|
76 | |`--log`| The directory storing logs, the same as the `--log` argument of `main.py`.|
77 | |`--result`| The directory to generate output results, the same as the `--result` argument of `main.py`.|
78 | |`--queuing`| If this flag is set, the percentiles of the queuing delay will be generated to `queuing_all.log`. |
79 | |`--interarrival`| If this flag is set, the percentiles of the interarrival time will be generated to `interarrival_all.log`. |
80 | |`--smooth`| If this flag is set, the percentiles of the difference of the interarrival time will be generated to `smooth_all.log`. |
81 | |`--total`| If this flag is set, the percentiles of the total delay will be generated to `total_all.log`. |
82 | |`--settings`| The settings that need statistics. Be consistent with the folder name in the `log` argument. |
83 | |`--filter`| The filter is used to categorize the network type, device type, client type, etc.; `w` is used as a wildcard. The four positions are devType (UNKNOWN, DESKTOP, LAPTOP, PHONE, PAD, STB, TV), netType (NONE, MOBILE, ETHERNET, WIFI, OTHER), clientType (WinPC, IOS, MacPC, Android), and decodeType (SOFTWARE, HARDWARE). For example, `w20w` represents all Ethernet and Windows sessions. |
84 | |`--flowinfo`| The log file storing the classification for each flow, working with `--filter`. The default value is `../online_data/flowinfo.log` |
85 |
86 | An example usage:
87 | ```
88 | python stat.py --log test-log --result test-result --filter w20w --queuing --interarrival --smooth --total --settings afr_0.00200_0.001_0.250 --flowinfo ../online_data/flowinfo.log
89 | ```
90 | Then you can find all results in `test-result/w20w/afr_0.00200_0.001_0.250`.
--------------------------------------------------------------------------------
/scripts/component_relation.py:
--------------------------------------------------------------------------------
1 | from math import sqrt
2 | from multiprocessing import Pool
3 | import numpy as np
4 | import scipy.stats as sps
5 | from sklearn.metrics import mutual_info_score
6 | import argparse
7 | import os
8 | # from dtw import *
9 | from fastdtw import fastdtw
10 |
11 |
12 | def corr_single_trace(param):
13 | fname, args = param
14 | if args.mode == 'nd':
15 | variables = np.loadtxt(os.path.join(args.trace, fname), dtype=int, delimiter=' ', usecols=(5,4))
16 | elif args.mode == 'nq':
17 | variables = np.loadtxt(os.path.join(args.trace, fname), dtype=int, delimiter=' ', usecols=(5,3))
18 | elif args.mode == 'qd':
19 | variables = np.loadtxt(os.path.join(args.trace, fname), dtype=int, delimiter=' ', usecols=(3,4))
20 | elif args.mode == 'ds':
21 | variables = np.loadtxt(os.path.join(args.trace, fname), dtype=int, delimiter=' ', usecols=(4,7))
22 | total_num = len(variables)
23 |
24 | # pearson's correlation coefficient
25 | corr_coeff = np.corrcoef(variables[:, 0], variables[:, 1])[0, 1]
26 | if args.mode == 'ds':
27 | return [fname, corr_coeff]
28 |
29 | # # normalized cross correlation coefficient
30 | # ncc = np.correlate((variables[:, 0] - np.mean(variables[:, 0])) / np.std(variables[:, 0]),
31 | # (variables[:, 1] - np.mean(variables[:, 1])) / np.std(variables[:, 1]))
32 | # if np.abs(ncc.max()) > np.abs(ncc.min()):
33 | # ncc_coeff = ncc.max()
34 | # else:
35 | # ncc_coeff = ncc.min()
36 |
37 | # mutual information gain
38 | mi_coeff = mutual_info_score(variables[:, 0], variables[:, 1])
39 |
40 |     # dynamic time warping (DTW)
41 | # input amplitude normalization: https://datascience.stackexchange.com/questions/16034/dtw-dynamic-time-warping-requires-prior-normalization
42 | # fastdtw: https://cs.fit.edu/~pkc/papers/tdm04.pdf
43 | # fastdtw dist acceleration: https://github.com/slaypni/fastdtw/issues/35
44 | d, _ = fastdtw(sps.zscore(variables[:, 0]), sps.zscore(variables[:, 1]), dist=2)
45 | dtw_coeff = d / total_num
46 |
47 | # cramer's v (instead of chi-square test)
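    # For each threshold pair (x, y), both variables are dichotomized into a
    # 2x2 contingency table; for a 2x2 table, Cramer's V reduces to
    # sqrt(chi2 / n), which is what is stored below.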
48 | cramer_v = np.zeros((len(args.x_list), len(args.y_list)))
49 | for x_idx in range(len(args.x_list)):
50 | for y_idx in range(len(args.y_list)):
51 | x = args.x_list[x_idx]
52 | y = args.y_list[y_idx]
53 | table = np.zeros((2, 2), dtype=int)
54 | table[0][0] = sum((variables[:, 0] <= x) * (variables[:, 1] <= y))
55 | table[0][1] = sum((variables[:, 0] <= x) * (variables[:, 1] > y))
56 | table[1][0] = sum((variables[:, 0] > x) * (variables[:, 1] <= y))
57 | table[1][1] = sum((variables[:, 0] > x) * (variables[:, 1] > y))
58 |
59 | # when the sample size is highly biased, ignore it
60 | if table.min() <= max(1e-4*total_num, 5):
61 | chi2_value = 0
62 | else:
63 | try:
64 | chi2_value = sps.chi2_contingency(table)[0]
65 | except ValueError:
66 | chi2_value = 0
67 | cramer_v[x_idx, y_idx] = sqrt(chi2_value / total_num)
68 | np.savetxt(os.path.join(args.result, args.filter, 'stat', 'corr_' + args.mode, 'cramer', fname), cramer_v, fmt='%.3f')
69 | return [fname, corr_coeff, mi_coeff, dtw_coeff]
70 |
71 |
72 | def cramer_heatmap(args, cramer_thres):
73 | fnames = os.listdir(os.path.join(args.result, args.filter, 'stat', 'corr_' + args.mode, 'cramer'))
74 | count_matrix = np.zeros((len(args.x_list), len(args.y_list)))
75 | for fname in fnames:
76 | value_matrix = np.loadtxt(os.path.join(args.result, args.filter, 'stat', 'corr_' + args.mode, 'cramer', fname),
77 | dtype=float, delimiter=' ')
78 | count_matrix += value_matrix >= cramer_thres
79 | count_matrix = count_matrix / len(fnames)
80 | np.savetxt(os.path.join(args.result, args.filter, 'stat', 'corr_' + args.mode, 'cramer_' + str(cramer_thres) + '.log'), count_matrix, fmt='%.6f')
81 |
82 |
83 | if __name__ == '__main__':
84 |
85 | parser = argparse.ArgumentParser()
86 | parser.add_argument('--trace', type=str, default='../sim/test-trace')
87 | parser.add_argument('--result', type=str, default='../sim/test-result')
88 | parser.add_argument('--filter', type=str, default='wwww',
89 | help='devType (UNKNOWN, DESKTOP, LAPTOP, PHONE, PAD, STB, TV), \n' +
90 | 'netType (NONE, MOBILE, ETHERNET, WIFI, OTHER), \n' +
91 | 'clientType (WinPC, IOS, MacPC, Android), \n' +
92 | 'decodeType (SOFTWARE, HARDWARE)')
93 | parser.add_argument('--flowinfo', type=str, default='../online_data/flowinfo.log')
94 | parser.add_argument('--mode', choices=['nd', 'nq', 'qd', 'ds'])
95 | parser.add_argument('--cramer-thres', type=float)
96 | parser.add_argument('--worker', type=int)
97 | args = parser.parse_args()
98 |
99 | devType = args.filter[0]
100 | netType = args.filter[1]
101 | clientType = args.filter[2]
102 | decodeType = args.filter[3]
103 |
104 | filtered_sid = []
105 | with open(args.flowinfo, 'r') as f:
106 | while True:
107 | line = f.readline().split()
108 | if not line:
109 | break
110 | devCond = devType == 'w' or devType == line[2]
111 | netCond = netType == 'w' or netType == line[4]
112 | clientCond = clientType == 'w' or clientType == line[6]
113 | decodeCond = decodeType == 'w' or decodeType == line[8]
114 | if devCond and netCond and clientCond and decodeCond:
115 | filtered_sid.append(line[0])
116 |
117 | if not os.path.exists(os.path.join(args.result, args.filter, 'stat', 'corr_' + args.mode, 'cramer')):
118 | os.makedirs(os.path.join(args.result, args.filter, 'stat', 'corr_' + args.mode, 'cramer'))
119 |
120 | fnames = os.listdir(args.trace)
121 | fnames_new = []
122 | for fname in fnames:
123 | sid = fname.split('_')[0]
124 | if sid in filtered_sid:
125 | fnames_new.append(fname)
126 | print(len(fnames_new), '/', len(fnames))
127 | fnames = fnames_new
128 |
129 | if args.mode == 'nd':
130 | args.x_list = [16, 24, 32, 48, 64, 96, 128, 192, 256]
131 | args.y_list = [4, 6, 8, 12, 16, 24, 32, 48, 64]
132 | elif args.mode == 'nq':
133 | args.x_list = [16, 24, 32, 48, 64, 96, 128, 192, 256]
134 | args.y_list = [4, 6, 8, 12, 16, 24, 32, 48, 64]
135 | elif args.mode == 'qd':
136 | args.x_list = [4, 6, 8, 12, 16, 24, 32, 48, 64]
137 | args.y_list = [4, 6, 8, 12, 16, 24, 32, 48, 64]
138 |
139 | if args.worker:
140 | pool = Pool(args.worker)
141 | else:
142 | pool = Pool()
143 | results = pool.map(corr_single_trace, [(fname, args) for fname in fnames])
144 | pool.close()
145 | pool.join()
146 |
147 | with open(os.path.join(args.result, args.filter, 'stat', 'corr_' + args.mode, 'corr_coeff.log'), 'w') as f:
148 | for result in results:
149 | fname = result[0]
150 | corr_coeff = result[1]
151 | if args.mode == 'ds':
152 | f.write("%s %.3f\n" % (fname, corr_coeff))
153 | else:
154 | mi_coeff = result[2]
155 | dtw_coeff = result[3]
156 | f.write("%s %.3f %.3f %.3f\n" % (fname, corr_coeff, mi_coeff, dtw_coeff))
157 |
158 | if args.mode != 'ds':
159 | if args.cramer_thres:
160 | cramer_heatmap(args, args.cramer_thres)
161 | else:
162 | for cramer_thres in [0.1, 0.3, 0.5]:
163 | cramer_heatmap(args, cramer_thres)
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | ## Ignore Visual Studio temporary files, build results, and
2 | ## files generated by popular Visual Studio add-ons.
3 | ##
4 | ## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore
5 |
6 | # User-specific files
7 | *.rsuser
8 | *.suo
9 | *.user
10 | *.userosscache
11 | *.sln.docstates
12 |
13 | # User-specific files (MonoDevelop/Xamarin Studio)
14 | *.userprefs
15 |
16 | # Mono auto generated files
17 | mono_crash.*
18 |
19 | # Build results
20 | [Dd]ebug/
21 | [Dd]ebugPublic/
22 | [Rr]elease/
23 | [Rr]eleases/
24 | x64/
25 | x86/
26 | [Aa][Rr][Mm]/
27 | [Aa][Rr][Mm]64/
28 | bld/
29 | [Bb]in/
30 | [Oo]bj/
31 | [Ll]og/
32 | [Ll]ogs/
33 |
34 | # Visual Studio 2015/2017 cache/options directory
35 | .vs/
36 | # Uncomment if you have tasks that create the project's static files in wwwroot
37 | #wwwroot/
38 |
39 | # Visual Studio 2017 auto generated files
40 | Generated\ Files/
41 |
42 | # MSTest test Results
43 | [Tt]est[Rr]esult*/
44 | [Bb]uild[Ll]og.*
45 |
46 | # NUnit
47 | *.VisualState.xml
48 | TestResult.xml
49 | nunit-*.xml
50 |
51 | # Build Results of an ATL Project
52 | [Dd]ebugPS/
53 | [Rr]eleasePS/
54 | dlldata.c
55 |
56 | # Benchmark Results
57 | BenchmarkDotNet.Artifacts/
58 |
59 | # .NET Core
60 | project.lock.json
61 | project.fragment.lock.json
62 | artifacts/
63 |
64 | # StyleCop
65 | StyleCopReport.xml
66 |
67 | # Files built by Visual Studio
68 | *_i.c
69 | *_p.c
70 | *_h.h
71 | *.ilk
72 | *.meta
73 | *.obj
74 | *.iobj
75 | *.pch
76 | *.pdb
77 | *.ipdb
78 | *.pgc
79 | *.pgd
80 | *.rsp
81 | *.sbr
82 | *.tlb
83 | *.tli
84 | *.tlh
85 | *.tmp
86 | *.tmp_proj
87 | *_wpftmp.csproj
88 | *.vspscc
89 | *.vssscc
90 | .builds
91 | *.pidb
92 | *.svclog
93 | *.scc
94 |
95 | # Chutzpah Test files
96 | _Chutzpah*
97 |
98 | # Visual C++ cache files
99 | ipch/
100 | *.aps
101 | *.ncb
102 | *.opendb
103 | *.opensdf
104 | *.sdf
105 | *.cachefile
106 | *.VC.db
107 | *.VC.VC.opendb
108 |
109 | # Visual Studio profiler
110 | *.psess
111 | *.vsp
112 | *.vspx
113 | *.sap
114 |
115 | # Visual Studio Trace Files
116 | *.e2e
117 |
118 | # TFS 2012 Local Workspace
119 | $tf/
120 |
121 | # Guidance Automation Toolkit
122 | *.gpState
123 |
124 | # ReSharper is a .NET coding add-in
125 | _ReSharper*/
126 | *.[Rr]e[Ss]harper
127 | *.DotSettings.user
128 |
129 | # JustCode is a .NET coding add-in
130 | .JustCode
131 |
132 | # TeamCity is a build add-in
133 | _TeamCity*
134 |
135 | # DotCover is a Code Coverage Tool
136 | *.dotCover
137 |
138 | # AxoCover is a Code Coverage Tool
139 | .axoCover/*
140 | !.axoCover/settings.json
141 |
142 | # Visual Studio code coverage results
143 | *.coverage
144 | *.coveragexml
145 |
146 | # NCrunch
147 | _NCrunch_*
148 | .*crunch*.local.xml
149 | nCrunchTemp_*
150 |
151 | # MightyMoose
152 | *.mm.*
153 | AutoTest.Net/
154 |
155 | # Web workbench (sass)
156 | .sass-cache/
157 |
158 | # Installshield output folder
159 | [Ee]xpress/
160 |
161 | # DocProject is a documentation generator add-in
162 | DocProject/buildhelp/
163 | DocProject/Help/*.HxT
164 | DocProject/Help/*.HxC
165 | DocProject/Help/*.hhc
166 | DocProject/Help/*.hhk
167 | DocProject/Help/*.hhp
168 | DocProject/Help/Html2
169 | DocProject/Help/html
170 |
171 | # Click-Once directory
172 | publish/
173 |
174 | # Publish Web Output
175 | *.[Pp]ublish.xml
176 | *.azurePubxml
177 | # Note: Comment the next line if you want to checkin your web deploy settings,
178 | # but database connection strings (with potential passwords) will be unencrypted
179 | *.pubxml
180 | *.publishproj
181 |
182 | # Microsoft Azure Web App publish settings. Comment the next line if you want to
183 | # checkin your Azure Web App publish settings, but sensitive information contained
184 | # in these scripts will be unencrypted
185 | PublishScripts/
186 |
187 | # NuGet Packages
188 | *.nupkg
189 | # NuGet Symbol Packages
190 | *.snupkg
191 | # The packages folder can be ignored because of Package Restore
192 | **/[Pp]ackages/*
193 | # except build/, which is used as an MSBuild target.
194 | !**/[Pp]ackages/build/
195 | # Uncomment if necessary however generally it will be regenerated when needed
196 | #!**/[Pp]ackages/repositories.config
197 | # NuGet v3's project.json files produces more ignorable files
198 | *.nuget.props
199 | *.nuget.targets
200 |
201 | # Microsoft Azure Build Output
202 | csx/
203 | *.build.csdef
204 |
205 | # Microsoft Azure Emulator
206 | ecf/
207 | rcf/
208 |
209 | # Windows Store app package directories and files
210 | AppPackages/
211 | BundleArtifacts/
212 | Package.StoreAssociation.xml
213 | _pkginfo.txt
214 | *.appx
215 | *.appxbundle
216 | *.appxupload
217 |
218 | # Visual Studio cache files
219 | # files ending in .cache can be ignored
220 | *.[Cc]ache
221 | # but keep track of directories ending in .cache
222 | !?*.[Cc]ache/
223 |
224 | # Others
225 | ClientBin/
226 | ~$*
227 | *~
228 | *.dbmdl
229 | *.dbproj.schemaview
230 | *.jfm
231 | *.pfx
232 | *.publishsettings
233 | orleans.codegen.cs
234 |
235 | # Including strong name files can present a security risk
236 | # (https://github.com/github/gitignore/pull/2483#issue-259490424)
237 | #*.snk
238 |
239 | # Since there are multiple workflows, uncomment next line to ignore bower_components
240 | # (https://github.com/github/gitignore/pull/1529#issuecomment-104372622)
241 | #bower_components/
242 |
243 | # RIA/Silverlight projects
244 | Generated_Code/
245 |
246 | # Backup & report files from converting an old project file
247 | # to a newer Visual Studio version. Backup files are not needed,
248 | # because we have git ;-)
249 | _UpgradeReport_Files/
250 | Backup*/
251 | UpgradeLog*.XML
252 | UpgradeLog*.htm
253 | ServiceFabricBackup/
254 | *.rptproj.bak
255 |
256 | # SQL Server files
257 | *.mdf
258 | *.ldf
259 | *.ndf
260 |
261 | # Business Intelligence projects
262 | *.rdl.data
263 | *.bim.layout
264 | *.bim_*.settings
265 | *.rptproj.rsuser
266 | *- [Bb]ackup.rdl
267 | *- [Bb]ackup ([0-9]).rdl
268 | *- [Bb]ackup ([0-9][0-9]).rdl
269 |
270 | # Microsoft Fakes
271 | FakesAssemblies/
272 |
273 | # GhostDoc plugin setting file
274 | *.GhostDoc.xml
275 |
276 | # Node.js Tools for Visual Studio
277 | .ntvs_analysis.dat
278 | node_modules/
279 |
280 | # Visual Studio 6 build log
281 | *.plg
282 |
283 | # Visual Studio 6 workspace options file
284 | *.opt
285 |
286 | # Visual Studio 6 auto-generated workspace file (contains which files were open etc.)
287 | *.vbw
288 |
289 | # Visual Studio LightSwitch build output
290 | **/*.HTMLClient/GeneratedArtifacts
291 | **/*.DesktopClient/GeneratedArtifacts
292 | **/*.DesktopClient/ModelManifest.xml
293 | **/*.Server/GeneratedArtifacts
294 | **/*.Server/ModelManifest.xml
295 | _Pvt_Extensions
296 |
297 | # Paket dependency manager
298 | .paket/paket.exe
299 | paket-files/
300 |
301 | # FAKE - F# Make
302 | .fake/
303 |
304 | # CodeRush personal settings
305 | .cr/personal
306 |
307 | # Python Tools for Visual Studio (PTVS)
308 | __pycache__/
309 | *.pyc
310 |
311 | # Cake - Uncomment if you are using it
312 | # tools/**
313 | # !tools/packages.config
314 |
315 | # Tabs Studio
316 | *.tss
317 |
318 | # Telerik's JustMock configuration file
319 | *.jmconfig
320 |
321 | # BizTalk build output
322 | *.btp.cs
323 | *.btm.cs
324 | *.odx.cs
325 | *.xsd.cs
326 |
327 | # OpenCover UI analysis results
328 | OpenCover/
329 |
330 | # Azure Stream Analytics local run output
331 | ASALocalRun/
332 |
333 | # MSBuild Binary and Structured Log
334 | *.binlog
335 |
336 | # NVidia Nsight GPU debugger configuration file
337 | *.nvuser
338 |
339 | # MFractors (Xamarin productivity tool) working folder
340 | .mfractor/
341 |
342 | # Local History for Visual Studio
343 | .localhistory/
344 |
345 | # BeatPulse healthcheck temp database
346 | healthchecksdb
347 |
348 | # Backup folder for Package Reference Convert tool in Visual Studio 2017
349 | MigrationBackup/
350 |
351 | # Ionide (cross platform F# VS Code tools) working folder
352 | .ionide/
353 |
354 | .vscode/
355 |
356 | *.log
357 | *.zip
358 | *.gz
359 | *.csv
360 |
361 | /sim/test.py
362 | sim/.vscode/
363 | sim/tmp*
364 | *.pdf
365 | sim/sort*
366 | scripts/sort*
367 | scripts/tmp*
368 |
--------------------------------------------------------------------------------
/sim/main.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import datetime
3 | import os
4 | import random
5 | import sys
6 | import numpy as np
7 |
8 | import algorithm
9 | import env
10 | import traceloader
11 |
12 | target_arri_seq = []
13 | random.seed(10)
14 |
15 | def files_generator(args):
16 | if not os.path.exists(os.path.join(args.log, args.settings)):
17 | os.makedirs(os.path.join(args.log, args.settings))
18 | if not os.path.exists(os.path.join(args.action, args.settings)):
19 | os.makedirs(os.path.join(args.action, args.settings))
20 | if not os.path.exists(os.path.join(args.result, args.settings)):
21 | os.makedirs(os.path.join(args.result, args.settings))
22 |
23 | def execute_simulation(args, fname, qlen_map_seq = [], qwait_map_seq = []):
24 | # initialize the states
25 | dec_env = env.Environment(args, fname)
26 | fpath = os.path.join(args.trace, fname)
27 | traces = traceloader.TraceLoader(fpath, args.min_trace_length)
28 |
29 | trace_end = 0 # flag whether we read the end of the trace
30 | target_arrv = 1 / 60 # the initial factor
31 | is_pause = False # not pausing encoder
32 |
33 | alg = algorithm.Algorithm(args, fname)
34 | alg.qlen_target_map_seq = qlen_map_seq
35 | alg.qwait_target_map_seq = qwait_map_seq
36 |
37 | while not trace_end:
38 | line, trace_end = traces.load_line(target_arrv)
39 | qlen, qwait, arrv_intvl, serv_intvl, rtt = dec_env.step(line)
40 | target_arrv = alg.predict(qlen, qwait, arrv_intvl, serv_intvl, rtt)
41 |
42 | del traces
43 | del dec_env
44 | del alg
45 |
46 | def run_single_trace(param):
47 | fname, args = param
48 |
49 |     # skip short logs (they should already have been removed when preprocessing the logs; this is just a safeguard)
50 | with open(os.path.join(args.trace, fname), 'r') as f:
51 | cnt = -1
52 | shortFlag = True
53 | for cnt, line in enumerate(f.readlines()):
54 | cnt += 1
55 | if (cnt > args.short_session):
56 | shortFlag = False
57 | break
58 | if shortFlag:
59 | print(fname, "short flow", cnt)
60 | return
61 |
62 | if args.algorithm == 'qlen' or args.algorithm == 'qwait': # enumerate baselines
63 | map_seq = []
64 |         # generate the actual input-to-target_arrv mapping list
65 | for idx in target_arri_seq:
66 | map_seq.append(algorithm.TARGET_ARRIVAL_INTERVAL[idx])
67 |         execute_simulation(args, fname, qlen_map_seq = map_seq, qwait_map_seq = map_seq)
68 | return
69 |
70 |     execute_simulation(args, fname)
71 |
72 |
73 | if __name__ == "__main__":
74 | starttime = datetime.datetime.now()
75 | parser = argparse.ArgumentParser()
76 | # Simulator settings
77 |     parser.add_argument('--min-trace-length', type=int, default=5, help='The minimum number of frames required for a flow')
78 |     parser.add_argument('--max-queue-length', type=int, default=16, help='The capacity of pre-decoder queue')
79 |     parser.add_argument('--short-session', type=int, default=7200, help='The minimum number of frames for a valid flow')
80 | parser.add_argument('--frame-rate-level', type=int, default=5, help='Quantized frame rate level')
81 | parser.add_argument('--increase-delay', type=float, default=1, help='Delay of frame rate increase')
82 | parser.add_argument('--random_sample', type=int, default=0, help='Random sample from traces. 0 means all')
83 |
84 | # Input and output settings
85 |     parser.add_argument('--trace', default='../traces', help='The directory containing the traces')
86 | parser.add_argument('--log', default='../logs', help='The directory where resulting logs are generated')
87 | parser.add_argument('--action', default='../actions', help='The directory where frame rate actions are generated')
88 | parser.add_argument('--result', default='../results', type=str)
89 |
90 | # Algorithm settings
91 | parser.add_argument('--algorithm', choices=['afr', 'bba', 'qlen', 'hta', 'native', 'afrqcn', 'qwait', '60fps', 'pause', 'txrate'], help='Frame rate control algorithm.')
92 |
93 | # Parameters for AFR
94 | parser.add_argument('--wzero', type=float, default=0.002, help='W0 in HTA')
95 | parser.add_argument('--xi-arrv', type=float, default=0.033, help='Xi-arrv in preprocess')
96 | parser.add_argument('--xi-serv', type=float, default=0.25, help='Xi-serv in preprocess')
97 |
98 | # Parameters for Qlen or Qwait
99 | parser.add_argument('--qlen_target_arri_seq', type=list, nargs='*', default='0123', help='sequence of mapping qlen to target arrival interval')
100 | parser.add_argument('--qwait_target_arri_seq', type=list, nargs='*', default="0123", help='sequence of mapping qwait to target arrival interval')
101 |
102 | # Parameters for pause
103 |     parser.add_argument('--pause_dimension', choices=['len', 'wait', 'hta'], default='len', help='dimension used for the pause decision.')
104 |     parser.add_argument('--pause_stop_threshold', type=int, default=1, help='deactivate encoder pausing when pause_dimension < the threshold (the controller keeps the dimension <= threshold).')
105 |     parser.add_argument('--pause_activate_threshold', type=int, default=1, help='activate encoder pausing when pause_dimension > the threshold (must be >= pause_stop_threshold).')
106 |
107 | # Parameters for txrate
108 | parser.add_argument('--rho', type=float, default=0.95)
109 |
110 | # Performance settings
111 | parser.add_argument('--threads', type=int, help='Number of parallel threads')
112 | parser.add_argument('--mode', choices=['debug', 'release', 'ray'], default='release')
113 | args = parser.parse_args()
114 |
115 | if args.algorithm == '60fps':
116 | args.max_queue_length = np.inf
117 |
118 |     # set the settings of this experiment, with the parameters encoded in the folder name
119 | if args.algorithm == 'afr' or args.algorithm == 'hta':
120 | args.settings = "%s_%.5f_%.3f_%.3f" % (args.algorithm, args.wzero, args.xi_arrv, args.xi_serv)
121 | elif (args.algorithm == 'qlen' or args.algorithm == 'qwait'):
122 | target_arri_seq = args.qlen_target_arri_seq[0]
123 | if args.algorithm == 'qwait':
124 | target_arri_seq = args.qwait_target_arri_seq[0]
125 | target_arri_seq = [int(item) for item in target_arri_seq]
126 | if len(target_arri_seq) < 4:
127 |             print("length of qlen/qwait's input target_arri_seq must be >= 4")
128 | sys.exit()
129 | args.settings = "%s_%d_%s" % (args.algorithm, len(target_arri_seq), ''.join([str(elem) for elem in target_arri_seq]))
130 | elif args.algorithm == 'pause':
131 | if args.pause_stop_threshold > args.pause_activate_threshold:
132 |             print("pause baseline's pause_activate_threshold must be >= pause_stop_threshold")
133 | sys.exit()
134 | args.settings = "%s_%s_%d_%d" % (args.algorithm, args.pause_dimension, args.pause_stop_threshold, args.pause_activate_threshold)
135 | elif args.algorithm == 'txrate':
136 | args.settings = "%s_%.2f" % (args.algorithm, args.rho)
137 | elif args.algorithm == 'native':
138 | args.settings = "%s_%d" % (args.algorithm, args.max_queue_length)
139 | else:
140 | args.settings = args.algorithm
141 |
142 | files_generator(args)
143 |
144 | # check the type of the session (network, client, etc.)
145 | fnames = os.listdir(args.trace)
146 |
147 | # the simulation mode (affecting the performance)
148 |     # the release mode takes up the resources on the local server: if args.threads is defined, it will use args.threads cores; otherwise, all cores on this server.
149 | if args.mode == 'release':
150 | from multiprocessing import Pool
151 | if args.threads:
152 | pool = Pool(args.threads)
153 | else:
154 | pool = Pool()
155 | if args.random_sample == 0:
156 | pool.map(run_single_trace, [(fname, args) for fname in fnames])
157 | else:
158 | fnames.sort()
159 | pool.map(run_single_trace, [(fname, args) for fname in random.sample(fnames, args.random_sample)])
160 | pool.close()
161 | pool.join()
162 |
163 | # the ray mode has been deprecated
164 | elif args.mode == 'ray':
165 | from ray.util.multiprocessing import Pool
166 | pool = Pool()
167 | pool.map(run_single_trace, [(fname, args) for fname in fnames])
168 | pool.close()
169 | pool.join()
170 |
171 | # the debug mode has no parallel acceleration
172 | elif args.mode == 'debug':
173 | if args.random_sample == 0:
174 | for fname in fnames:
175 | run_single_trace((fname, args))
176 | else:
177 | fnames.sort()
178 | for fname in random.sample(fnames, args.random_sample):
179 | run_single_trace((fname, args))
180 |
181 | endtime = datetime.datetime.now()
182 | print(f'{args.settings} totalTime: {(endtime - starttime).total_seconds()/60:.2f} minutes')
183 |
--------------------------------------------------------------------------------
/sim/stat.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import datetime
3 | import os
4 | import subprocess as sp
5 | from multiprocessing import Pool
6 |
7 | import numpy as np
8 |
9 |
10 | def stat(param):
11 | fname, args, prefix = param
12 |
13 | if args.queuing:
14 | fname_queuing = os.path.join(prefix, fname + '_queuing.tmp')
15 | f_queuing = open(fname_queuing, 'w')
16 | if args.interarrival:
17 | last_arrival = -1
18 | fname_interarrival = os.path.join(prefix, fname + '_interarrival.tmp')
19 | f_interarrival = open(fname_interarrival, 'w')
20 | if args.smooth:
21 | last_interarrival = -1
22 | inter_arrival = -1
23 | fname_smooth = os.path.join(prefix, fname + '_smooth.tmp')
24 | f_smooth = open(fname_smooth, 'w')
25 | if args.total:
26 | fname_total = os.path.join(prefix, fname + '_total.tmp')
27 | f_total = open(fname_total, 'w')
28 | if args.decode:
29 | fname_decode = os.path.join(prefix, fname + '_decode.tmp')
30 | f_decode = open(fname_decode, 'w')
31 | if args.network:
32 | fname_network = os.path.join(prefix, fname + '_network.tmp')
33 | f_network = open(fname_network, 'w')
34 |
35 | with open(os.path.join(args.log, args.settings, fname)) as f:
36 | while True:
37 | line = f.readline().split()
38 | if not line:
39 | break
40 | if args.queuing:
41 | queuing = float(line[2])
42 | f_queuing.write("%.2f\n" % queuing)
43 | if args.interarrival:
44 | if last_arrival < 0:
45 | # actually the time after decoding
46 | last_arrival = float(line[0]) + float(line[2]) + float(line[3])
47 | else:
48 | inter_arrival = float(line[0]) + float(line[2]) + float(line[3]) - last_arrival
49 | f_interarrival.write("%.2f\n" % inter_arrival)
50 | last_arrival = float(line[0]) + float(line[2]) + float(line[3])
51 | if args.smooth:
52 | if last_interarrival < 0:
53 | if inter_arrival >= 0:
54 | last_interarrival = inter_arrival
55 | else:
56 | smooth = abs(inter_arrival - last_interarrival)
57 | f_smooth.write("%.2f\n" % smooth)
58 | last_interarrival = inter_arrival
59 | if args.total:
60 | total = float(line[4])
61 | f_total.write("%.2f\n" % total)
62 | if args.decode:
63 | decode = float(line[3])
64 | f_decode.write("%.2f\n" % decode)
65 | if args.network:
66 | network = float(line[1])
67 | f_network.write("%.2f\n" % network)
68 |
69 | # sort results to calculate cdf
70 | if args.queuing:
71 | f_queuing.close()
72 | sp_queuing = sp.Popen('sort -T ./ -n ' + fname_queuing + ' -o ' + fname_queuing, shell=True)
73 | if args.interarrival:
74 | f_interarrival.close()
75 | sp_interarrival = sp.Popen('sort -T ./ -n ' + fname_interarrival + ' -o ' + fname_interarrival, shell=True)
76 | if args.smooth:
77 | f_smooth.close()
78 | sp_smooth = sp.Popen('sort -T ./ -n ' + fname_smooth + ' -o ' + fname_smooth, shell=True)
79 | if args.total:
80 | f_total.close()
81 | sp_total = sp.Popen('sort -T ./ -n ' + fname_total + ' -o ' + fname_total, shell=True)
82 | if args.decode:
83 | f_decode.close()
84 | sp_decode = sp.Popen('sort -T ./ -n ' + fname_decode + ' -o ' + fname_decode, shell=True)
85 | if args.network:
86 | f_network.close()
87 | sp_network = sp.Popen('sort -T ./ -n ' + fname_network + ' -o ' + fname_network, shell=True)
88 |
89 | # calculate per-log cdf
90 | # if args.queuing:
91 | # sp_queuing.wait()
92 | # sp_queuing = sp.Popen("awk -vstep=$(awk 'END{printf(\"%.4f\\n\", NR/10000)}' " + fname_queuing + " | bc) 'BEGIN{cnt=1} {sum+=$1;if (NR>=int(cnt*step)){print cnt,$0;cnt=cnt+1+int(NR-cnt*step)}} END{printf(\"Avg %.3f\\n\", sum/NR)}' " + fname_queuing + " > " + os.path.join(args.result, fname + '_queuing.log'), shell=True)
93 | # if args.interarrival:
94 | # sp_interarrival.wait()
95 | # sp_queuing = sp.Popen("awk -vstep=$(awk 'END{printf(\"%.4f\\n\", NR/10000)}' " + fname_interarrival + " | bc) 'BEGIN{cnt=1} {sum+=$1;if (NR>=int(cnt*step)){print cnt,$0;cnt=cnt+1+int(NR-cnt*step)}} END{printf(\"Avg %.2f\\n\", sum/NR)}' " + fname_interarrival + " > " + os.path.join(args.result, fname + '_interarrival.log'), shell=True)
96 | # if args.total:
97 | # sp_total.wait()
98 | # sp_queuing = sp.Popen("awk -vstep=$(awk 'END{printf(\"%.4f\\n\", NR/10000)}' " + fname_total + " | bc) 'BEGIN{cnt=1} {sum+=$1;if (NR>=int(cnt*step)){print cnt,$0;cnt=cnt+1+int(NR-cnt*step)}} END{printf(\"Avg %.2f\\n\", sum/NR)}' " + fname_total + " > " + os.path.join(args.result, fname + '_total.log'), shell=True)
99 |
100 | if args.queuing:
101 | sp_queuing.wait()
102 | if args.interarrival:
103 | sp_interarrival.wait()
104 | if args.smooth:
105 | sp_smooth.wait()
106 | if args.total:
107 | sp_total.wait()
108 | if args.decode:
109 | sp_decode.wait()
110 | if args.network:
111 | sp_network.wait()
112 |
113 |
114 | if __name__ == "__main__":
115 | starttime = datetime.datetime.now()
116 | parser = argparse.ArgumentParser()
117 | parser.add_argument('--log', type=str)
118 | parser.add_argument('--result', type=str)
119 | parser.add_argument('--settings', type=str)
120 |
121 | parser.add_argument('--queuing', action='store_true')
122 | parser.add_argument('--interarrival', action='store_true')
123 | parser.add_argument('--smooth', action='store_true')
124 | parser.add_argument('--total', action='store_true')
125 | parser.add_argument('--decode', action='store_true')
126 | parser.add_argument('--network', action='store_true')
127 |
128 | parser.add_argument('--threads', type=int, help='Number of parallel threads')
129 | parser.add_argument('--filter', type=str, default='w2w1',
130 | help='devType (UNKNOWN, DESKTOP, LAPTOP, PHONE, PAD, STB, TV), \n' +
131 | 'netType (NONE, MOBILE, ETHERNET, WIFI, OTHER), \n' +
132 | 'clientType (WinPC, IOS, MacPC, Android), \n' +
133 | 'decodeType (SOFTWARE, HARDWARE)')
134 | parser.add_argument('--flowinfo', type=str, default='../online_data/flowinfo.log')
135 | parser.add_argument('--flowlist', type=str)
136 | args = parser.parse_args()
137 |
138 | if args.filter[0] != "N": # N means none
139 | devType = args.filter[0]
140 | netType = args.filter[1]
141 | clientType = args.filter[2]
142 | decodeType = args.filter[3]
143 |
144 | filtered_sid = []
145 | with open(args.flowinfo, 'r') as f:
146 | while True:
147 | line = f.readline().split()
148 | if not line:
149 | break
150 | devCond = devType == 'w' or devType == line[2]
151 | netCond = netType == 'w' or netType == line[4]
152 | clientCond = clientType == 'w' or clientType == line[6]
153 | decodeCond = decodeType == 'w' or decodeType == line[8]
154 | if devCond and netCond and clientCond and decodeCond:
155 | filtered_sid.append(line[0])
156 | if args.flowlist:
157 | fnames = np.loadtxt(args.flowlist, dtype=str).tolist()
158 | prefix = os.path.join(args.result, args.filter + '_' + os.path.split(args.flowlist)[-1], args.settings)
159 | else:
160 | fnames = os.listdir(os.path.join(args.log, args.settings))
161 | prefix = os.path.join(args.result, args.filter, args.settings)
162 |
163 | fnames_new = []
164 | for fname in fnames:
165 | sid = fname.split('_')[0]
166 | if sid in filtered_sid:
167 | fnames_new.append(fname)
168 | print(len(fnames_new), '/', len(fnames))
169 | fnames = fnames_new
170 |     else: # the filter starts with "N" (none): no filtering is applied
171 | fnames = os.listdir(os.path.join(args.log, args.settings))
172 | prefix = os.path.join(args.result, args.filter, args.settings)
173 |
174 | if not os.path.exists(prefix):
175 | os.makedirs(prefix)
176 |
177 |
178 | # RELEASE
179 | if args.threads:
180 | pool = Pool(args.threads)
181 | else:
182 | pool = Pool()
183 | pool.map(stat, [(fname, args, prefix) for fname in fnames])
184 | pool.close()
185 | pool.join()
186 |
187 | # DEBUG
188 | # for fname in fnames:
189 | # stat((fname, args))
190 |
191 |     # Cont'd
192 |     # cat and sort all-log results
193 | if args.queuing:
194 | fname_queuing_all = os.path.join(prefix, "queuing_all.tmp")
195 | sp_queuing = sp.Popen('ls ' + prefix + ' | grep "_queuing.tmp" | xargs -i cat ' + os.path.join(prefix, '{}') + ' | sort -T ./ -n -o ' + fname_queuing_all, shell=True)
196 | if args.interarrival:
197 | fname_interarrival_all = os.path.join(prefix, "interarrival_all.tmp")
198 | sp_interarrival = sp.Popen('ls ' + prefix + ' | grep "_interarrival.tmp" | xargs -i cat ' + os.path.join(prefix, '{}') + ' | sort -T ./ -n -o ' + fname_interarrival_all, shell=True)
199 | if args.smooth:
200 | fname_smooth_all = os.path.join(prefix, "smooth_all.tmp")
201 | sp_smooth = sp.Popen('ls ' + prefix + ' | grep "_smooth.tmp" | xargs -i cat ' + os.path.join(prefix, '{}') + ' | sort -T ./ -n -o ' + fname_smooth_all, shell=True)
202 | if args.total:
203 | fname_total_all = os.path.join(prefix, "total_all.tmp")
204 | sp_total = sp.Popen('ls ' + prefix + ' | grep "_total.tmp" | xargs -i cat ' + os.path.join(prefix, '{}') + ' | sort -T ./ -n -o ' + fname_total_all, shell=True)
205 | if args.decode:
206 | fname_decode_all = os.path.join(prefix, "decode_all.tmp")
207 | sp_decode = sp.Popen('ls ' + prefix + ' | grep "_decode.tmp" | xargs -i cat ' + os.path.join(prefix, '{}') + ' | sort -T ./ -n -o ' + fname_decode_all, shell=True)
208 | if args.network:
209 | fname_network_all = os.path.join(prefix, "network_all.tmp")
210 | sp_network = sp.Popen('ls ' + prefix + ' | grep "_network.tmp" | xargs -i cat ' + os.path.join(prefix, '{}') + ' | sort -T ./ -n -o ' + fname_network_all, shell=True)
211 |
212 | # calculate all-log cdf
213 | if args.queuing:
214 | sp_queuing.wait()
215 | sp_queuing = sp.Popen("awk -vstep=$(awk 'END{printf(\"%.4f\\n\", NR/10000)}' " + fname_queuing_all + " | bc) 'BEGIN{cnt=1} {sum+=$1;if (NR>=int(cnt*step)){print cnt,$0;cnt=cnt+1+int(NR-cnt*step)}} END{printf(\"Avg %.3f\\n\", sum/NR)}' " + fname_queuing_all + " > " + os.path.join(prefix, 'queuing_all.log'), shell=True)
216 | if args.interarrival:
217 | sp_interarrival.wait()
218 | sp_interarrival = sp.Popen("awk -vstep=$(awk 'END{printf(\"%.4f\\n\", NR/10000)}' " + fname_interarrival_all + " | bc) 'BEGIN{cnt=1} {sum+=$1;if (NR>=int(cnt*step)){print cnt,$0;cnt=cnt+1+int(NR-cnt*step)}} END{printf(\"Avg %.2f\\n\", sum/NR)}' " + fname_interarrival_all + " > " + os.path.join(prefix, 'interarrival_all.log'), shell=True)
219 | if args.smooth:
220 | sp_smooth.wait()
221 | sp_smooth = sp.Popen("awk -vstep=$(awk 'END{printf(\"%.4f\\n\", NR/10000)}' " + fname_smooth_all + " | bc) 'BEGIN{cnt=1} {sum+=$1;if (NR>=int(cnt*step)){print cnt,$0;cnt=cnt+1+int(NR-cnt*step)}} END{printf(\"Avg %.6f\\n\", sum/NR)}' " + fname_smooth_all + " > " + os.path.join(prefix, 'smooth_all.log'), shell=True)
222 | if args.total:
223 | sp_total.wait()
224 | sp_total = sp.Popen("awk -vstep=$(awk 'END{printf(\"%.4f\\n\", NR/10000)}' " + fname_total_all + " | bc) 'BEGIN{cnt=1} {sum+=$1;if (NR>=int(cnt*step)){print cnt,$0;cnt=cnt+1+int(NR-cnt*step)}} END{printf(\"Avg %.2f\\n\", sum/NR)}' " + fname_total_all + " > " + os.path.join(prefix, 'total_all.log'), shell=True)
225 | if args.decode:
226 | sp_decode.wait()
227 | sp_decode = sp.Popen("awk -vstep=$(awk 'END{printf(\"%.4f\\n\", NR/10000)}' " + fname_decode_all + " | bc) 'BEGIN{cnt=1} {sum+=$1;if (NR>=int(cnt*step)){print cnt,$0;cnt=cnt+1+int(NR-cnt*step)}} END{printf(\"Avg %.2f\\n\", sum/NR)}' " + fname_decode_all + " > " + os.path.join(prefix, 'decode_all.log'), shell=True)
228 | if args.network:
229 | sp_network.wait()
230 | sp_network = sp.Popen("awk -vstep=$(awk 'END{printf(\"%.4f\\n\", NR/10000)}' " + fname_network_all + " | bc) 'BEGIN{cnt=1} {sum+=$1;if (NR>=int(cnt*step)){print cnt,$0;cnt=cnt+1+int(NR-cnt*step)}} END{printf(\"Avg %.2f\\n\", sum/NR)}' " + fname_network_all + " > " + os.path.join(prefix, 'network_all.log'), shell=True)
231 |
232 | if args.queuing:
233 | sp_queuing.wait()
234 | if args.interarrival:
235 | sp_interarrival.wait()
236 | if args.smooth:
237 | sp_smooth.wait()
238 | if args.total:
239 | sp_total.wait()
240 | if args.decode:
241 | sp_decode.wait()
242 | if args.network:
243 | sp_network.wait()
244 |
245 | # clear up tmp files
246 | sp_clean = sp.Popen('ls ' + prefix + ' | grep ".tmp" | xargs -i rm ' + os.path.join(prefix, '{}'), shell=True)
247 | sp_clean.wait()
248 | endtime = datetime.datetime.now()
249 | print(f'{args.settings} totalTime: {(endtime - starttime).total_seconds()/60:.2f} minutes')
250 |
--------------------------------------------------------------------------------
/sim/algorithm.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | # variables define
4 | QLEN_STEP = 1
5 | QLEN_UPPER_BOUND_MAX = 8
6 | QWAIT_STEP = 4 # ms
7 | QWAIT_UPPER_BOUND_MAX = 64
8 | TARGET_ARRIVAL_INTERVAL = [1.0/60, 1.0/48, 1.0/36, 1.0/24]
9 |
10 | IS_PAUSE = 1
11 | NOT_PAUSE = 0
12 |
13 | PAUSE_ACTIVATE_SLOWDOWN = -1
14 | PAUSE_DEACTIVATE_SLOWDOWN = 1
15 |
16 | class Algorithm:
17 | def __init__(self, args, fname):
18 | self.last_framerate = 60
19 | self.arrv_ewma = 1 / 60.0 # exponential weighted moving average of the interarrival time
20 | self.arrv_ewmv = 0 # exponential weighted moving variance of the interarrival time
21 | self.arrv_ewm_factor = args.xi_arrv # ewm calculation factor of the arrival process
22 | self.serv_ewma = 0 # exponential weighted moving average of the service time
23 | self.serv_ewmv = 0 # exponential weighted moving variance of the service time
24 | self.serv_ewm_factor = args.xi_serv # ewm calculation factor of the service process
25 |
26 | self.W0 = args.wzero # maintain the queue at wzero on average
27 | self.q_high = 1 # q_high for qcn (deprecated)
28 | self.q_low = [4, 2] # adaptive q_low for qcn (deprecated)
29 | self.qwait_high = 16 # q_high for qwait_qcn (transient controller in AFR)
30 | self.qwait_low = [64, 14] # adaptive q_low for qwait_qcn (transient controller in AFR)
31 | self.rtt_th = [100] # rtt threshold to adjust the q_low
32 | self.outlier = 0.050 # outlier filter in preprocess
33 | self.net_rto = 0.050 # upper-bound the interarrival time
34 |
35 | self.args = args
36 | self.fout = open(os.path.join(args.action, args.settings, fname), 'w')
37 |
38 | self.frame_intvl = 0 # delayed frame rate increase
39 | self.effective_intvl = 1.0 / 60
40 | self.effective_flag = True
41 | self.pending_intvl = -1
42 |
43 | self.ms_in_s = 0.001
44 | self.min_intvl = 1.0 / 60 # 60fps is the maximum frame rate that games can offer
45 | self.max_intvl = 1.0 / 24
46 |
47 | self.init_flag = True
48 |
49 |         # qlen/qwait baselines: input-to-target_arriv mapping sequence
50 | self.qlen_target_map_seq = []
51 | self.qwait_target_map_seq = []
52 |
53 | # encoder pause baseline
54 | self.pause_dimension = args.pause_dimension
55 | self.pause_stop_threshold = args.pause_stop_threshold
56 | self.pause_activate_threshold = args.pause_activate_threshold
57 | # 0 means not pause and 1 means pause
58 | self.pause_state = NOT_PAUSE
59 | self.pause_slowdown_stash = 0
60 |
61 | # when new intervals fed to the algorithm, first update the network measurement
62 | def preprocess(self, arrv_intvl, serv_intvl):
63 | for intvl in arrv_intvl:
64 | intvl = intvl * self.ms_in_s
65 | self.frame_intvl += intvl
66 | # when an interarrival time is too large, upper bound it to avoid the bias
67 | intvl = min(intvl, self.net_rto)
68 | self.arrv_ewma += self.arrv_ewm_factor * (intvl - self.arrv_ewma)
69 | self.arrv_ewmv += self.arrv_ewm_factor * ((intvl - self.arrv_ewma)**2 - self.arrv_ewmv)
70 |
71 | for intvl in serv_intvl:
72 |             # outlier rejection for the service time
73 | intvl = intvl * self.ms_in_s
74 | if intvl < self.outlier:
75 | self.serv_ewma += self.serv_ewm_factor * (intvl - self.serv_ewma)
76 | self.serv_ewmv += self.serv_ewm_factor * ((intvl - self.serv_ewma)**2 - self.serv_ewmv)
77 |
78 | if self.init_flag:
79 | self.target_arrv = self.game_bound(self.arrv_ewma)
80 | self.init_flag = False
81 |
82 | # Heavy-traffic analysis (the stationary controller of AFR)
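    # Kingman's GI/G/1 heavy-traffic approximation gives the expected waiting
    # time W ~= ((c_a^2 + c_s^2) / 2) * t_dec^2 / (T - t_dec) for a target
    # interarrival time T; setting W = W0 and solving for T yields the update
    # below: T = t_dec * (((c_a^2 + c_s^2) / 2) * (t_dec / W0) + 1).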
83 | def hta(self):
84 | if self.arrv_ewma < self.ms_in_s:
85 | arrv_c2 = 0
86 | else:
87 | arrv_c2 = self.arrv_ewmv / (self.arrv_ewma**2)
88 |
89 | if self.serv_ewma < self.ms_in_s:
90 | serv_c2 = 0
91 | else:
92 | serv_c2 = self.serv_ewmv / (self.serv_ewma**2)
93 |
94 | t_dec = self.serv_ewma
95 | self.target_arrv = (((arrv_c2 + serv_c2) / 2) * (t_dec / self.W0) + 1) * t_dec
96 |
97 | # a deprecated baseline
98 | def qcn(self, qlen, rtt):
99 | q_low = self.q_low[-1]
100 | for idx in range(len(self.rtt_th)):
101 | if rtt <= self.rtt_th[idx]:
102 | q_low = self.q_low[idx]
103 | break
104 | if qlen > q_low:
105 | reduction = self.min_intvl / self.max_intvl
106 | elif qlen < self.q_high:
107 | reduction = 1
108 | else:
109 | reduction = ((qlen - self.q_high) * (self.min_intvl / self.max_intvl) + (q_low - qlen))/(q_low - self.q_high)
110 | self.target_arrv /= reduction
111 |
112 | # the transient controller of AFR
113 | def qwait_qcn(self, qwait, rtt):
114 | qwait_low = self.qwait_low[-1]
115 | for idx in range(len(self.rtt_th)):
116 | if rtt <= self.rtt_th[idx]:
117 | qwait_low = self.qwait_low[idx]
118 | break
119 | if qwait > qwait_low:
120 | reduction = self.min_intvl / self.max_intvl
121 | elif qwait < self.qwait_high:
122 | reduction = 1
123 | else:
124 | reduction = ((qwait - self.qwait_high) * (self.min_intvl / self.max_intvl) + (qwait_low - qwait))/(qwait_low - self.qwait_high)
125 | self.target_arrv /= reduction
126 |
127 | # mapping the queue length to target frame-rate (interarrival time)
128 | def qlen_baseline(self, qlen):
129 | qlen_idx = max(0, int(qlen / QLEN_STEP))
130 | # get the corresponding target arrival interval in mapping seq
131 | self.target_arrv = self.qlen_target_map_seq[min(len(self.qlen_target_map_seq)-1, qlen_idx)]
132 |
133 | # mapping the queuing delay to target frame-rate (interarrival time)
134 | def qwait_baseline(self, qwait):
135 | qwait_idx = max(0, int(qwait / QWAIT_STEP))
136 | # get the corresponding target arrival interval in mapping seq
137 | self.target_arrv = self.qwait_target_map_seq[min(len(self.qwait_target_map_seq)-1, qwait_idx)]
138 |
139 |     # mapping the pause dimension to the target pause command (interarrival time)
140 | def pause_baseline(self, qlen, qwait):
141 | input_value = qwait if self.pause_dimension == 'wait' else qlen
142 | # Moore State Machine
143 | if input_value > self.pause_activate_threshold and self.pause_state == NOT_PAUSE:
144 | self.pause_state = IS_PAUSE
145 | elif input_value < self.pause_stop_threshold and self.pause_state == IS_PAUSE:
146 | self.pause_state = NOT_PAUSE
147 | else:
148 | pass
149 |
150 |
151 | # frame-rate should be bounded
152 | def game_bound(self, arrv):
153 | arrv = max(self.min_intvl, arrv)
154 | arrv = min(self.max_intvl, arrv)
155 | return arrv
156 |
157 | # frame-rate should be quantized to avoid frequent adjustments
158 | def quantize(self, target_arrv):
159 | if abs(self.last_framerate - 1 / self.target_arrv) > self.args.frame_rate_level:
160 | self.last_framerate = round((1 / target_arrv) / self.args.frame_rate_level) * self.args.frame_rate_level
161 | return 1.0 / self.last_framerate
162 |
163 | # if the frame-rate adjustment is too frequent, this will block those frequent adjustments
164 | # Specifically, after a frame-rate adjustment, the subsequent frame-adjustments within
165 | # self.effective_intvl will be considered non-effective
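    # In effect, rate decreases take effect immediately, while a rate increase
    # is held as pending until the accumulated frame interval exceeds
    # args.increase_delay * arrv_ewma.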
166 | def delay_effective(self, target_arrv):
167 | if target_arrv > self.effective_intvl: # rate decrease
168 | self.effective_flag = True
169 | self.frame_intvl = 0
170 | self.pending_intvl = -1
171 | self.effective_intvl = target_arrv
172 | else:
173 | if not self.effective_flag: # pending rate increase actions
174 | if self.frame_intvl > self.args.increase_delay * self.arrv_ewma:
175 | self.effective_flag = True
176 | self.frame_intvl = 0
177 | if target_arrv < self.pending_intvl: # new higher rate
178 | self.effective_intvl = self.pending_intvl
179 | self.pending_intvl = target_arrv
180 | self.effective_flag = False
181 | else: # less higher rate
182 | self.effective_intvl = target_arrv
183 | self.pending_intvl = -1
184 | else:
185 | if target_arrv > self.pending_intvl: # new lower rate
186 | self.pending_intvl = target_arrv
187 | else:
188 | self.effective_flag = False
189 | self.frame_intvl = 0
190 | self.pending_intvl = target_arrv
191 | return self.effective_intvl
192 |
193 | # the main callback function of algorithm.py
194 | def predict(self, qlen, qwait, arrv_intvl, serv_intvl, rtt):
195 |         self.target_arrv = self.arrv_ewma # set the default target interval to the measured average interarrival time
196 | self.preprocess(arrv_intvl, serv_intvl)
197 | if self.args.algorithm == '60fps':
198 | self.target_arrv = self.min_intvl
199 | else:
200 | if self.args.algorithm == 'native':
201 | self.target_arrv = self.arrv_ewma
202 | elif self.args.algorithm == 'afr':
203 | self.hta()
204 | self.target_arrv = self.game_bound(self.target_arrv)
205 | self.qwait_qcn(qwait, rtt)
206 | elif self.args.algorithm == 'afrqcn':
207 | self.hta()
208 | self.target_arrv = self.game_bound(self.target_arrv)
209 | self.qcn(qlen, rtt)
210 | elif self.args.algorithm == 'hta':
211 | self.hta()
212 | elif self.args.algorithm == 'qlen':
213 | self.qlen_baseline(qlen)
214 | elif self.args.algorithm == 'qwait':
215 | self.qwait_baseline(qwait)
216 | elif self.args.algorithm == 'pause' and self.args.pause_dimension != 'hta':
217 | self.pause_baseline(qlen, qwait)
218 | self.target_arrv = self.min_intvl
219 | elif self.args.algorithm == 'pause' and self.args.pause_dimension == 'hta':
220 | self.hta()
221 | elif self.args.algorithm == 'bba':
222 | self.bba(qlen)
223 | elif self.args.algorithm == 'txrate':
224 | self.target_arrv = self.serv_ewma / self.args.rho
225 | else:
226 | raise NotImplementedError
227 | self.target_arrv = self.game_bound(self.target_arrv)
228 | self.target_arrv = self.quantize(self.target_arrv)
229 | self.target_arrv = self.delay_effective(self.target_arrv)
230 | if self.args.algorithm == 'pause' and self.args.pause_dimension != 'hta':
231 |             # pause baselines
232 | if self.pause_state == IS_PAUSE:
233 | self.target_arrv = - self.target_arrv
234 | # elif self.pause_state == NOT_PAUSE:
235 | # self.slowdown = PAUSE_DEACTIVATE_SLOWDOWN
236 | if self.args.algorithm == 'pause' and self.args.pause_dimension == 'hta':
237 | self.pause_slowdown_stash += self.arrv_ewma / self.target_arrv
238 | self.target_arrv = -self.min_intvl
239 | if self.pause_slowdown_stash >= 1:
240 | self.pause_slowdown_stash -= 1
241 | self.target_arrv = - self.target_arrv
242 | # print(self.target_arrv)
243 | self.fout.write("%d\n" % (1 / self.target_arrv))
244 | return self.target_arrv
245 |
246 | def __del__(self):
247 | self.fout.close()
248 |
249 | '''
250 | map input values to TARGET_ARRIVAL_INTERVAL
251 | return a list; each item in the list is a mapping-sequence index array
252 | '''
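# For example, with the four target intervals above, seq_len == 4 admits only
# [0, 1, 2, 3], while seq_len == 5 yields [0, 0, 1, 2, 3], [0, 1, 1, 2, 3],
# and [0, 1, 2, 2, 3].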
253 | def map_seq_idx_array_producer(seq_len):
254 | result_array = []
255 |     if seq_len < len(TARGET_ARRIVAL_INTERVAL): # cannot map the sequence surjectively onto TARGET_ARRIVAL_INTERVAL
256 | return []
257 |
258 | # get every valid combination (monotone increase)
259 | map_seq_partition_comb_list = [0 for _ in range(len(TARGET_ARRIVAL_INTERVAL) - 1)]
260 | # -1: n items need (n-1) parting line
261 |
262 | # init
263 |     map_seq_partition_comb_list[-1] = (seq_len-1) -1 # the final parting line is fixed, because [0, (N items), max, max] is equivalent to [0, (N items), MAX]
264 | for idx in range(len(map_seq_partition_comb_list)):
265 | map_seq_partition_comb_list[-1 - idx] = map_seq_partition_comb_list[-1] - idx # start from [0,.., MAX-1, MAX]
266 |
267 |     default_map_seq_partition_comb_list = [item for item in map_seq_partition_comb_list]
268 | # get every combination
269 | while(map_seq_partition_comb_list[0] >= 0):
270 | seq_idx_array = [0 for _ in range(seq_len)]
271 | comb_list_idx = 0
272 | target_arriv_idx = 0
273 | for idx in range(len(seq_idx_array)): # seq_idx_array[0] = 0
274 | if idx > map_seq_partition_comb_list[comb_list_idx]:
275 | comb_list_idx = min(comb_list_idx+1, len(map_seq_partition_comb_list)-1)
276 | target_arriv_idx = min(target_arriv_idx+1, len(TARGET_ARRIVAL_INTERVAL)-1)
277 | seq_idx_array[idx] = target_arriv_idx
278 | result_array.append([item for item in seq_idx_array]) # deep copy
279 |
280 | # next combination
281 | is_continue = True
282 | comb_list_moving_offset = len(map_seq_partition_comb_list)-1 - 1
283 | while(is_continue):
284 | is_continue = False
285 | map_seq_partition_comb_list[comb_list_moving_offset] -= 1
286 | if comb_list_moving_offset == 0:
287 | if map_seq_partition_comb_list[comb_list_moving_offset] < 0:
288 | break
289 | else:
290 | if map_seq_partition_comb_list[comb_list_moving_offset] <= map_seq_partition_comb_list[comb_list_moving_offset - 1]: # can't partition in same location
291 |                 map_seq_partition_comb_list[comb_list_moving_offset] = default_map_seq_partition_comb_list[comb_list_moving_offset]
292 | comb_list_moving_offset -= 1
293 | is_continue = True
294 |
295 | return result_array
296 |
297 |
--------------------------------------------------------------------------------