├── .gitignore
├── CNAME
├── LICENSE
├── README.md
├── _config.yml
├── build
├── build_linux.sh
├── build_mac.sh
├── build_win.bat
└── spec
│ ├── tracecat_linux_dir.spec
│ ├── tracecat_linux_one.spec
│ ├── tracecat_mac_dir.spec
│ ├── tracecat_mac_one.spec
│ ├── tracecat_win_dir.spec
│ └── tracecat_win_one.spec
├── configs
└── perfetto
│ └── perfetto.conf
├── demon
├── Makefile
├── includes
│ ├── llist.c
│ └── llist.h
├── jni
│ ├── Android.mk
│ └── Application.mk
└── tracecatd.c
├── docs
└── user_guide.md
├── framework
├── config.py
├── executor.py
├── executors
│ └── adb_executor.py
├── helpers.py
├── module.py
├── objects.py
├── plotter.py
├── sampler.py
├── source.py
└── sources
│ ├── basefs.py
│ ├── ftrace.py
│ ├── instruments.py
│ ├── perfetto.py
│ ├── procfs.py
│ ├── profiler.py
│ ├── simpleperf.py
│ └── sysfs.py
├── libs
├── instruments
│ └── tracecat.tracetemplate
└── perfetto
│ ├── linux-amd64
│ └── trace_processor_shell
│ ├── mac-amd64
│ └── trace_processor_shell
│ └── windows-amd64
│ └── trace_processor_shell.exe
├── modules
├── app_load
│ └── app_load_module.py
├── cpu_freq
│ └── cpu_freq_module.py
├── cpu_freq2
│ └── cpu_freq2_module.py
├── cpu_freq_stat
│ └── cpu_freq_stat_module.py
├── cpu_freq_stat2
│ └── cpu_freq_stat2_module.py
├── cpu_idle
│ └── cpu_idle_module.py
├── cpu_load
│ └── cpu_load_module.py
├── cpu_load2
│ └── cpu_load2_module.py
├── cpu_load_summary
│ └── cpu_load_summary_module.py
├── ddr_freq
│ └── ddr_freq_module.py
├── ddr_freq_stat
│ └── ddr_freq_stat_module.py
├── dsu_freq
│ └── dsu_freq_module.py
├── gpu_freq
│ └── gpu_freq_module.py
├── gpu_freq_stat
│ └── gpu_freq_stat_module.py
├── ios_app_load
│ └── ios_app_load_module.py
├── ios_cpu_freq
│ └── ios_cpu_freq_module.py
├── ios_cpu_load
│ └── ios_cpu_load_module.py
├── profiler
│ └── profiler_module.py
├── simpleperf
│ └── simpleperf_module.py
└── thermal_zone
│ └── thermal_zone_module.py
├── scripts
├── run_all.py
└── self_test.sh
├── tracecat.py
└── venv
└── requirements.txt
/.gitignore:
--------------------------------------------------------------------------------
1 | .vscode
2 | **/__pycache__
3 | build
4 | dist
5 | demon/libs
6 | demon/obj
7 | runs/*
8 | venv/Include
9 | venv/Lib
10 | venv/Scripts
11 | venv/pyvenv.cfg
12 | venv/bin
13 | venv/include
14 | venv/lib
15 | venv/lib64
16 | venv/share
17 |
18 |
--------------------------------------------------------------------------------
/CNAME:
--------------------------------------------------------------------------------
1 | tracecat.kernel-tour.org
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Cyrus Huang (cyrus.kernel@gmail.com)
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Tracecat
2 |
3 | A general kernel trace analysis framework
4 |
5 | ## Download
6 |
7 | `git clone https://github.com/kernel-cyrus/tracecat.git`
8 |
9 | ## Start to Run
10 |
11 | #### Run on Linux or MacOS
12 |
13 | Setup environment:
14 |
15 | ```
16 | 1. Install python3
17 | sudo apt-get install python3
18 |
19 | 2. Install virtualenv
20 | pip3 install virtualenv
21 |
22 | 3. Init virtual environment
23 | python3 -m venv ./venv
24 |
25 | 4. Enter virtual environment
26 | source ./venv/bin/activate
27 |
28 | 5. Install required packages
29 | pip3 install -r ./venv/requirements.txt
30 |
31 | 6. Quit virtual environment
32 | deactivate
33 | ```
34 |
35 | Then you can simply run by:
36 |
37 | ```
38 | 1. Enter virtual environment
39 | source ./venv/bin/activate
40 |
41 | 2. Run tracecat
42 | python3 tracecat.py
43 |
44 | 3. Quit virtual environment
45 | deactivate
46 | ```
47 |
48 | #### Run on Windows
49 |
50 | Setup environment:
51 |
52 | ```
53 | 1. Download and Install python3
54 | https://www.python.org/downloads/
55 |
56 | 2. Install virtualenv
57 | pip install virtualenv
58 |
59 | 3. Init virtual environment
60 | python -m venv .\venv
61 |
62 | 4. Enter virtual environment
63 | .\venv\Scripts\activate
64 |
65 | 5. Install required packages
66 | pip3 install -r .\venv\requirements.txt
67 |
68 | 6. Quit virtual environment
69 | .\venv\Scripts\deactivate
70 | ```
71 |
72 | Then you can simply run by:
73 |
74 | ```
75 | 1. Enter virtual environment
76 | .\venv\Scripts\activate
77 |
78 | 2. Run tracecat
79 | python tracecat.py
80 |
81 | 3. Quit virtual environment
82 | .\venv\Scripts\deactivate
83 | ```
84 |
85 | ## Build Binary Distribution
86 |
87 | You can also build tracecat into executables, so that it can simply run without any installations.
88 |
89 | Build Windows release
90 |
91 | ```
92 | .\build\build_win.bat (run on Windows)
93 | ```
94 |
95 | Build Linux release
96 |
97 | ```
98 | source ./build/build_linux.sh (run on Linux)
99 | ```
100 |
101 | Build Mac release
102 |
103 | ```
104 | source ./build/build_mac.sh (run on MacOS)
105 | ```
106 |
107 | Then you can get distribution package from:
108 |
109 | ```
110 | ./build/dist/tracecat.zip
111 | ```
112 |
113 | ## User Guide
114 |
115 | See docs/user_guide.md
116 |
117 | ## Contact
118 |
119 | Author: Cyrus Huang
120 |
121 | Github:
122 |
--------------------------------------------------------------------------------
/_config.yml:
--------------------------------------------------------------------------------
1 | title: Tracecat
2 | description: A general kernel trace analysis framework
3 | theme: jekyll-theme-minimal
4 |
--------------------------------------------------------------------------------
/build/build_linux.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if ! command -v ndk-build &> /dev/null
4 | then
5 | echo "ERROR: ndk-build not found, please install android NDK first."
6 | return
7 | fi
8 |
9 | if ! [ -f "./venv/bin/activate" ]; then
10 | echo "ERROR: Please run build from tracecat root path."
11 | return
12 | fi
13 |
14 | (cd demon; ndk-build clean; ndk-build)
15 |
16 | if ! [ -f "./demon/obj/local/arm64-v8a/tracecatd" ]; then
17 | echo "ERROR: tracecatd build failed, please check the errors."
18 | return
19 | fi
20 |
21 | source ./venv/bin/activate
22 |
23 | rm -rf ./build/build
24 |
25 | rm -rf ./build/dist
26 |
27 | pyinstaller ./build/spec/tracecat_linux_dir.spec --workpath="./build/build/" --distpath="./build/dist/"
28 |
29 | pyinstaller ./scripts/run_all.py --specpath="./build/build/" --workpath="./build/build/" --distpath="./build/dist/"
30 |
31 | cp -rf ./build/dist/run_all/* ./build/dist/tracecat/
32 |
33 | tar -zcvf ./build/dist/tracecat.tar.gz -C ./build/dist/ tracecat/
34 |
35 | deactivate
--------------------------------------------------------------------------------
/build/build_mac.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if ! command -v ndk-build &> /dev/null
4 | then
5 | echo "ERROR: ndk-build not found, please install android NDK first."
6 | return
7 | fi
8 |
9 | if ! [ -f "./venv/bin/activate" ]; then
10 | echo "ERROR: Please run build from tracecat root path."
11 | return
12 | fi
13 |
14 | (cd demon; ndk-build clean; ndk-build)
15 |
16 | if ! [ -f "./demon/obj/local/arm64-v8a/tracecatd" ]; then
17 | echo "ERROR: tracecatd build failed, please check the errors."
18 | return
19 | fi
20 |
21 | source ./venv/bin/activate
22 |
23 | rm -rf ./build/build
24 |
25 | rm -rf ./build/dist
26 |
27 | pyinstaller ./build/spec/tracecat_mac_dir.spec --workpath="./build/build/" --distpath="./build/dist/"
28 |
29 | pyinstaller ./scripts/run_all.py --specpath="./build/build/" --workpath="./build/build/" --distpath="./build/dist/"
30 |
31 | codesign -f -s "Apple Development: archman@126.com (AQ7V4926SD)" ./build/dist/tracecat/tracecat
32 |
33 | codesign -f -s "Apple Development: archman@126.com (AQ7V4926SD)" ./build/dist/tracecat/Python3
34 |
35 | codesign -f -s "Apple Development: archman@126.com (AQ7V4926SD)" ./build/dist/run_all/run_all
36 |
37 | codesign -f -s "Apple Development: archman@126.com (AQ7V4926SD)" ./build/dist/run_all/Python3
38 |
39 | cp -rf ./build/dist/run_all/* ./build/dist/tracecat/
40 |
41 | tar -zcvf ./build/dist/tracecat.tar.gz -C ./build/dist/ tracecat/
42 |
43 | deactivate
--------------------------------------------------------------------------------
/build/build_win.bat:
--------------------------------------------------------------------------------
1 | WHERE ndk-build
2 | @ IF %ERRORLEVEL% NEQ 0 (
3 | ECHO ERROR: ndk-build not found, please install android NDK first.
4 | EXIT /B 2
5 | )
6 |
7 | @ IF NOT EXIST ".\venv\Scripts\activate" (
8 | ECHO ERROR: Please run build from tracecat root path.
9 | EXIT /B
10 | )
11 |
12 | cd demon
13 |
14 | call ndk-build clean
15 |
16 | call ndk-build
17 |
18 | cd ..
19 |
20 | @ IF NOT EXIST ".\demon\obj\local\arm64-v8a\tracecatd" (
21 | ECHO ERROR: tracecatd build failed, please check the errors.
22 | EXIT /B
23 | )
24 |
25 | call .\venv\Scripts\activate
26 |
27 | rd /s /q .\build\build\
28 |
29 | rd /s /q .\build\dist\
30 |
31 | pyinstaller .\build\spec\tracecat_win_dir.spec --workpath=".\\build\build\\" --distpath=".\\build\dist\\"
32 |
33 | pyinstaller .\scripts\run_all.py --specpath=".\\build\\build\\" --workpath=".\\build\\build\\" --distpath=".\\build\\dist\\"
34 |
35 | xcopy /s /y .\build\dist\run_all .\build\dist\tracecat
36 |
37 | tar -acvf .\build\dist\tracecat.zip -C .\build\dist\ tracecat
38 |
39 | call deactivate
--------------------------------------------------------------------------------
/build/spec/tracecat_linux_dir.spec:
--------------------------------------------------------------------------------
1 | # -*- mode: python ; coding: utf-8 -*-
2 |
3 | block_cipher = None
4 |
5 |
6 | a = Analysis(['../../tracecat.py'],
7 | pathex=['../../'],
8 | binaries=[],
9 | datas=[
10 | ('../../venv/lib/python*/site-packages/perfetto/trace_processor', './perfetto/trace_processor'),
11 | ('../../libs/', './libs'),
12 | ('../../demon/obj/local/arm64-v8a/tracecatd', './demon/obj/local/arm64-v8a/'),
13 | ('../../configs', './configs')
14 | ],
15 | hiddenimports=[],
16 | hookspath=[],
17 | runtime_hooks=[],
18 | excludes=[],
19 | win_no_prefer_redirects=False,
20 | win_private_assemblies=False,
21 | cipher=block_cipher,
22 | noarchive=False)
23 | pyz = PYZ(a.pure, a.zipped_data,
24 | cipher=block_cipher)
25 | exe = EXE(pyz,
26 | a.scripts,
27 | [],
28 | exclude_binaries=True,
29 | name='tracecat',
30 | debug=False,
31 | bootloader_ignore_signals=False,
32 | strip=False,
33 | upx=True,
34 | console=True )
35 | coll = COLLECT(exe,
36 | a.binaries,
37 | a.zipfiles,
38 | a.datas,
39 | strip=False,
40 | upx=True,
41 | upx_exclude=[],
42 | name='tracecat')
43 |
--------------------------------------------------------------------------------
/build/spec/tracecat_linux_one.spec:
--------------------------------------------------------------------------------
1 | # -*- mode: python ; coding: utf-8 -*-
2 |
3 | block_cipher = None
4 |
5 |
6 | a = Analysis(['../../tracecat.py'],
7 | pathex=['../../'],
8 | binaries=[],
9 | datas=[
10 | ('../../venv/lib/python*/site-packages/perfetto/trace_processor', './perfetto/trace_processor'),
11 | ('../../libs/', './libs'),
12 | ('../../demon/obj/local/arm64-v8a/tracecatd', './demon/obj/local/arm64-v8a/'),
13 | ('../../configs', './configs')
14 | ],
15 | hiddenimports=[],
16 | hookspath=[],
17 | runtime_hooks=[],
18 | excludes=[],
19 | win_no_prefer_redirects=False,
20 | win_private_assemblies=False,
21 | cipher=block_cipher,
22 | noarchive=False)
23 | pyz = PYZ(a.pure, a.zipped_data,
24 | cipher=block_cipher)
25 | exe = EXE(pyz,
26 | a.scripts,
27 | a.binaries,
28 | a.zipfiles,
29 | a.datas,
30 | [],
31 | name='tracecat',
32 | debug=False,
33 | bootloader_ignore_signals=False,
34 | strip=False,
35 | upx=True,
36 | upx_exclude=[],
37 | runtime_tmpdir=None,
38 | console=True )
39 |
--------------------------------------------------------------------------------
/build/spec/tracecat_mac_dir.spec:
--------------------------------------------------------------------------------
1 | # -*- mode: python ; coding: utf-8 -*-
2 |
3 | block_cipher = None
4 |
5 |
6 | a = Analysis(['../../tracecat.py'],
7 | pathex=['../../'],
8 | binaries=[],
9 | datas=[
10 | ('../../venv/lib/python*/site-packages/perfetto/trace_processor', './perfetto/trace_processor'),
11 | ('../../libs/', './libs'),
12 | ('../../demon/obj/local/arm64-v8a/tracecatd', './demon/obj/local/arm64-v8a/'),
13 | ('../../configs', './configs')
14 | ],
15 | hiddenimports=['cmath'],
16 | hookspath=[],
17 | runtime_hooks=[],
18 | excludes=[],
19 | win_no_prefer_redirects=False,
20 | win_private_assemblies=False,
21 | cipher=block_cipher,
22 | noarchive=False)
23 | pyz = PYZ(a.pure, a.zipped_data,
24 | cipher=block_cipher)
25 | exe = EXE(pyz,
26 | a.scripts,
27 | [],
28 | exclude_binaries=True,
29 | name='tracecat',
30 | debug=False,
31 | bootloader_ignore_signals=False,
32 | strip=False,
33 | upx=True,
34 | console=True )
35 | coll = COLLECT(exe,
36 | a.binaries,
37 | a.zipfiles,
38 | a.datas,
39 | strip=False,
40 | upx=True,
41 | upx_exclude=[],
42 | name='tracecat')
43 |
--------------------------------------------------------------------------------
/build/spec/tracecat_mac_one.spec:
--------------------------------------------------------------------------------
1 | # -*- mode: python ; coding: utf-8 -*-
2 |
3 | block_cipher = None
4 |
5 |
6 | a = Analysis(['../../tracecat.py'],
7 | pathex=['../../'],
8 | binaries=[],
9 | datas=[
10 | ('../../venv/lib/python*/site-packages/perfetto/trace_processor', './perfetto/trace_processor'),
11 | ('../../libs/', './libs'),
12 | ('../../demon/obj/local/arm64-v8a/tracecatd', './demon/obj/local/arm64-v8a/'),
13 | ('../../configs', './configs')
14 | ],
15 | hiddenimports=['cmath'],
16 | hookspath=[],
17 | runtime_hooks=[],
18 | excludes=[],
19 | win_no_prefer_redirects=False,
20 | win_private_assemblies=False,
21 | cipher=block_cipher,
22 | noarchive=False)
23 | pyz = PYZ(a.pure, a.zipped_data,
24 | cipher=block_cipher)
25 | exe = EXE(pyz,
26 | a.scripts,
27 | a.binaries,
28 | a.zipfiles,
29 | a.datas,
30 | [],
31 | name='tracecat',
32 | debug=False,
33 | bootloader_ignore_signals=False,
34 | strip=False,
35 | upx=True,
36 | upx_exclude=[],
37 | runtime_tmpdir=None,
38 | console=True )
39 |
--------------------------------------------------------------------------------
/build/spec/tracecat_win_dir.spec:
--------------------------------------------------------------------------------
1 | # -*- mode: python ; coding: utf-8 -*-
2 |
3 | block_cipher = None
4 |
5 |
6 | a = Analysis(['..\\..\\tracecat.py'],
7 | pathex=['..\\..\\'],
8 | binaries=[],
9 | datas=[
10 | ('../../venv/lib/site-packages/perfetto/trace_processor', './perfetto/trace_processor'),
11 | ('../../libs/', './libs'),
12 | ('../../demon/obj/local/arm64-v8a/tracecatd', './demon/obj/local/arm64-v8a/'),
13 | ('../../configs', './configs')
14 | ],
15 | hiddenimports=[],
16 | hookspath=[],
17 | runtime_hooks=[],
18 | excludes=[],
19 | win_no_prefer_redirects=False,
20 | win_private_assemblies=False,
21 | cipher=block_cipher,
22 | noarchive=False)
23 | pyz = PYZ(a.pure, a.zipped_data,
24 | cipher=block_cipher)
25 | exe = EXE(pyz,
26 | a.scripts,
27 | [],
28 | exclude_binaries=True,
29 | name='tracecat',
30 | debug=False,
31 | bootloader_ignore_signals=False,
32 | strip=False,
33 | upx=True,
34 | console=True )
35 | coll = COLLECT(exe,
36 | a.binaries,
37 | a.zipfiles,
38 | a.datas,
39 | strip=False,
40 | upx=True,
41 | upx_exclude=[],
42 | name='tracecat')
43 |
--------------------------------------------------------------------------------
/build/spec/tracecat_win_one.spec:
--------------------------------------------------------------------------------
1 | # -*- mode: python ; coding: utf-8 -*-
2 |
3 | block_cipher = None
4 |
5 |
6 | a = Analysis(['..\\..\\tracecat.py'],
7 | pathex=['..\\..\\'],
8 | binaries=[],
9 | datas=[
10 | ('../../venv/lib/site-packages/perfetto/trace_processor', './perfetto/trace_processor'),
11 | ('../../libs/', './libs'),
12 | ('../../demon/obj/local/arm64-v8a/tracecatd', './demon/obj/local/arm64-v8a/'),
13 | ('../../configs', './configs')
14 | ],
15 | hiddenimports=[],
16 | hookspath=[],
17 | runtime_hooks=[],
18 | excludes=[],
19 | win_no_prefer_redirects=False,
20 | win_private_assemblies=False,
21 | cipher=block_cipher,
22 | noarchive=False)
23 | pyz = PYZ(a.pure, a.zipped_data,
24 | cipher=block_cipher)
25 | exe = EXE(pyz,
26 | a.scripts,
27 | a.binaries,
28 | a.zipfiles,
29 | a.datas,
30 | [],
31 | name='tracecat',
32 | debug=False,
33 | bootloader_ignore_signals=False,
34 | strip=False,
35 | upx=True,
36 | upx_exclude=[],
37 | runtime_tmpdir=None,
38 | console=True )
39 |
--------------------------------------------------------------------------------
/configs/perfetto/perfetto.conf:
--------------------------------------------------------------------------------
1 | buffers: {
2 | size_kb: 63488
3 | fill_policy: RING_BUFFER
4 | }
5 | buffers: {
6 | size_kb: 2048
7 | fill_policy: RING_BUFFER
8 | }
9 | flush_period_ms: 30000
10 | incremental_state_config {
11 | clear_period_ms: 5000
12 | }
13 |
--------------------------------------------------------------------------------
/demon/Makefile:
--------------------------------------------------------------------------------
1 | tracecatd: llist.o tracecatd.c
2 | gcc -Wall -Wextra llist.o tracecatd.c -o tracecatd
3 |
4 | llist.o: includes/llist.h includes/llist.c
5 | gcc -Wall -Wextra -fPIC includes/llist.h -c includes/llist.c
6 |
7 | clean:
8 | rm -rf *.o *.gch tracecatd
9 |
--------------------------------------------------------------------------------
/demon/includes/llist.c:
--------------------------------------------------------------------------------
1 | /* llist.c
2 | * Generic Linked List implementation
3 | */
4 |
5 | #include
6 | #include
7 | #include "llist.h"
8 |
9 | llist *llist_create(void *new_data)
10 | {
11 | struct node *new_node;
12 |
13 | llist *new_list = (llist *)malloc(sizeof (llist));
14 | *new_list = (struct node *)malloc(sizeof (struct node));
15 |
16 | new_node = *new_list;
17 | new_node->data = new_data;
18 | new_node->next = NULL;
19 | return new_list;
20 | }
21 |
22 | void llist_free(llist *list)
23 | {
24 | struct node *curr = *list;
25 | struct node *next;
26 |
27 | while (curr != NULL) {
28 | next = curr->next;
29 | free(curr);
30 | curr = next;
31 | }
32 |
33 | free(list);
34 | }
35 |
36 | // Returns 0 on failure
37 | int llist_add_inorder(void *data, llist *list,
38 | int (*comp)(void *, void *))
39 | {
40 | struct node *new_node;
41 | struct node *curr;
42 | struct node *prev = NULL;
43 |
44 | if (list == NULL || *list == NULL) {
45 | fprintf(stderr, "llist_add_inorder: list is null\n");
46 | return 0;
47 | }
48 |
49 | curr = *list;
50 | if (curr->data == NULL) {
51 | curr->data = data;
52 | return 1;
53 | }
54 |
55 | new_node = (struct node *)malloc(sizeof (struct node));
56 | new_node->data = data;
57 |
58 | // Find spot in linked list to insert new node
59 | while (curr != NULL && curr->data != NULL && comp(curr->data, data) < 0) {
60 | prev = curr;
61 | curr = curr->next;
62 | }
63 | new_node->next = curr;
64 |
65 | if (prev == NULL)
66 | *list = new_node;
67 | else
68 | prev->next = new_node;
69 |
70 | return 1;
71 | }
72 |
73 | void llist_push(llist *list, void *data)
74 | {
75 | struct node *head;
76 | struct node *new_node;
77 | if (list == NULL || *list == NULL) {
78 | fprintf(stderr, "llist_add_inorder: list is null\n");
79 | }
80 |
81 | head = *list;
82 |
83 | // Head is empty node
84 | if (head->data == NULL)
85 | head->data = data;
86 |
87 | // Head is not empty, add new node to front
88 | else {
89 | new_node = malloc(sizeof (struct node));
90 | new_node->data = data;
91 | new_node->next = head;
92 | *list = new_node;
93 | }
94 | }
95 |
96 | void *llist_pop(llist *list)
97 | {
98 | void *popped_data;
99 | struct node *head = *list;
100 |
101 | if (list == NULL || head->data == NULL)
102 | return NULL;
103 |
104 | popped_data = head->data;
105 | *list = head->next;
106 |
107 | free(head);
108 |
109 | return popped_data;
110 | }
111 |
112 | void llist_print(llist *list, void (*print)(void *))
113 | {
114 | struct node *curr = *list;
115 | while (curr != NULL) {
116 | print(curr->data);
117 | printf(" ");
118 | curr = curr->next;
119 | }
120 | putchar('\n');
121 | }
122 |
--------------------------------------------------------------------------------
/demon/includes/llist.h:
--------------------------------------------------------------------------------
1 | /* llist.h
2 | * Generic Linked List
3 | */
4 |
5 | struct node {
6 | void *data;
7 | struct node *next;
8 | };
9 |
10 | typedef struct node * llist;
11 |
12 | /* llist_create: Create a linked list */
13 | llist *llist_create(void *data);
14 |
15 | /* llist_free: Free a linked list */
16 | void llist_free(llist *list);
17 |
18 | /* llist_add_inorder: Add to sorted linked list */
19 | int llist_add_inorder(void *data, llist *list,
20 | int (*comp)(void *, void *));
21 |
22 | /* llist_push: Add to head of list */
23 | void llist_push(llist *list, void *data);
24 |
25 | /* llist_pop: remove and return head of linked list */
26 | void *llist_pop(llist *list);
27 |
28 | /* llist_print: print linked list */
29 | void llist_print(llist *list, void (*print)(void *data));
30 |
--------------------------------------------------------------------------------
/demon/jni/Android.mk:
--------------------------------------------------------------------------------
1 | include $(CLEAR_VARS)
2 | LOCAL_PATH := .
3 | LOCAL_MODULE := tracecatd
4 | LOCAL_SRC_FILES := includes/llist.c tracecatd.c
5 | include $(BUILD_EXECUTABLE)
--------------------------------------------------------------------------------
/demon/jni/Application.mk:
--------------------------------------------------------------------------------
1 | APP_ABI := arm64-v8a
2 | APP_PLATFORM := android-21
--------------------------------------------------------------------------------
/demon/tracecatd.c:
--------------------------------------------------------------------------------
1 | #include
2 | #include
3 | #include
4 | #include
5 | #include
6 | #include "includes/llist.h"
7 |
8 | #define LINE_MAX_SIZE 255
9 | #define METRICS_NAME_SIZE 64
10 | #define OUTPUT_BUF_SIZE 4096
11 | #define EXEC_SEPERATOR "___NeXt___"
12 |
13 | enum metrics_type {
14 | NODES_METRICS,
15 | QUERY_METRICS
16 | };
17 |
18 | struct metrics_struct {
19 | char name[METRICS_NAME_SIZE];
20 | enum metrics_type type;
21 | llist *nodes;
22 | };
23 |
24 | struct node_struct {
25 | FILE* fd;
26 | char data[LINE_MAX_SIZE];
27 | };
28 |
29 | llist* list_create()
30 | {
31 | return llist_create(NULL);
32 | }
33 |
34 | int always_false(void* a, void* b)
35 | {
36 | void* tmp;
37 |
38 | tmp = a;
39 | tmp = b;
40 |
41 | return -1;
42 | }
43 |
44 | void list_push_back(llist *list, void *data)
45 | {
46 | llist_add_inorder(data, list, always_false);
47 | }
48 |
49 | char* get_metrics_type_str(enum metrics_type type)
50 | {
51 | switch (type) {
52 | case NODES_METRICS:
53 | return "NODES";
54 | case QUERY_METRICS:
55 | return "QUERY";
56 | }
57 |
58 | return NULL;
59 | }
60 |
61 | void get_current_timestamp(char* buf)
62 | {
63 | struct timespec tp;
64 |
65 | memset(&tp, 0, sizeof(tp));
66 |
67 | clock_gettime(CLOCK_BOOTTIME, &tp);
68 |
69 | sprintf(buf, "%ld%09ld", tp.tv_sec, tp.tv_nsec);
70 | }
71 |
72 | int main(int argc, char *argv[])
73 | {
74 | if (argc < 5) {
75 | printf("ERROR: Missing arguments.\n");
76 | return 0;
77 | }
78 |
79 | char *conf_path = argv[1];
80 | char *data_path = argv[2];
81 | int period = atoi(argv[3]);
82 | int duration = atoi(argv[4]);
83 |
84 | FILE *conf_file;
85 | FILE *data_file;
86 |
87 | char line_buf[LINE_MAX_SIZE];
88 | char output_buf[OUTPUT_BUF_SIZE];
89 |
90 | struct metrics_struct *metrics = NULL;
91 | struct node_struct *node = NULL;
92 | llist *metrics_list = list_create();
93 |
94 | conf_file = fopen(conf_path, "r");
95 |
96 | while (1) {
97 |
98 | memset(line_buf, 0, sizeof(line_buf));
99 |
100 | if (!fgets(line_buf, LINE_MAX_SIZE, conf_file))
101 | break;
102 |
103 | line_buf[strcspn(line_buf, "\n")] = 0;
104 | line_buf[strcspn(line_buf, "\r")] = 0;
105 |
106 | if (line_buf[0] == 'N') {
107 |
108 | if (metrics)
109 | list_push_back(metrics_list, (void *)metrics);
110 |
111 | metrics = malloc(sizeof(struct metrics_struct));
112 |
113 | memset(metrics, 0, sizeof(struct metrics_struct));
114 |
115 | strncpy(metrics->name, &line_buf[7], METRICS_NAME_SIZE - 1);
116 |
117 | metrics->type = NODES_METRICS;
118 |
119 | } else if (line_buf[0] == 'Q') {
120 |
121 | if (metrics)
122 | list_push_back(metrics_list, (void *)metrics);
123 |
124 | metrics = malloc(sizeof(struct metrics_struct));
125 |
126 | memset(metrics, 0, sizeof(struct metrics_struct));
127 |
128 | strncpy(metrics->name, &line_buf[7], METRICS_NAME_SIZE - 1);
129 |
130 | metrics->type = QUERY_METRICS;
131 |
132 | } else if (line_buf[0] == ' ') {
133 |
134 | if (!metrics) {
135 | printf("ERROR: config format error.");
136 | return 0;
137 | }
138 |
139 | if (!metrics->nodes)
140 | metrics->nodes = list_create();
141 |
142 | node = malloc(sizeof(struct node_struct));
143 |
144 | memset(node, 0, sizeof(struct node_struct));
145 |
146 | strncpy(node->data, line_buf + strspn(line_buf, " \t"), LINE_MAX_SIZE);
147 |
148 | list_push_back(metrics->nodes, (void *)node);
149 |
150 | } else {
151 | printf("ERROR: config format error.");
152 | return 0;
153 | }
154 | }
155 |
156 | if (metrics) {
157 | list_push_back(metrics_list, (void *)metrics);
158 | } else {
159 | printf("Success!\n");
160 | return 0;
161 | }
162 |
163 | fclose(conf_file);
164 |
165 | data_file = fopen(data_path, "a");
166 |
167 | if (!data_file) {
168 | printf("ERROR: Data file create failed.\n");
169 | return 0;
170 | }
171 |
172 | struct node* p_metrics = NULL;
173 |
174 | double time_spent = 0;
175 | double time_sleep = 0;
176 |
177 | char curr_time[32];
178 |
179 | int now_time = (int)time(NULL);
180 |
181 | int end_time = now_time + duration;
182 |
183 | while (now_time <= end_time) {
184 |
185 | clock_t begin = clock();
186 |
187 | for (p_metrics = *metrics_list; p_metrics != NULL; p_metrics = p_metrics->next) {
188 |
189 | metrics = (struct metrics_struct*)p_metrics->data;
190 |
191 | get_current_timestamp(curr_time);
192 |
193 | fprintf(data_file, "%s: %s\n%s\n", get_metrics_type_str(metrics->type), metrics->name, curr_time);
194 |
195 | struct node* p_node = NULL;
196 |
197 | for (p_node = *metrics->nodes; p_node != NULL; p_node = p_node->next) {
198 |
199 | node = (struct node_struct*)p_node->data;
200 |
201 | if (!node->fd) {
202 | node->fd = fopen(node->data, "r");
203 | if (!node->fd) {
204 | printf("ERROR: Node not found: %s.\n", node->data);
205 | return 0;
206 | }
207 | }
208 |
209 | memset(output_buf, 0, sizeof(output_buf));
210 |
211 | fread(output_buf, OUTPUT_BUF_SIZE, 1, node->fd);
212 |
213 | // some file node not support fseek
214 | if (!strlen(output_buf)) {
215 | fclose(node->fd);
216 | node->fd = fopen(node->data, "r");
217 | fread(output_buf, OUTPUT_BUF_SIZE, 1, node->fd);
218 | }
219 |
220 | fseek(node->fd, 0, SEEK_SET);
221 |
222 | fprintf(data_file, "%s", output_buf);
223 | }
224 |
225 | fprintf(data_file, "%s\n", EXEC_SEPERATOR);
226 | }
227 |
228 | clock_t end = clock();
229 |
230 | time_spent = (double)(end - begin) / CLOCKS_PER_SEC * 1000; // ms
231 |
232 | time_sleep = ((double)period - time_spent) * 1000; // us
233 |
234 | if (time_sleep > 0)
235 | usleep((unsigned int)time_sleep);
236 |
237 | now_time = (int)time(NULL);
238 | }
239 |
240 | fclose(data_file);
241 |
242 | printf("Success!\n");
243 |
244 | return 0;
245 | }
--------------------------------------------------------------------------------
/docs/user_guide.md:
--------------------------------------------------------------------------------
1 | # User Guide
2 |
3 | Tracecat是一个模块化设计的通用Trace解析框架。
4 |
5 | 支持从Systrace、Perfetto、Ftrace、Simpleperf、Dumpsys、Sysfs、Procfs、iPhone instruments、SnapdragonProfiler等数据源并行采集、解析数据,并生成Excel及各类分析图表。
6 |
7 | 你可以基于它轻松扩展Trace解析功能,也可以直接将他作为工具使用。
8 |
9 | ## 快速上手
10 |
11 | tracecat由trace, parse, chart三个命令和各类数据module(模块)组成:
12 |
13 | - trace:从手机抓取原始数据
14 | - parse:解析出对应的数据
15 | - chart:生成图表
16 |
17 | 每个module就是一种类型的数据。所有原始数据和生成的数据,都会保存在./runs/文件夹下。
18 |
19 | tracecat下载后,解压即可直接运行,无需任何额外配置。
20 |
21 | 手机连接,确定adb连接ok,命令行进入tracecat目录后,可以通过下面一组常用命令5分钟快速上手。
22 |
23 | ```
24 | tracecat
25 | ```
26 |
27 | 会直接显示tracecat的介绍和帮助,支持的模块
28 |
29 | ```
30 | tracecat -h cpu_load2
31 | ```
32 |
33 | 查看cpu_load2模块的详细帮助和参数
34 |
35 | ```
36 | tracecat "trace:cpu_load2" test -d 10s
37 | ```
38 |
39 | 从手机抓取cpu占用率(从proc下采集数据),抓取10s,数据存入./runs/test文件夹下(然后你可以去文件夹拿到原始trace文件)
40 |
41 | ```
42 | tracecat "parse:cpu_load2" test
43 | ```
44 |
45 | 从抓取的数据中解析出cpu占用率,解析后会在./runs/modules文件夹下生成cpu_load2.pkl和cpu_load2.xls两个文件
46 |
47 | 你可以用excel直接打开.xls文件来制作数据,也可以用python加载.pkl文件,做二次处理(见:更多功能 / 使用pkl文件数据)
48 |
49 | ```
50 | tracecat "chart:cpu_load2" test
51 | ```
52 |
53 | 如果你需要将数据以图表形式呈现,执行这条命令,会自动生成图表,可以放大缩小拖拽。
54 |
55 | ```
56 | tracecat "chart:cpu_load2(0-3,4-6,7)" test
57 | ```
58 |
59 | 你也可以在模块中添加参数,比如显示cpu 0-3, 4-6和7的平均占用率
60 |
61 | ```
62 | tracecat "chart:cpu_load2" test --export 1024,768
63 | ```
64 |
65 | 将图表导出成png文件,文件保存在./runs/modules/cpu_load2.png
66 |
67 | ## 更多功能
68 |
69 | 上面的例子中只演示了cpu_load2这个模块,tracecat的每种数据都实现为一个独立模块,用tracecat -h可以看到所有模块
70 |
71 | 每个模块,在parse和chart命令中,都支持传入参数,可以用tracecat -h \<module\> 来查看对应module的参数
72 |
73 | ```
74 | tracecat -h cpu_load
75 | ```
76 |
77 | tracecat支持多数据并行抓取,你可以在命令中添加所有你想抓取的模块
78 |
79 | ```
80 | tracecat "trace:cpu_load,cpu_load_stat,cpu_freq,cpu_freq_stat,ddr_freq,ddr_freq_stat" test -d 10s
81 | ```
82 |
83 | 抓取后全部解析,其中cpu占用率以10ms的粒度统计
84 |
85 | ```
86 | tracecat "parse:cpu_load(10ms),cpu_load_stat,cpu_freq,cpu_freq_stat,ddr_freq,ddr_freq_stat" test
87 | ```
88 |
89 | 可以多个图表叠加显示
90 |
91 | ```
92 | tracecat "chart:cpu_load(0-3,4-6,7),cpu_freq(0,4,7)" test
93 | ```
94 |
95 | 利用&同时启动多个图表
96 |
97 | ```
98 | tracecat "chart:cpu_load" test &
99 | tracecat "chart:cpu_freq" test &
100 | ```
101 |
102 | 利用trace功能抓perfetto
103 |
104 | ```
105 | tracecat "trace:cpu_load" test -d 10s
106 | ```
107 |
108 | 抓取结束后,可以直接将./runs/test/perfetto/perfetto.trace拖入打开
109 |
110 | 为了抓取更多额外trace,可以手动配置perfetto tracing config:
111 |
112 | ```
113 | vim ./configs/perfetto/perfetto.conf
114 | ```
115 |
116 | 这些设置会自动与tracecat默认抓取设置合并。
117 |
118 | tracecat在大量抓取场景数据后,支持批量解析和批量生成图片
119 |
120 | ```
121 | run_all "parse:cpu_load,cpu_freq"
122 | run_all "chart:cpu_load,cpu_freq"
123 | ```
124 |
125 | 这个命令会将runs/所有文件夹解析并生成图片文件
126 |
127 | 有些机器在adb shell后需要手动执行su,对于这些机器,抓取时需要添加--execute-su参数
128 |
129 | ```
130 | tracecat "trace:cpu_load" test -d 10s --execute-su
131 | ```
132 |
133 | 使用在线采样或离线采样,目前默认为离线方式,资源占用更少,但是作为备选方案,仍然支持在线采样
134 |
135 | ```
136 | tracecat "trace:cpu_load2" test -d 10s --sampling-mode online
137 | ```
138 |
139 | 对于以采样方式获取的数据,可以指定采样频率
140 |
141 | ```
142 | tracecat "trace:cpu_load2,cpu_freq2" test -d 10s -s 10ms # 所有采样模块均以10ms粒度采样
143 | tracecat "trace:cpu_load2(50ms),cpu_freq2(10ms)" test -d 10s # cpu_load2以50ms采样,cpu_freq2以10ms采样
144 | ```
145 |
146 | ## 模块详解
147 |
148 | 下面样例中省略了文件夹名字、抓取时间等参数
149 |
150 | **cpu_load**
151 |
152 | 从perfetto的trace中解析CPU占用率
153 |
154 | ```
155 | tracecat "trace:cpu_load" # 抓取perfetto trace
156 | tracecat "parse:cpu_load" # 以1s粒度解析占用率
157 | tracecat "parse:cpu_load(100ms)" # 以100ms粒度解析占用率
158 | tracecat "chart:cpu_load" # 显示各cpu占用率
159 | tracecat "chart:cpu_load(0)" # 只显示cpu 0的占用率
160 | tracecat "chart:cpu_load(0-4,5-6,7)" # 显示平均占用率
161 | ```
162 |
163 | \* 不建议长时间抓取,因为生成的trace文件可能过大,长时间抓取请使用cpu_load2
164 |
165 | **cpu_load2**
166 |
167 | 从procfs采样CPU占用率
168 |
169 | ```
170 | tracecat "trace:cpu_load2" # 以500ms粒度采样(默认)
171 | tracecat "trace:cpu_load2(100ms)" # 以100ms粒度采样(模块设置)
172 | tracecat "trace:cpu_load2" -s 100ms # 以100ms粒度采样(全局设置)
173 | tracecat "parse:cpu_load2" # 解析
174 | tracecat "chart:cpu_load2" # 显示各cpu占用率
175 | tracecat "chart:cpu_load2(0)" # 只显示cpu 0的占用率
176 | tracecat "chart:cpu_load2(0-4,5-6,7)" # 显示平均占用率
177 | ```
178 |
179 | **cpu_load_summary**
180 |
181 | 统计该场景cpu占用率的最大、最小、平均值
182 |
183 | ```
184 | tracecat "trace:cpu_load" # 先要抓取cpu_load或者cpu_load2
185 | tracecat "parse:cpu_load,cpu_load_summary" # 从cpu_load或cpu_load2的解析结果中计算统计结果
186 | tracecat "chart:cpu_load_summary" # 显示柱状图
187 | ```
188 |
189 | **app_load**
190 |
191 | 某个进程的CPU占用率
192 |
193 | ```
194 | tracecat "trace:app_load" # 抓取perfetto trace
195 | tracecat "parse:app_load" # 解析app_load
196 | tracecat "parse:app_load(100ms)" # 以100ms粒度解析app_load
197 | tracecat "chart:app_load" # 显示所有process
198 | tracecat "chart:app_load(1532)" # 显示所有pid为1532的进程各核占用率
199 | tracecat "chart:app_load(pubg)" # 显示名字包含pubg的进程各核占用率
200 | ```
201 |
202 | \* 不建议长时间抓取,因为生成的trace文件可能过大
203 |
204 | **cpu_idle**
205 |
206 | 从perfetto的trace中解析CPU idle state (C-STATE)
207 |
208 | ```
209 | tracecat "trace:cpu_idle" # 抓取perfetto trace
210 | tracecat "parse:cpu_idle" # 解析cpu_idle
211 | tracecat "chart:cpu_idle" # 显示所有cpu的idle state曲线
212 | tracecat "chart:cpu_idle(0)" # 显示cpu 0的idle state曲线
213 | ```
214 |
215 | **cpu_freq**
216 |
217 | 从perfetto的trace中解析CPU频率
218 |
219 | ```
220 | tracecat "trace:cpu_freq" # 抓取perfetto trace
221 | tracecat "parse:cpu_freq" # 解析cpu_freq
222 | tracecat "chart:cpu_freq" # 显示所有cpu的频率曲线
223 | tracecat "chart:cpu_freq(0)" # 只显示cpu 0的频率曲线
224 | tracecat "chart:cpu_freq(0,4,7)" # 显示cpu0,4,7的频率曲线(cluster)
225 | ```
226 |
227 | **cpu_freq_stat**
228 |
229 | 统计cpu各频点及C-STATE运行时间占比(基于cpu_freq, cpu_idle)
230 |
231 | ```
232 | tracecat "trace:cpu_freq,cpu_idle,cpu_freq_stat" # 抓取
233 | tracecat "parse:cpu_freq,cpu_idle,cpu_freq_stat" # 解析
234 | tracecat "chart:cpu_freq_stat" # 生成柱状图
235 | ```
236 |
237 | \* 如果未抓取cpu_idle,则只解析频点的时间占比,不包含C-STATE信息
238 |
239 | **cpu_freq2**
240 |
241 | 从sysfs采样CPU频率
242 |
243 | ```
244 | tracecat "trace:cpu_freq2" # 以500ms粒度采样(默认)
245 | tracecat "trace:cpu_freq2(100ms)" # 以100ms粒度采样(模块设置)
246 | tracecat "trace:cpu_freq2" -s 100ms # 以100ms粒度采样(全局设置)
247 | tracecat "parse:cpu_freq2" # 解析
248 | tracecat "chart:cpu_freq2" # 显示所有cpu的频率曲线
249 | tracecat "chart:cpu_freq2(0)" # 只显示cpu 0的频率曲线
250 | tracecat "chart:cpu_freq2(0,4,7)" # 显示cpu0,4,7的频率曲线(cluster)
251 | ```
252 |
253 | **cpu_freq_stat2**
254 |
255 | 统计cpu各频点运行时间占比(基于cpu_freq2)
256 |
257 | ```
258 | tracecat "trace:cpu_freq2,cpu_freq_stat2" # 抓取
259 | tracecat "parse:cpu_freq2,cpu_freq_stat2" # 解析
260 | tracecat "chart:cpu_freq_stat2" # 生成柱状图
261 | ```
262 |
263 | **gpu_freq**
264 |
265 | 从sysfs采样GPU频率
266 |
267 | ```
268 | tracecat "trace:gpu_freq" # 以500ms粒度采样(默认)
269 | tracecat "trace:gpu_freq(100ms)" # 以100ms粒度采样(模块设置)
270 | tracecat "trace:gpu_freq" -s 100ms # 以100ms粒度采样(全局设置)
271 | tracecat "parse:gpu_freq" # 解析
272 | tracecat "chart:gpu_freq" # 显示GPU频率曲线
273 | ```
274 |
275 | **gpu_freq_stat**
276 |
277 | 统计gpu各频点运行时间占比(基于gpu_freq)
278 |
279 | ```
280 | tracecat "trace:gpu_freq,gpu_freq_stat" # 抓取
281 | tracecat "parse:gpu_freq,gpu_freq_stat" # 解析
282 | tracecat "chart:gpu_freq_stat" # 生成柱状图
283 | ```
284 |
285 | **ddr_freq**
286 |
287 | 从sysfs采样DDR频率
288 |
289 | ```
290 | tracecat "trace:ddr_freq" # 以500ms粒度采样(默认)
291 | tracecat "trace:ddr_freq(100ms)" # 以100ms粒度采样(模块设置)
292 | tracecat "trace:ddr_freq" -s 100ms # 以100ms粒度采样(全局设置)
293 | tracecat "parse:ddr_freq" # 解析
294 | tracecat "chart:ddr_freq"                # 显示DDR频率曲线
295 | ```
296 |
297 | **ddr_freq_stat**
298 |
299 | 统计ddr各频点运行时间占比(基于ddr_freq)
300 |
301 | ```
302 | tracecat "trace:ddr_freq,ddr_freq_stat" # 抓取
303 | tracecat "parse:ddr_freq,ddr_freq_stat" # 解析
304 | tracecat "chart:ddr_freq_stat" # 生成柱状图
305 | ```
306 |
307 | **ios_cpu_load**
308 |
309 | iPhone CPU占用率
310 |
311 | ```
312 | tracecat "trace:ios_cpu_load" # 抓取instruments trace
313 | tracecat "parse:ios_cpu_load" # 以1s粒度解析占用率
314 | tracecat "parse:ios_cpu_load(100ms)" # 以100ms粒度解析占用率
315 | tracecat "chart:ios_cpu_load" # 显示各cpu占用率
316 | tracecat "chart:ios_cpu_load(0)" # 只显示cpu 0的占用率
317 | tracecat "chart:ios_cpu_load(0-4,5-6,7)" # 显示平均占用率
318 | ```
319 |
320 | \* 需要在MacOS运行,需要安装xcode软件
321 |
322 | **ios_app_load**
323 |
324 | iPhone某个进程的CPU占用率
325 |
326 | ```
327 | tracecat "trace:ios_app_load" # 抓取instruments trace
328 | tracecat "parse:ios_app_load" # 解析app_load
329 | tracecat "parse:ios_app_load(100ms)" # 以100ms粒度解析app_load
330 | tracecat "chart:ios_app_load" # 显示所有process
331 | tracecat "chart:ios_app_load(1532)" # 显示所有pid为1532的进程各核占用率
332 | tracecat "chart:ios_app_load(pubg)" # 显示名字包含pubg的进程各核占用率
333 | ```
334 |
335 | \* 需要在MacOS运行,需要安装xcode软件
336 |
337 | **ios_cpu_freq**
338 |
339 | iPhone CPU频率(Hack方式,实验功能)
340 |
341 | \* 不建议使用
342 |
343 | **thermal_zone**
344 |
345 | 从sysfs采样thermal信息
346 |
347 | ```
348 | tracecat "trace:thermal_zone" # 以500ms粒度采样所有thermal节点(默认)
349 | tracecat "trace:thermal_zone(0,1,2)" -s 1s # 以1s粒度采样0,1,2三个zone(设置全局采样频率为1s)
350 | tracecat "trace:thermal_zone(0,1,2|1s)" # 以1s粒度采样0,1,2三个zone(设置模块采样频率为1s)
351 | tracecat "parse:thermal_zone" # 解析
352 | tracecat "chart:thermal_zone" # 显示所有thermal曲线
353 | tracecat "chart:thermal_zone(0,1,2)" # 显示0,1,2三个zone曲线
354 | ```
355 |
356 | \* 由于大部分手机thermal节点比较多,建议尽量降低采样频率(>500ms)
357 |
358 | **dsu_freq**
359 |
360 | 从sysfs采样DSU频率
361 |
362 | ```
363 | tracecat "trace:dsu_freq" # 以500ms粒度采样(默认)
364 | tracecat "trace:dsu_freq(100ms)"          # 以100ms粒度采样(模块设置)
365 | tracecat "trace:dsu_freq" -s 100ms # 以100ms粒度采样(全局设置)
366 | tracecat "parse:dsu_freq" # 解析
367 | tracecat "chart:dsu_freq" # 显示DSU频率曲线
368 | ```
369 |
370 | **simpleperf**
371 |
372 | 从simpleperf stat统计simpleperf events,支持全局采样和APP采样两种模式
373 |
374 | 全局采样:
375 |
376 | ```
377 | adb shell simpleperf list # 获取手机支持的所有event
378 | tracecat "trace:simpleperf(cache-misses,cpu-cycles)" # 以500ms粒度全局采样(默认)
379 | tracecat "trace:simpleperf(cache-misses,cpu-cycles|100ms)" # 以100ms粒度全局采样
380 | ```
381 |
382 | \* 全局采样包括各个cpu的单独统计数据
383 |
384 | APP采样:
385 |
386 | ```
387 | adb shell pm list package # 获取所有APP包名
388 | tracecat "trace:simpleperf(com.android.dialer|cache-misses|100ms)"# 以100ms粒度只采样APP:com.android.dialer
389 | ```
390 |
391 | \* APP采样只包括所有cpu的总和数据,不包括单独cpu的数据
392 |
393 | 解析和显示:
394 |
395 | ```
396 | tracecat "parse:simpleperf" # 解析所有抓取的event
397 | tracecat "parse:simpleperf(cache-misses,cpu-cycles)" # 解析部分抓取的event
398 | tracecat "chart:simpleperf" # 显示所有event的曲线
399 | tracecat "chart:simpleperf(cache-misses,cpu-cycles)" # 显示部分event的曲线
400 | tracecat "chart:simpleperf(cache-misses(cpu0),cpu-cycles(cpu0))" # 显示某个核的event的曲线
401 | ```
402 |
403 | **profiler**
404 |
405 | 半自动方式抓取、解析SnapdragonProfiler提供的所有数据
406 |
407 | ```
408 | tracecat -h profiler
409 | ```
410 |
411 | 查看所有支持解析的数据类型
412 |
413 | ```
414 | tracecat "trace:profiler(cpu_branch_miss),profiler(cpu_cache_miss),profiler(cpu_clock)"
415 | ```
416 |
417 | 抓取cpu_branch_miss, cpu_cache_miss, cpu_clock,开始后命令行会进入等待,请手动运行profiler,并在profiler中启动这些数据的抓取,然后在命令行按y继续。
418 |
419 | 抓取结束后,命令行再次进入等待,请手动停止profiler,并将结果导出到./runs/xxx/profiler/profiler.csv,然后按y继续。
420 |
421 | ```
422 | tracecat "parse:profiler(cpu_branch_miss),profiler(cpu_cache_miss),profiler(cpu_clock)"
423 | ```
424 |
425 | 解析出cpu_branch_miss, cpu_cache_miss, cpu_clock
426 |
427 | ```
428 | tracecat "chart:profiler(cpu_branch_miss)"
429 | ```
430 |
431 | 显示cpu_branch_miss的图表
432 |
433 | \* 需要PC端安装高通Profiler工具
434 |
435 | ## 使用pkl文件数据
436 |
437 | pkl文件为pandas dataframe数据,可以用pandas加载后,进行二次处理:
438 |
439 | ```
440 | import pandas
441 |
442 | df = pandas.read_pickle('.../cpu_load2.pkl')
443 |
444 | print(df)
445 | ```
446 |
447 | 如果你不喜欢用dataframe处理数据,也可以将dataframe转换为正常的dict数组:
448 |
449 | ```
450 | dicts = df.to_dict('records')
451 | ```
452 |
453 | pandas的数据处理功能非常丰富,更多使用方法请查阅pandas的user guide。
454 |
455 | ## Tips
456 |
457 | - Windows下使用tracecat运行,Linux下使用./tracecat运行
458 |
459 | - trace 功能做了防止意外覆盖的处理,如果runs下已经有对应文件夹,将提示出错,请手动删除文件夹或改名运行
460 |
461 | ## Contact
462 |
463 | Author: Cyrus Huang
464 |
465 | Github:
466 |
--------------------------------------------------------------------------------
/framework/config.py:
--------------------------------------------------------------------------------
1 | VERSION = '0.8.3'
2 |
3 | CONFIG = {
4 | 'REMOTE_ROOT': '/data/local/tmp/tracecat',
5 | 'SAMPLING_MODE': 'OFFLINE', # Sampler working mode, ONLINE or OFFLINE
6 | 'SAMPLING_PERIOD': 500, # Sampling period(ms), 500ms by default
7 | 'EXEC_WITH_SU': False, # Execute "su" before run any command
8 | 'FTRACE_BUFFER_SIZE': 2048 # Size of ftrace buffer (kb)
9 | }
--------------------------------------------------------------------------------
/framework/executor.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 |
3 | class Executor(ABC):
4 |
5 | def __init__(self):
6 |
7 | pass
8 |
9 | @abstractmethod
10 | def connect(self, addr = None, port = None, username = None, password = None):
11 | pass
12 |
13 | @abstractmethod
14 | def exec(self, command, handler = None):
15 | pass
16 |
17 | @abstractmethod
18 | def push(self, local, remote):
19 | pass
20 |
21 | @abstractmethod
22 | def pull(self, remote, local):
23 | pass
--------------------------------------------------------------------------------
/framework/executors/adb_executor.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from ppadb.client import Client as Adb_Client
4 |
5 | from framework.executor import Executor
6 | from framework.config import CONFIG
7 |
8 | class Adb_executor(Executor):
9 |
10 | def __init__(self):
11 |
12 | self.client = Adb_Client()
13 |
14 | def connect(self, addr = None, port = None, username = None, password = None):
15 |
16 | try:
17 | devices = self.client.devices()
18 |
19 | except:
20 |
21 | os.system('adb start-server') # start up adb service
22 |
23 | devices = self.client.devices()
24 |
25 | self.device = devices[0] if devices else None
26 |
27 | return True if self.device else False
28 |
29 | def exec(self, command, handler = None):
30 |
31 | return self.device.shell(cmd = 'su -c ' + command if CONFIG['EXEC_WITH_SU'] else command, handler = handler)
32 |
33 | def push(self, local, remote):
34 |
35 | return self.device.push(local, remote)
36 |
37 | def pull(self, remote, local):
38 |
39 | return self.device.pull(remote, local)
40 |
41 | @staticmethod
42 | def print_handler(connection):
43 |
44 | while True:
45 |
46 | data = connection.read(1024)
47 |
48 | if not data:
49 |
50 | break
51 |
52 | print(data.decode('utf-8'), end='')
53 |
54 | connection.close()
--------------------------------------------------------------------------------
/framework/helpers.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import pandas
3 | import os.path
4 | import datetime
5 |
6 | def get_element(array, index, default = None):
7 |
8 | return array[index] if len(array) > index else default
9 |
10 | def get_slices(records, start_time, end_time = None, duration = None):
11 |
12 | slices = list()
13 |
14 | for row in records:
15 |
16 | new = {
17 | 'start_time': int(row[start_time]),
18 | 'end_time': int(row[end_time]) if end_time else 0,
19 | 'duration': int(row[duration]) if duration else 0
20 | }
21 |
22 | if not new['end_time'] and not new['duration']:
23 |
24 | raise Exception('end_time and duration cannot be both None.')
25 |
26 | if not end_time:
27 | new['end_time'] = new['start_time'] + new['duration']
28 |
29 | if not duration:
30 | new['duration'] = new['end_time'] - new['start_time']
31 |
32 | slices.append(new)
33 |
34 | return slices
35 |
36 | def sub_slices(slices, start_time, end_time):
37 |
38 | subset = list()
39 |
40 | for idx, row in enumerate(slices):
41 |
42 | if row['end_time'] <= start_time:
43 | continue
44 |
45 | if row['start_time'] >= end_time:
46 | break
47 |
48 | new = row.copy()
49 |
50 | if row['start_time'] < start_time:
51 | new['start_time'] = start_time
52 | new['duration'] = new['end_time'] - new['start_time']
53 |
54 | if row['end_time'] > end_time:
55 | new['end_time'] = end_time
56 | new['duration'] = new['end_time'] - new['start_time']
57 |
58 | subset.append(new)
59 |
60 | return subset
61 |
62 | def get_slices_usage(slices, start_time, end_time):
63 |
64 | up_time = sum([s['duration'] for s in slices])
65 |
66 | return up_time / (end_time - start_time)
67 |
68 | def get_time(time_str, time_unit, check_format = True):
69 |
70 | time_ms = 0
71 |
72 | if 'ms' in time_str:
73 |
74 | time_ms = int(time_str.replace('ms', ''))
75 |
76 | elif 's' in time_str:
77 |
78 | time_ms = int(time_str.replace('s', '')) * 1000
79 |
80 | else:
81 |
82 | if check_format:
83 |
84 | sys.exit('ERROR: Invalid time format, please use 1ms, 1s.')
85 |
86 | else:
87 |
88 | return int(time_str)
89 |
90 | if time_unit == 'ms':
91 |
92 | return time_ms
93 |
94 | elif time_unit == 's':
95 |
96 | return time_ms / 1000
97 |
98 | else:
99 |
100 | raise Exception('Invalide time unit, only support s, ms.')
101 |
102 | def pick_next_window(window, start_time, end_time, window_time, ignore_broken_window = False):
103 |
104 | window_id = 0 if not window else window['id'] + 1
105 |
106 | next_window = {
107 | 'id': window_id,
108 | 'start': start_time + window_id * window_time,
109 | 'end': start_time + window_id * window_time + window_time,
110 | 'dur': window_time,
111 | 'broken': False
112 | }
113 |
114 | if next_window['start'] >= end_time:
115 |
116 | return None
117 |
118 | if next_window['end'] > end_time:
119 |
120 | if ignore_broken_window:
121 |
122 | return None
123 |
124 | next_window['end'] = end_time
125 | next_window['dur'] = next_window['end'] - next_window['start']
126 | next_window['broken'] = True
127 |
128 | return next_window
129 |
130 | def get_unique_list(df, cols, skip_none = False):
131 |
132 | results = list()
133 |
134 | df_comb = pandas.DataFrame()
135 |
136 | for col_name, col_type in cols.items():
137 |
138 | df_comb['combine'] = (df_comb['combine'] if 'combine' in df_comb else '') + df[col_name].astype(str) + '\t'
139 |
140 | unique_list = df_comb['combine'].dropna().unique().tolist()
141 |
142 | for row in unique_list:
143 |
144 | has_none = False
145 |
146 | argx = 0
147 |
148 | args = row.split('\t')
149 |
150 | data = dict()
151 |
152 | for col_name, col_type in cols.items():
153 |
154 | if args[argx] == 'None':
155 |
156 | has_none = True
157 |
158 | data[col_name] = col_type(args[argx]) if args[argx] != 'None' else None # FIXME: It's a temporery fix None value.
159 |
160 | argx += 1
161 |
162 | if skip_none and has_none:
163 |
164 | continue
165 |
166 | results.append(data)
167 |
168 | return results
169 |
170 | def get_runtime_path():
171 |
172 | return getattr(sys, '_MEIPASS', os.path.abspath(os.path.dirname(__file__)) + '/../')
173 |
174 | def create_seq_list(extra, prefix, ids):
175 |
176 | if type(extra) == str:
177 |
178 | extra = [extra]
179 |
180 | return extra + [prefix + str(i) for i in ids]
181 |
182 | def create_seq_dict(extra, prefix, ids, data_type):
183 |
184 | result = dict()
185 |
186 | if type(extra) == str:
187 |
188 | extra = [extra]
189 |
190 | for attr in extra:
191 |
192 | result[attr] = data_type()
193 |
194 | for i in ids:
195 |
196 | result[prefix + str(i)] = data_type()
197 |
198 | return result
199 |
200 | def create_duration_column(df, end_time = None, ts_col = 'timestamp', dur_col = 'duration'):
201 |
202 | next_df = df[ts_col].shift(-1)
203 |
204 | if end_time:
205 |
206 | next_df.iloc[-1] = end_time
207 |
208 | df[dur_col] = next_df - df[ts_col]
209 |
210 | return df
211 |
212 | def log_current_command(log_file):
213 |
214 | now = datetime.datetime.now()
215 |
216 | with open(log_file, 'a') as log_file:
217 |
218 | time_str = now.strftime("%Y-%m-%d %H:%M:%S")
219 |
220 | cmd_line = ' '.join(sys.argv)
221 |
222 | log_file.write("%s: %s\n" % (time_str, cmd_line, ))
223 |
224 | def handle_thread_exceptions(thread_func):
225 |
226 | def wrapper(*args):
227 |
228 | try:
229 | thread_func(*args)
230 | except SystemExit as e:
231 | print(e)
232 | except:
233 | traceback.print_exc()
234 |
235 | return wrapper
236 |
237 | g_ftrace_taker = None
238 |
239 | def take_ftrace_buffer(taker):
240 |
241 | global g_ftrace_taker
242 |
243 | if g_ftrace_taker and g_ftrace_taker != taker:
244 |
245 | sys.exit('ERROR: Can\'t enable %s because ftrace buffer is taken by %s, please remove the conflict modules and try again.' % (taker, g_ftrace_taker))
246 |
247 | g_ftrace_taker = taker
--------------------------------------------------------------------------------
/framework/module.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import pandas
4 |
5 | from abc import ABC, abstractmethod
6 |
7 | from framework.plotter import Plotter
8 |
9 | class Module(ABC):
10 |
11 | def __init__(self):
12 |
13 | self.sources = dict()
14 |
15 | self.plotter = Plotter()
16 |
17 | self.workspace = None
18 |
19 | self.results = None
20 |
21 | def init_workspace(self, workspace):
22 |
23 | self.workspace = workspace
24 |
25 | def init_sources(self, sources):
26 |
27 | self.sources = sources
28 |
29 | def init_plotter(self, **kwargs):
30 |
31 | self.plotter.init_plot(**kwargs)
32 |
33 | def invoke_source(self, source_name, items = None):
34 |
35 | if not self.sources:
36 |
37 | raise Exception('Data source is used before init.')
38 |
39 | if source_name not in self.sources:
40 |
41 | raise Exception('Data source "%s" not found.' % source_name)
42 |
43 | source = self.sources[source_name]
44 |
45 | source.enable(items)
46 |
47 | return source
48 |
49 | def invoke_result(self, module_names, return_when_fail = False):
50 |
51 | file_list = list()
52 |
53 | if type(module_names) == str:
54 |
55 | module_names = [module_names]
56 |
57 | for module_name in module_names:
58 |
59 | pickle_file = self.workspace + module_name + '.pkl'
60 |
61 | if os.path.exists(pickle_file):
62 |
63 | return pandas.read_pickle(pickle_file)
64 |
65 | file_list.append(pickle_file)
66 |
67 | if return_when_fail:
68 |
69 | return None
70 |
71 | print('ERROR: Invoke results failed, pickle file not found: ', end='')
72 |
73 | if len(file_list) == 1:
74 |
75 | print(file_list[0])
76 |
77 | else:
78 |
79 | print('')
80 |
81 | for file in file_list:
82 |
83 | print(file)
84 |
85 | sys.exit()
86 |
87 | def save(self, pickle_file = None):
88 |
89 | if self.results is None:
90 |
91 | return None
92 |
93 | if not pickle_file:
94 |
95 | pickle_file = self.workspace + self.get_name() + '.pkl'
96 |
97 | self.results.to_pickle(pickle_file)
98 |
99 | return pickle_file
100 |
101 | def load(self, pickle_file = None):
102 |
103 | if not pickle_file:
104 |
105 | pickle_file = self.workspace + self.get_name() + '.pkl'
106 |
107 | if not os.path.exists(pickle_file):
108 |
109 | sys.exit('ERROR: Result pickle file not found. ' + pickle_file)
110 |
111 | self.results = pandas.read_pickle(pickle_file)
112 |
113 | return self.results.copy()
114 |
115 | def export(self, excel_file = None):
116 |
117 | if self.results is None:
118 |
119 | return None
120 |
121 | if len(self.results) < 1048576: # Excel format limit rows
122 |
123 | if not excel_file:
124 |
125 | excel_file = self.workspace + self.get_name() + '.xlsx'
126 |
127 | self.results.to_excel(excel_file)
128 |
129 | else: # Fallback to csv format
130 |
131 | if not excel_file:
132 |
133 | excel_file = self.workspace + self.get_name() + '.csv'
134 |
135 | self.results.to_csv(excel_file)
136 |
137 | return excel_file
138 |
139 | def get_result(self):
140 |
141 | return self.results.copy()
142 |
143 | def trace(self, params):
144 |
145 | self.do_trace(params)
146 |
147 | def parse(self, params):
148 |
149 | self.results = self.do_parse(params)
150 |
151 | def chart(self, params):
152 |
153 | self.do_chart(params, self.results.copy())
154 |
155 | # Virtual function but not requred to be implemented.
156 | def invoke_sources(self):
157 | pass
158 |
159 | # Virtual function but not requred to be implemented.
160 | def invoke_results(self):
161 | pass
162 |
163 | @abstractmethod
164 | def get_name(self):
165 | pass
166 |
167 | @abstractmethod
168 | def get_desc(self):
169 | pass
170 |
171 | @abstractmethod
172 | def get_help(self):
173 | pass
174 |
175 | def do_trace(self, params):
176 | pass
177 |
178 | @abstractmethod
179 | def do_parse(self, params):
180 | pass
181 |
182 | @abstractmethod
183 | def do_chart(self, params, df):
184 | pass
--------------------------------------------------------------------------------
/framework/objects.py:
--------------------------------------------------------------------------------
1 | # Data sources
2 | from framework.sources.perfetto import Perfetto
3 | from framework.sources.instruments import Instruments
4 | from framework.sources.sysfs import Sysfs
5 | from framework.sources.procfs import Procfs
6 | from framework.sources.profiler import Profiler
7 | from framework.sources.simpleperf import Simpleperf
8 | from framework.sources.ftrace import Ftrace
9 |
10 | # Modules
11 | from modules.cpu_load.cpu_load_module import Cpu_load_module
12 | from modules.cpu_load2.cpu_load2_module import Cpu_load2_module
13 | from modules.cpu_load_summary.cpu_load_summary_module import Cpu_load_summary_module
14 | from modules.app_load.app_load_module import App_load_module
15 | from modules.cpu_idle.cpu_idle_module import Cpu_idle_module
16 | from modules.cpu_freq.cpu_freq_module import Cpu_freq_module
17 | from modules.cpu_freq2.cpu_freq2_module import Cpu_freq2_module
18 | from modules.cpu_freq_stat.cpu_freq_stat_module import Cpu_freq_stat_module
19 | from modules.cpu_freq_stat2.cpu_freq_stat2_module import Cpu_freq_stat2_module
20 | from modules.gpu_freq.gpu_freq_module import Gpu_freq_module
21 | from modules.gpu_freq_stat.gpu_freq_stat_module import Gpu_freq_stat_module
22 | from modules.ddr_freq.ddr_freq_module import Ddr_freq_module
23 | from modules.ddr_freq_stat.ddr_freq_stat_module import Ddr_freq_stat_module
24 | from modules.dsu_freq.dsu_freq_module import Dsu_freq_module
25 | from modules.thermal_zone.thermal_zone_module import Thermal_zone_module
26 | from modules.ios_cpu_load.ios_cpu_load_module import Ios_cpu_load_module
27 | from modules.ios_app_load.ios_app_load_module import Ios_app_load_module
28 | from modules.ios_cpu_freq.ios_cpu_freq_module import Ios_cpu_freq_module
29 | from modules.profiler.profiler_module import Profiler_module
30 | from modules.simpleperf.simpleperf_module import Simpleperf_module
31 |
32 | SOURCES = [
33 | Ftrace(),
34 | Sysfs(),
35 | Procfs(),
36 | Perfetto(),
37 | Instruments(),
38 | Profiler(),
39 | Simpleperf(),
40 | ]
41 |
42 | MODULES = [
43 | Cpu_load_module(),
44 | Cpu_load2_module(),
45 | Cpu_load_summary_module(),
46 | App_load_module(),
47 | Cpu_idle_module(),
48 | Cpu_freq_module(),
49 | Cpu_freq2_module(),
50 | Cpu_freq_stat_module(),
51 | Cpu_freq_stat2_module(),
52 | Gpu_freq_module(),
53 | Gpu_freq_stat_module(),
54 | Ddr_freq_module(),
55 | Ddr_freq_stat_module(),
56 | Dsu_freq_module(),
57 | Thermal_zone_module(),
58 | Ios_cpu_load_module(),
59 | Ios_app_load_module(),
60 | Ios_cpu_freq_module(),
61 | Profiler_module(),
62 | Simpleperf_module()
63 | ]
--------------------------------------------------------------------------------
/framework/plotter.py:
--------------------------------------------------------------------------------
1 | import pandas
2 |
3 | from abc import ABC, abstractmethod
4 |
5 | class Plotter():
6 |
7 | def __init__(self):
8 |
9 | self.kwargs = dict()
10 |
11 | def init_plot(self, **kwargs):
12 |
13 | self.kwargs = kwargs
14 |
15 | # This function parameters are the same as pandas df.plot
16 | def plot(self, df, name = None, **kwargs):
17 |
18 | kwargs.update(self.kwargs)
19 |
20 | df.plot(**kwargs).set_ylabel(name)
21 |
22 | def plot_index_chart(self, params, df, name, index, x, y, **kwargs):
23 |
24 | if df.dtypes[index] == 'int64':
25 |
26 | index_type = int
27 | else:
28 | index_type = str
29 |
30 | index_list = params if params else sorted(df[index].unique().tolist())
31 |
32 | for idx in index_list:
33 |
34 | self.plot(df[df[index] == index_type(idx)], name, x=x, y=y, label=y + '_' + str(idx), **kwargs)
35 |
36 | def plot_paral_chart(self, params, df, name, x, y_prefixer, **kwargs):
37 |
38 | if not params:
39 |
40 | self.plot(df, name, x=x, y=[col for col in df.columns.values if y_prefixer in col], **kwargs)
41 |
42 | else:
43 |
44 | for param in params:
45 |
46 | if '-' in param:
47 |
48 | args = param.split('-')
49 |
50 | ids = range(int(args[0]), int(args[1]))
51 |
52 | df_mean = pandas.DataFrame()
53 |
54 | df_mean[x] = df[x]
55 |
56 | df_mean[y_prefixer + param] = df[[y_prefixer + str(i) for i in ids]].mean(axis=1)
57 |
58 | self.plot(df_mean, name, x=x, y=[y_prefixer + param], **kwargs)
59 |
60 | else:
61 |
62 | self.plot(df, name, x=x, y=[y_prefixer + param], **kwargs)
--------------------------------------------------------------------------------
/framework/sampler.py:
--------------------------------------------------------------------------------
1 | # About sampler
2 | # ------------------------------------------
3 | # Sampler is for sampling from fs node or from command output. It support
4 | # sample for one time(info) or periodically(data) and stores all the results
5 | # in a data file for loading it later, you can think it as a sampling data
6 | # database. It provides get_matrics api to get data from the data file.
7 |
8 | import os
9 | import sys
10 | import time
11 | import json
12 | import threading
13 |
14 | from framework.config import CONFIG
15 | from framework.helpers import get_runtime_path, handle_thread_exceptions
16 |
17 | EXEC_SEPERATOR = '___NeXt___'
18 |
19 | class Sampler():
20 |
21 | counter = 0
22 |
23 | counter_mutex = threading.Lock()
24 |
25 | datafile_mutex = threading.Lock()
26 |
27 | def __init__(self, sampler_name = None):
28 |
29 | self.executor = None
30 |
31 | self.info_metrics = dict()
32 |
33 | self.data_metrics = dict()
34 |
35 | self.periods = set()
36 |
37 | self.data = dict()
38 |
39 | Sampler.counter_mutex.acquire()
40 |
41 | Sampler.counter += 1
42 |
43 | self.sampler_id = Sampler.counter
44 |
45 | Sampler.counter_mutex.release()
46 |
47 | self.sampler_name = sampler_name if sampler_name else sampler_id
48 |
49 | def init(self, executor):
50 |
51 | self.executor = executor
52 |
53 | if not self.executor.connect():
54 |
55 | return False
56 |
57 | return True
58 |
59 | def test_source(self, source, source_type):
60 |
61 | error_messages = [
62 | 'No such file or directory',
63 | 'Permission denied',
64 | 'Operation not permitted',
65 | 'Is a directory',
66 | 'inaccessible or not found',
67 | 'syntax error: unexpected'
68 | ]
69 |
70 | if type(source) == str:
71 |
72 | source = [source]
73 |
74 | if source_type == 'NODES':
75 |
76 | command = 'cat ' + ' '.join(source)
77 | else:
78 | command = '; '.join(source)
79 |
80 | result = self.executor.exec(command)
81 |
82 | for error in error_messages:
83 |
84 | if error in result:
85 |
86 | return False
87 |
88 | return True
89 |
90 | def register_metrics(self, metrics_name, source, parser = None, userdata = None, period = CONFIG['SAMPLING_PERIOD'], source_type = 'NODES', run_test = True):
91 |
92 | if source_type not in ['NODES', 'QUERY']:
93 |
94 | raise Exception('Invalid source type: %s' % source_type)
95 |
96 | if metrics_name in self.info_metrics or metrics_name in self.data_metrics:
97 |
98 | return False
99 |
100 | if type(source) == str:
101 |
102 | source = [source]
103 |
104 | if run_test and not self.test_source(source, source_type):
105 |
106 | return False
107 |
108 | metrics = self.data_metrics if period else self.info_metrics
109 |
110 | metrics[metrics_name] = {
111 | 'name': metrics_name,
112 | 'type': source_type,
113 | 'source': source,
114 | 'parser': parser,
115 | 'period': period,
116 | 'userdata': userdata
117 | }
118 |
119 | if period:
120 |
121 | self.periods.add(period)
122 |
123 | return True
124 |
125 | def __parse_data(self, metrics_name, raw_data):
126 |
127 | metrics = None
128 |
129 | results = None
130 |
131 | if metrics_name in self.info_metrics:
132 |
133 | metrics = self.info_metrics[metrics_name]
134 |
135 | elif metrics_name in self.data_metrics:
136 |
137 | metrics = self.data_metrics[metrics_name]
138 |
139 | if not metrics:
140 |
141 | raise Exception('Parsing invalid metrics: ' + metrics_name)
142 |
143 | if not metrics['parser']:
144 |
145 | return raw_data
146 |
147 | try:
148 |
149 | results = metrics['parser'](raw_data, metrics['userdata']) if metrics['userdata'] else metrics['parser'](raw_data)
150 |
151 | except:
152 |
153 | sys.exit('ERROR: Parse "%s" raw data failed: %s' % (metrics_name, raw_data))
154 |
155 | return results
156 |
157 | def __get_query_command(self, metrics_list):
158 |
159 | command = ''
160 |
161 | for metrics in metrics_list:
162 |
163 | if metrics['type'] == 'QUERY':
164 |
165 | inner_command = '; '.join(metrics['source'])
166 |
167 | command += 'echo "%s"; cat /proc/uptime | awk \'{print $1}\'; %s; echo "%s"; ' % (metrics['name'], inner_command, EXEC_SEPERATOR)
168 |
169 | return command
170 |
171 | def __parse_query_result(self, result):
172 |
173 | records = list()
174 |
175 | result = result.replace('\r', '')
176 |
177 | outputs = result.split(EXEC_SEPERATOR + '\n')
178 |
179 | for output in outputs:
180 |
181 | if not output:
182 | break
183 |
184 | parts = output.split('\n', 2)
185 |
186 | name = parts[0].strip()
187 |
188 | time = int(float(parts[1]) * 1000000000)
189 |
190 | data = self.__parse_data(metrics_name, parts[2])
191 |
192 | records.append({
193 | 'metrics': name,
194 | 'time': time,
195 | 'data': data
196 | })
197 |
198 | return records
199 |
200 | def __get_nodes_command(self, metrics_list):
201 |
202 | command = ''
203 |
204 | for metrics in metrics_list:
205 |
206 | if metrics['type'] == 'NODES':
207 |
208 | command += (' "%s" ' % metrics['name']) + ' /proc/uptime ' + ' '.join(metrics['source']) + (' "%s" ' % EXEC_SEPERATOR)
209 |
210 | return 'cat' + command if command else command
211 |
212 | def __parse_nodes_result(self, result):
213 |
214 | records = list()
215 |
216 | result = result.replace('\r', '')
217 |
218 | outputs = result.split('cat: %s: No such file or directory' % EXEC_SEPERATOR + '\n')
219 |
220 | for output in outputs:
221 |
222 | if not output:
223 | break
224 |
225 | parts = output.split('\n', 2)
226 |
227 | name = parts[0].strip().replace(': No such file or directory', '')[5:]
228 |
229 | time = int(float(parts[1].split()[0]) * 1000000000)
230 |
231 | data = self.__parse_data(name, parts[2])
232 |
233 | records.append({
234 | 'metrics': name,
235 | 'time': time,
236 | 'data': data
237 | })
238 |
239 | return records
240 |
241 | def __dump_metrics(self, metrics):
242 |
243 | results = dict()
244 |
245 | for name, item in metrics.items():
246 |
247 | results[name] = {
248 | 'type': item['type'],
249 | 'source': item['source'],
250 | 'period': item['period']
251 | }
252 |
253 | return results
254 |
255 | def execute(self, metrics_list):
256 |
257 | nodes_command = self.__get_nodes_command(metrics_list)
258 |
259 | query_command = self.__get_query_command(metrics_list)
260 |
261 | if nodes_command:
262 |
263 | nodes_result = self.executor.exec(nodes_command)
264 |
265 | if query_command:
266 |
267 | query_result = self.executor.exec(query_command)
268 |
269 | records = list()
270 |
271 | if nodes_command and nodes_result.strip():
272 |
273 | records += self.__parse_nodes_result(nodes_result)
274 |
275 | if query_command and query_result.strip():
276 |
277 | records += self.__parse_query_result(query_result)
278 |
279 | return records
280 |
    def start(self, data_file, duration):
        """Run a full sampling session of *duration* seconds into *data_file*.

        File layout: metrics descriptions first, then the one-shot INFO
        results, then the DATA results streamed by the sampling threads.
        """
        # Write info results

        with open(data_file, 'w') as file:

            file.write('# INFO METRICS:\n')

            if self.info_metrics:

                file.write(json.dumps(self.__dump_metrics(self.info_metrics)) + '\n')

            file.write('# DATA METRICS:\n')

            if self.data_metrics:

                file.write(json.dumps(self.__dump_metrics(self.data_metrics)) + '\n')

            file.write('# INFO RESULTS:\n')

            # INFO metrics are sampled exactly once, up front.
            records = self.execute(self.info_metrics.values())

            for record in records:

                file.write(json.dumps(record) + '\n')

            file.write('# DATA RESULTS:\n')

        # Select sampler mode

        sampling_mode = CONFIG['SAMPLING_MODE']

        if sampling_mode == 'ONLINE':

            sampling_func = self.__sampling_online

        elif sampling_mode == 'OFFLINE':

            sampling_func = self.__sampling_offline

            # Offline mode stages files on the device; start clean.
            self.__clear_remote_folder()

        else:
            raise Exception('ERROR: Invalid sampling mode: %s' % sampling_mode)

        # Create one sampler thread per distinct sampling period.

        threads = list()

        for period in self.periods:

            metrics_list = [m for m in self.data_metrics.values() if m['period'] == period]

            thread = threading.Thread(target=sampling_func, args=(metrics_list, period, duration, data_file, ))

            threads.append({
                'name': '%s-%dms' % (self.sampler_name, period),
                'period': period,
                'thread': thread
            })

        for thread in threads:

            print('Start sampling thread: ' + thread['name'])

            thread['thread'].start()

        # Block until every sampling thread has finished.
        for thread in threads:

            thread['thread'].join()

            print('Thread finished: ' + thread['name'])
353 |
354 | @handle_thread_exceptions
355 | def __sampling_online(self, metrics_list, period, duration, data_file):
356 |
357 | with open(data_file, 'w') as file:
358 |
359 | run_time = 0
360 |
361 | end_time = time.time() + duration
362 |
363 | while run_time < end_time:
364 |
365 | run_time = time.time()
366 |
367 | records = self.execute(metrics_list)
368 |
369 | Sampler.datafile_mutex.acquire()
370 |
371 | for record in records:
372 |
373 | file.write(json.dumps(record) + '\n')
374 |
375 | Sampler.datafile_mutex.release()
376 |
377 | sleep_time = period / 1000 - (time.time() - run_time)
378 |
379 | if sleep_time > 0:
380 |
381 | time.sleep(sleep_time)
382 |
    @handle_thread_exceptions
    def __sampling_offline(self, metrics_list, period, duration, data_file):
        """Thread body for OFFLINE mode: push the tracecatd daemon to the
        device, let it sample autonomously for *duration* seconds, then pull
        its data file back and append the parsed records to *data_file*."""
        working_path = os.path.dirname(data_file)

        runtime_path = get_runtime_path()

        # Remote/local file layout for this sampling period.
        path = {
            'remote_proc': self.__get_remote_folder() + '/tracecatd-%s' % period,
            'remote_conf': self.__get_remote_folder() + '/tracecatd-%s.conf' % period,
            'remote_data': self.__get_remote_folder() + '/tracecatd-%s.data' % period,
            'local_proc': runtime_path + '/demon/obj/local/arm64-v8a/tracecatd',
            'local_conf': working_path + '/tracecatd-%s.conf' % period,
            'local_data': working_path + '/tracecatd-%s.data' % period,
        }

        # Push tracecatd to device

        if not os.path.exists(path['local_proc']):

            sys.exit('ERROR: Tracecatd not found.')

        self.executor.push(path['local_proc'], path['remote_proc'])

        self.executor.exec('chmod a+x %s' % path['remote_proc'])

        # Push tracecatd.conf to device

        config = self.__get_tracecatd_config(metrics_list)

        with open(path['local_conf'], 'w') as file:

            file.write(config)

        self.executor.push(path['local_conf'], path['remote_conf'])

        # Run tracecatd and wait for it to finish (exec blocks for the
        # whole sampling duration).

        ret = self.executor.exec('%s %s %s %s %s' % (path['remote_proc'], path['remote_conf'], path['remote_data'], period, duration, ))

        ret = ret.strip()

        # The daemon prints exactly 'Success!' on a clean run.
        if ret != 'Success!':

            sys.exit('ERROR: Tracecatd error: %s' % ret.strip())

        # Get tracecatd.data back

        self.executor.pull(path['remote_data'], path['local_data'])

        # Parse records from the pulled data file and append them to the
        # shared data file under the mutex (one file, many period threads).

        records = self.__parse_tracecatd_data(path['local_data'])

        Sampler.datafile_mutex.acquire()

        with open(data_file, 'a+') as file:

            for record in records:

                file.write(json.dumps(record) + '\n')

        Sampler.datafile_mutex.release()
446 |
447 | def __get_remote_folder(self):
448 |
449 | return CONFIG['REMOTE_ROOT'] + '/sampler-%s' % self.sampler_name
450 |
451 | def __clear_remote_folder(self):
452 |
453 | return self.executor.exec('rm -rf %s' % self.__get_remote_folder())
454 |
455 | def __get_tracecatd_config(self, metrics_list):
456 |
457 | config = ''
458 |
459 | for metrics in metrics_list:
460 |
461 | if metrics['type'] == 'NODES':
462 |
463 | config += 'NODES: %s\n' % metrics['name']
464 |
465 | for node in metrics['source']:
466 |
467 | config += ' %s\n' % node
468 |
469 | elif metrics['type'] == 'QUERY':
470 |
471 | config += 'QUERY: %s\n' % metrics['name']
472 |
473 | for cmd in metrics['source']:
474 |
475 | config += ' %s\n' % cmd
476 |
477 | return config
478 |
    def __parse_tracecatd_data(self, data_file):
        """Parse the data file written by the tracecatd daemon into records.

        Per record the file contains: a header line carrying the metrics
        name, a timestamp line, the raw sample output, then a line with
        EXEC_SEPERATOR.
        """
        records = list()

        with open(data_file, 'r') as file:

            content = file.read()

        outputs = content.split(EXEC_SEPERATOR + '\n')

        for output in outputs:

            # The element after the final separator is empty; stop there.
            if not output:
                break

            parts = output.split('\n', 2)

            # Drop the fixed 7-character header prefix to get the metrics
            # name — presumably a tag emitted by tracecatd; TODO confirm
            # against demon/tracecatd.c.
            name = parts[0][7:]

            # Timestamp line is already an integer (nanoseconds, matching
            # the online parsers) — assumed; verify against tracecatd.c.
            time = int(parts[1])

            data = self.__parse_data(name, parts[2])

            records.append({
                'metrics': name,
                'time': time,
                'data': data
            })

        return records
509 |
510 | def load(self, data_file):
511 |
512 | # NOTE: Do not use self.info_metrics, self.data_metrics and self.excutor, this function shoud be able to use stand along.
513 |
514 | if not os.path.exists(data_file):
515 |
516 | return False
517 |
518 | parsing_results = False
519 |
520 | with open(data_file, 'r') as file:
521 |
522 | lines = file.readlines()
523 |
524 | for idx, line in enumerate(lines):
525 |
526 | if line[0] == '#':
527 |
528 | if line == '# INFO RESULTS:\n' or line == '# DATA RESULTS:\n':
529 |
530 | parsing_results = True
531 |
532 | continue
533 |
534 | if parsing_results:
535 |
536 | record = json.loads(line)
537 |
538 | if record['metrics'] not in self.data:
539 |
540 | self.data[record['metrics']] = list()
541 |
542 | self.data[record['metrics']].append({
543 | 'time': record['time'],
544 | 'data': record['data'],
545 | })
546 |
547 | return True
548 |
549 | def get_metrics(self, metrics_name, raise_exception = True):
550 |
551 | # NOTE: Do not use self.info_metrics, self.data_metrics and self.excutor, this function shoud be able to use stand along.
552 |
553 | if metrics_name not in self.data:
554 |
555 | if raise_exception:
556 |
557 | raise Exception('Metrics not found in data file: %s' % metrics_name)
558 |
559 | return None
560 |
561 | return self.data[metrics_name].copy()
562 |
--------------------------------------------------------------------------------
/framework/source.py:
--------------------------------------------------------------------------------
1 | import os
2 | import traceback
3 |
4 | from abc import ABC, abstractmethod
5 | from framework.helpers import handle_thread_exceptions
6 |
class Source(ABC):
    """Abstract base class for every data source.

    Concrete sources implement the underscore-prefixed hooks (_name,
    _enable, _trace, _parse, ...); the public methods enforce the
    init/enable lifecycle around them.
    """

    def __init__(self):
        # Output folder for this source's files (set by init_workspace).
        self.workspace = None
        # Becomes True once enable() has run.
        self.status = False
        # Registry of sibling sources (set by init_invoke).
        self.sources = None

    def init_workspace(self, workspace):
        """Remember the working folder, creating it when missing."""
        if not os.path.exists(workspace):
            os.makedirs(workspace)
        self.workspace = workspace

    def init_invoke(self, sources):
        """Register the source registry used by invoke_source()."""
        self.sources = sources

    def enable(self, items = None):
        """Enable this source, optionally for specific items (one or a list)."""
        if items:
            batch = items if type(items) == list else [items]
            for entry in batch:
                self._enable(entry)
        self.status = True

    def config(self, item, conf):
        """Forward per-item configuration to the concrete source."""
        self._config(item, conf)

    def is_enable(self):
        """True once enable() has been called."""
        return self.status

    def get_name(self):
        """The concrete source's short name."""
        return self._name()

    def get_workspace(self):
        """The working folder; raises if init_workspace() was never called."""
        if self.workspace:
            return self.workspace
        raise Exception('Workspace is used before init.')

    @handle_thread_exceptions
    def trace(self, duration):
        """Run the capture for *duration* seconds (must be enabled first)."""
        if not self.is_enable():
            raise Exception('Data source run before enable.')
        return self._trace(duration)

    def pre_trace(self, duration):
        """Hook invoked before trace()."""
        self._pre_trace(duration)

    def post_trace(self, duration):
        """Hook invoked after trace()."""
        self._post_trace(duration)

    def parse(self):
        """Parse captured data (must be enabled first)."""
        if not self.is_enable():
            raise Exception('Data source run before enable.')
        return self._parse()

    def invoke_source(self, source_name, items = None):
        """Look up a sibling source by name, enable it, and return it."""
        if not self.sources:
            raise Exception('Data source is used before init.')
        if source_name not in self.sources:
            raise Exception('Data source "%s" not found.' % source_name)
        sibling = self.sources[source_name]
        sibling.enable(items)
        return sibling

    def invoke_sources(self):
        # Optional hook: sources that depend on others override this.
        pass

    @abstractmethod
    def _name(self):
        pass

    @abstractmethod
    def _enable(self, item):
        pass

    def _config(self, item, conf):
        # Optional hook: only sources that accept configuration override it.
        raise Exception('Should be implemented or not called.')

    @abstractmethod
    def _trace(self, duration):
        pass

    @abstractmethod
    def _parse(self):
        pass

    def _pre_trace(self, duration):
        pass

    def _post_trace(self, duration):
        pass
--------------------------------------------------------------------------------
/framework/sources/basefs.py:
--------------------------------------------------------------------------------
1 | # About basefs
2 | # ------------------------------------------
3 | # Basefs is a file node sampler's base class. It provides a file node sampler
4 | # template so that the specific file system sampler can just implement a node
5 | # list(metrics function) and all the magic will take effect. Basefs will get
6 | # all the metrics (implemented by the child class) and register them into
# sampler, and tell the sampler which are the info nodes and which are data
8 | # nodes, and let the sampler do the rest of the work. Finally it provides
9 | # get_metrics function for the modules which will get the data from sampler's
10 | # database file.
11 |
12 | import os
13 | import sys
14 | import re
15 |
16 | from abc import ABC, abstractmethod
17 |
18 | from framework.source import Source
19 | from framework.sampler import Sampler
20 | from framework.executors.adb_executor import Adb_executor
21 | from framework.config import CONFIG
22 |
class Basefs(Source, ABC):
    """Template base class for file-node samplers.

    A subclass only implements name() and metrics(); Basefs registers each
    enabled metrics with a Sampler (expanding wildcard node patterns on the
    device) and lets the sampler do the sampling/loading work.
    """

    def __init__(self):

        super().__init__()

        # One sampler instance dedicated to this source.
        self.sampler = Sampler(self.name())

        # NOTE: rebinds the attribute from the metrics() method to its
        # return value, so self.metrics is the descriptor dict afterwards.
        self.metrics = self.metrics()

        # Names of metrics enabled via _enable().
        self.enabled = set()

    @abstractmethod
    def name(self):
        # Short source name, e.g. 'procfs'.
        pass

    @abstractmethod
    def metrics(self):
        # Returns the metrics descriptor dict: name -> {type, profiles, ...}.
        pass

    def _name(self):

        return self.name()

    def _enable(self, item):
        # Enable one metrics by name; unknown names are a hard error.
        if item not in self.metrics:

            raise Exception('Metrics not supported: %s' % item)

        if item not in self.enabled:

            self.enabled.add(item)

    def _config(self, item, conf):
        # Recognized keys: 'period' (sampling period override) and 'filter'
        # (node-id whitelist applied to wildcard nodes in OFFLINE mode).
        if item not in self.enabled:

            raise Exception('Metrics configure before enabled: %s' % item)

        if 'period' in conf:

            self.metrics[item]['period'] = conf['period']

        if 'filter' in conf:

            self.metrics[item]['filter'] = conf['filter']

    def _trace(self, duration):
        """Register every enabled metrics with the sampler, then sample for
        *duration* seconds into the workspace data file."""
        executor = Adb_executor()

        # NOTE(review): the sampler receives its own Adb_executor instance
        # while `executor` is used locally for the wildcard `ls` below —
        # confirm the duplicate executor is intentional.
        ret = self.sampler.init(Adb_executor())

        if not ret or not executor.connect():

            sys.exit('ERROR: Adb device connect failed.')

        # Register metrics profiles.

        for metrics_name in self.enabled:

            is_registed = False

            metrics_type = self.metrics[metrics_name]['type']

            period = None

            if metrics_type == 'DATA':

                # Per-metrics period wins; fall back to the global default.
                period = self.metrics[metrics_name].get('period', None)

                if period is None:

                    period = CONFIG['SAMPLING_PERIOD']

            for profile in self.metrics[metrics_name]['profiles']: # Find a suitable node to register

                if self.sampler.test_source(profile['nodes'], source_type = 'NODES'):

                    nodelist = profile['nodes']

                    userdata = ''

                    # Expand wildcard node patterns ('*'/'?') on the device;
                    # userdata collects the id each wildcard matched, one
                    # per line, for batch_nodes_parser() to map back later.

                    if type(profile['nodes']) == str and ('*' in profile['nodes'] or '?' in profile['nodes']):

                        results = executor.exec('ls -l %s | awk \'{print $NF}\'' % profile['nodes']).split('\n')

                        results = [x for x in results if x]

                        # Split the pattern at the wildcard; the matched id
                        # is the path segment between prefix and suffix.
                        pattern = re.split('\*|\?', profile['nodes'])

                        for row in results:

                            node_id = row[len(pattern[0]):-len(pattern[-1])]

                            userdata += node_id + '\n'

                        if CONFIG['SAMPLING_MODE'] == 'OFFLINE':

                            # Offline sampling needs concrete paths: apply
                            # the optional id filter, else use all nodes.
                            id_filter = self.metrics[metrics_name].get('filter', list())

                            if id_filter:

                                nodelist = list()

                                userdata = ''

                                for row in results:

                                    node_id = row[len(pattern[0]):-len(pattern[-1])]

                                    if str(node_id) in id_filter:

                                        nodelist.append(row)

                                        userdata += node_id + '\n'

                            else:

                                nodelist = results

                    is_registed = self.sampler.register_metrics(metrics_name, nodelist, profile['parser'], userdata = userdata if userdata else None, period = period, source_type = 'NODES', run_test = False)

                if is_registed:

                    break

            if not is_registed:

                print('WARNING: Can\'t enable metrics on device: %s' % metrics_name)

        # Start sampling.

        print('Start sampling: %s...' % self.name())

        data_file = self.get_workspace() + '%s.data' % self.get_name()

        self.sampler.start(data_file, duration)

        print('Done. (%s)' % data_file)

    def _parse(self):
        """Load the captured data file back into the sampler."""
        data_file = self.get_workspace() + '%s.data' % self.get_name()

        if not os.path.exists(data_file):

            sys.exit('ERROR: %s data file not found: ' % self.get_name() + data_file)

        ret = self.sampler.load(data_file)

        if not ret:

            sys.exit('ERROR: can not load %s data file.' % self.get_name() + data_file)

    def get_metrics(self, metrics_name, not_found_value = 'EXCEPTION'):
        # DATA metrics return the full sample list; INFO metrics carry a
        # single snapshot, so only its data payload is returned.
        if metrics_name not in self.metrics:

            raise Exception('Metrics not supported: %s' % metrics_name)

        result = self.sampler.get_metrics(metrics_name, raise_exception = (not_found_value == 'EXCEPTION'))

        if result is None:

            return None

        metrics_type = self.metrics[metrics_name]['type']

        return result if metrics_type == 'DATA' else result[0]['data']

    def batch_nodes_parser(self, outputs, userdata = None, parser = None):
        # Map each output line back to the node id recorded in userdata at
        # registration time, applying the optional per-value parser.
        results = dict()

        vals = outputs.split('\n')

        keys = userdata.split('\n')

        for idx, val in enumerate(vals):

            if keys[idx]:

                results[keys[idx]] = parser(val) if parser else val

        return results
--------------------------------------------------------------------------------
/framework/sources/ftrace.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import time
4 | import threading
5 | import pandas
6 | import re
7 |
8 | from framework.source import Source
9 | from framework.helpers import take_ftrace_buffer
10 | from framework.executors.adb_executor import Adb_executor
11 | from framework.config import CONFIG
12 |
class Ftrace(Source):
    """Data source that captures raw kernel ftrace events over adb and
    parses them into a pandas DataFrame."""

    def __init__(self):

        super().__init__()

        # Ftrace event names to enable, e.g. 'sched/sched_switch'.
        self.events = set()

        # Parsed trace as a DataFrame; filled by _parse().
        self.df = None

        # Number of callers currently inside wait_trace_on(); the first
        # one prints the progress messages.
        self.wait_num = 0

        self.wait_num_mutex = threading.Lock()

    def _name(self):

        return 'ftrace'

    def _enable(self, item):

        self.events.add(item)

    def __init_instance(self):
        """Pick the ftrace buffer to use: the global one if it is free,
        otherwise a private 'tracecat' instance (needs root)."""
        self.ftrace_instance = '/sys/kernel/tracing'

        try:

            # Try to use global buffer first

            take_ftrace_buffer('ftrace')

        except:

            # Global buffer is already taken: create a unique instance.

            self.ftrace_instance = '/sys/kernel/tracing/instances/tracecat'

            executor = Adb_executor()

            if not executor.connect():

                sys.exit('ERROR: Adb device not found.')

            # mkdir output carries the error text if it failed.
            errors = executor.exec('mkdir %s' % self.ftrace_instance)

            # Trigger the exception

            if 'Permission denied' in errors:

                sys.exit('ERROR: Can\'t enable ftrace instance, you may need root authority.')

    def _pre_trace(self, duration):

        self.__init_instance()

        self.__reset_ftrace()

    def __reset_ftrace(self, executor = None):
        """Stop tracing, clear buffer/events, and (re)apply clock and
        buffer-size settings."""
        if not executor:

            executor = Adb_executor()

            if not executor.connect():

                sys.exit('ERROR: Adb device not found.')

        # 'boot' clock keeps timestamps consistent across suspend.
        executor.exec('echo boot > %s/trace_clock' % self.ftrace_instance)

        executor.exec('echo 0 > %s/tracing_on' % self.ftrace_instance)

        executor.exec('echo > %s/trace' % self.ftrace_instance)

        executor.exec('echo > %s/set_event' % self.ftrace_instance)

        executor.exec('echo %s > %s/buffer_size_kb' % (CONFIG['FTRACE_BUFFER_SIZE'], self.ftrace_instance))

    def _trace(self, duration):
        """Enable the requested events, stream trace_pipe on the device for
        *duration* seconds, then pull the capture into the workspace."""
        executor = Adb_executor()

        executor.connect()

        local_file = self.get_workspace() + 'ftrace.data'

        remote_file = CONFIG['REMOTE_ROOT'] + '/ftrace/ftrace.data'

        print('Start ftrace...')

        self.__reset_ftrace(executor)

        executor.exec('mkdir ' + os.path.dirname(remote_file))

        # Enable ftrace events

        for event in self.events:

            errors = executor.exec('echo %s >> %s/set_event' % (event, self.ftrace_instance))

            if 'Permission denied' in errors:

                sys.exit('ERROR: Can\'t set ftrace events, you may need root authority.')

        # Read set_event back to verify every event was accepted.
        events = executor.exec('cat %s/set_event' % self.ftrace_instance)

        events = events.split()

        for event in self.events:

            if event not in events:

                sys.exit('ERROR: Invalid ftrace event: %s' % event)

        # Start tracing

        executor.exec('rm -rf %s' % remote_file)

        executor.exec('echo 1 > %s/tracing_on' % self.ftrace_instance)

        # +1s of slack so the pipe outlives the nominal duration.
        executor.exec('timeout %s cat %s/trace_pipe > %s; sync' % (duration + 1, self.ftrace_instance, remote_file))

        self.__reset_ftrace(executor)

        # Get files back

        print('Fetching ftrace data file...')

        executor.pull(remote_file, local_file)

        print('Done. (%s)' % local_file)

    def _parse(self):
        """Parse the raw capture into self.df, caching it as a pickle (plus
        an xlsx export) so re-runs skip the slow text parse."""
        raw_file = self.get_workspace() + 'ftrace.data'

        pkl_file = self.get_workspace() + 'ftrace.pkl'

        if not os.path.exists(raw_file):

            sys.exit('ERROR: Ftrace data file not found.')

        if not os.path.exists(pkl_file):

            self.df = self.__load_raw_file(raw_file)

            print('Saving ftrace data to pickle...')

            self.df.to_pickle(pkl_file)

            self.df.to_excel(pkl_file[:-4] + '.xlsx')

        else:

            print('Loading ftrace data from pickle...')

            self.df = self.__load_pkl_file(pkl_file)

    def __load_raw_file(self, file_path):
        """Parse the raw trace_pipe text into a DataFrame.

        Expected line shape: '<task>-<pid> [CPU] <status> <ts>: <func>: <data>'
        with a 3-digit bracketed CPU field used as the anchor.
        """
        # Column lists instead of per-row dicts, for pandas performance.
        results = {
            'task': list(),
            'pid': list(),
            'cpu': list(),
            'status': list(),
            'timestamp': list(),
            'function': list(),
            'data': list(),
        }

        with open(file_path, 'r') as file:

            lines = file.readlines()

        total = len(lines)

        count = 0

        # Progress is only shown for large files (share is then >= 1000,
        # so the modulo below cannot divide by zero).
        share = int(total / 100)

        for line in lines:

            if total > 100000 and count % share == 0:
                print('\rProcessing... (%d%%)' % (count / share), end='', flush=True)

            if not line:
                continue

            try:

                # Anchor on the 3-digit bracketed CPU field, e.g. '[003]'.
                cpu_match = re.search('\[[0-9]{3}\]', line).span()

                part_a = line[:cpu_match[0]]

                part_b = line[cpu_match[1]:]

                # rsplit: task names may themselves contain '-'.
                args_a = part_a.rsplit('-', 1)

                args_b = part_b.split(None, 2)

                results['task'].append(args_a[0].strip())

                results['pid'].append(args_a[1].strip())

                results['cpu'].append(int(line[cpu_match[0]+1:cpu_match[1]-1]))

                results['status'].append(args_b[0])

                # Timestamp is seconds with a trailing ':'; store as ns.
                results['timestamp'].append(int(float(args_b[1][:-1]) * 1000000000))

                parts = args_b[2].split(':', 1)

                results['function'].append(parts[0])

                results['data'].append(parts[1].strip())

            except Exception as e:

                # NOTE(review): if parsing fails after some appends, the
                # column lists can end up with unequal lengths — confirm
                # malformed lines always fail before the first append.
                if 'LOST' in line and 'EVENTS' in line:
                    print('WARNING: Ftrace data lost, try enlarge the ftrace buffer size.')
                else:
                    sys.exit('ERROR: Parsing ftrace data line error:\n%s%s' % (line, e))

            count += 1

        print('\rOK. (%d rows parsed)' % total)

        return pandas.DataFrame(results)

    def __load_pkl_file(self, file_path):
        # Fast path: reload the previously parsed DataFrame.
        return pandas.read_pickle(file_path)

    def wait_trace_on(self):
        """Block (up to ~3 s) until the device reports tracing_on=1.

        Returns True when tracing is on, False on timeout. Only the first
        concurrent waiter prints progress messages.
        """
        self.wait_num_mutex.acquire()

        first_waiter = self.wait_num == 0

        self.wait_num += 1

        self.wait_num_mutex.release()

        executor = Adb_executor()

        executor.connect()

        for cnt in range(0, 30):

            trace_on = int(executor.exec('cat %s/tracing_on' % self.ftrace_instance))

            if trace_on:

                # Small settle delay before reporting tracing active.
                time.sleep(0.1)

                if first_waiter and cnt > 0:
                    print('Trace start...')

                return True

            if first_waiter and cnt == 0:
                print('Waiting for ftrace on...')

            time.sleep(0.1)

        return False

    def get_data(self, task = None, pid = None, cpu = None, function = None, start_time = None, end_time = None):
        """Return a filtered copy of the parsed trace; every filter is
        optional and they combine with AND."""
        df = self.df.copy()

        if task is not None:

            df = df[df.task == task]

        if pid is not None:

            df = df[df.pid == pid]

        if cpu is not None:

            df = df[df.cpu == cpu]

        if function is not None:

            df = df[df.function == function]

        if start_time is not None:

            df = df[df.timestamp >= start_time]

        if end_time is not None:

            df = df[df.timestamp <= end_time]

        return df
309 |
--------------------------------------------------------------------------------
/framework/sources/instruments.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import pandas
4 | import platform
5 | import xml.etree.ElementTree as xml
6 |
7 | from framework.source import Source
8 | from framework.helpers import get_unique_list, get_runtime_path
9 |
10 | #FIXME: auto select device
11 |
class Instruments(Source):
    """Data source wrapping Apple Instruments (xcrun xctrace) for iOS/macOS
    profiling; parses the recorded cpu-state table into a DataFrame."""

    def __init__(self):

        super().__init__()

        # 'trace_info': trace-wide metadata; 'cpu_state': DataFrame of
        # scheduler intervals. Both filled by _parse().
        self.data = {
            'trace_info': dict(),
            'cpu_state': None
        }

    def _name(self):

        return 'instruments'

    def _enable(self, item):

        pass

    def _trace(self, duration):
        """Record an Instruments trace using the bundled template."""
        if platform.system() != 'Darwin':

            sys.exit('ERROR: xcrun is only supported on MacOS.')

        trace_path = self.get_workspace() + 'instruments.trace'

        # FIXME: auto select device — the --device argument is empty.
        os.system('xcrun xctrace record --device "" --template "%s/libs/instruments/tracecat.tracetemplate" --time-limit %ds --all-processes --append-run --output %s' % (get_runtime_path(), duration, trace_path))

        if not os.path.exists(trace_path):

            sys.exit('ERROR: Create trace file failed, please check error message.')

        print('Done. (%s)' % trace_path)

    def _parse(self):

        self.__parse_cpu_state()

    def __parse_cpu_state(self):
        """Export the trace's cpu-state table to XML and parse it into
        self.data['cpu_state'] plus self.data['trace_info']."""
        # Column lists instead of per-row dicts, for pandas performance.

        results = {
            'start_time': list(),
            'duration': list(),
            'end_time': list(),
            'cpu_id': list(),
            'cpu_state': list(),
            'process_id': list(),
            'process_name': list(),
            'thread_id': list(),
            'thread_name': list(),
            'priority': list(),
        }

        # Export the table as XML (skipped if already exported).

        print('Exporting cpu_state...')

        trace_path = self.get_workspace() + 'instruments.trace'

        cpu_state_path = self.get_workspace() + 'cpu_state.xml'

        if not os.path.exists(cpu_state_path):

            os.system('xcrun xctrace export --input %s --xpath \'/trace-toc/run[@number="1"]/data/table[@schema="cpu-state"]\' --output %s' % (trace_path, cpu_state_path))

            if not os.path.exists(cpu_state_path):

                sys.exit('ERROR: Failed. Please check error message.')

        else:

            print('Already exported (cpu_state.xml), skip.')

        print('Success. (%s)' % cpu_state_path)

        # Parse the XML.

        print('Parsing cpu_state...')

        tree = xml.parse(cpu_state_path)

        root = tree.getroot()

        # xctrace deduplicates repeated values: a cell appears once with an
        # 'id' and later rows reference it by 'ref'. Cache nodes by id.
        refs = dict()

        for row in root.iter('row'): # loop for each row

            new = {
                'start_time': None,
                'duration': None,
                'end_time': None,
                'cpu_id': None,
                'cpu_state': None,
                'process_id': None,
                'process_name': None,
                'thread_id': None,
                'thread_name': None,
                'priority': None
            }

            for col in row: # loop for each col

                # Resolve the actual value node (direct or by back-reference).

                node = None

                if 'id' in col.attrib:

                    refs[col.attrib['id']] = col

                    node = col

                elif 'ref' in col.attrib:

                    node = refs[col.attrib['ref']]

                # Extract each known column into the row dict.

                if col.tag == 'start-time':

                    new['start_time'] = int(node.text)

                elif col.tag == 'duration':

                    new['duration'] = int(node.text)

                elif col.tag == 'core':

                    new['cpu_id'] = int(node.text)

                elif col.tag == 'core-state':

                    new['cpu_state'] = node.text

                elif col.tag == 'process':

                    new['process_id'] = node.find('pid').attrib['fmt']

                    new['process_name'] = node.attrib['fmt']

                elif col.tag == 'thread':

                    new['thread_id'] = node.find('tid').attrib['fmt']

                    new['thread_name'] = node.attrib['fmt']

                elif col.tag == 'sched-priority':

                    new['priority'] = int(node.text)

            # NOTE(review): assumes every row carries start-time and
            # duration; a row missing either would raise TypeError here.
            new['end_time'] = new['start_time'] + new['duration']

            for key, val in new.items():

                results[key].append(val)

        # Build the DataFrame and derive trace-wide metadata.

        df = pandas.DataFrame(results)

        df.sort_values(by=['start_time'], inplace=True)

        df.reset_index(drop=True, inplace=True)

        self.data['cpu_state'] = df

        self.data['trace_info']['start_time'] = df.start_time.min()

        self.data['trace_info']['end_time'] = df.end_time.max()

        self.data['trace_info']['duration'] = self.data['trace_info']['end_time'] - self.data['trace_info']['start_time']

        self.data['trace_info']['cpu_list'] = sorted(df.cpu_id.unique().tolist())

        print('Done.')

    def get_cpu_state(self):
        # Copy so callers can't mutate the cached DataFrame.
        return self.data['cpu_state'].copy()

    def get_process_list(self):
        # Unique (process_id, process_name) pairs seen in the trace.
        return get_unique_list(self.data['cpu_state'], {'process_id': str, 'process_name': str})

    def get_trace_info(self):

        return self.data['trace_info'].copy()
--------------------------------------------------------------------------------
/framework/sources/perfetto.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import platform
4 |
5 | from perfetto.trace_processor import TraceProcessor
6 |
7 | from framework.source import Source
8 | from framework.helpers import get_runtime_path, take_ftrace_buffer
9 | from framework.executors.adb_executor import Adb_executor
10 | from framework.config import CONFIG
11 |
12 | class Perfetto(Source):
13 |
14 | def __init__(self):
15 |
16 | super().__init__()
17 |
18 | self.config = self.__init_config()
19 |
20 | self.processor = None
21 |
22 | self.enabled = set()
23 |
24 | def __del__(self):
25 |
26 | if self.processor:
27 |
28 | self.processor.close()
29 |
30 | def __get_path(self, file):
31 |
32 | paths = {
33 | 'loc_trace': self.get_workspace() + 'perfetto.trace',
34 | 'loc_config': self.get_workspace() + 'perfetto.config',
35 | 'dev_trace': '/data/misc/perfetto-traces/trace'
36 | }
37 |
38 | return paths[file]
39 |
40 | def __init_config(self):
41 |
42 | config = ''
43 |
44 | runtime_path = get_runtime_path()
45 |
46 | config_file = runtime_path + '/configs/perfetto/perfetto.conf'
47 |
48 | if not os.path.exists(config_file):
49 |
50 | sys.exit('ERROR: Perfetto config file is missing.')
51 |
52 | with open(config_file, 'r') as file:
53 |
54 | lines = file.readlines()
55 |
56 | for line in lines:
57 |
58 | if 'duration_ms:' not in line:
59 |
60 | config += line
61 |
62 | return config
63 |
64 | def __set_config(self, config):
65 |
66 | self.config += config
67 |
68 | def __get_config(self, duration):
69 |
70 | return self.config + 'duration_ms: %d' % (duration * 1000)
71 |
72 | def _name(self):
73 |
74 | return 'perfetto'
75 |
76 | def enable(self, item = None):
77 |
78 | take_ftrace_buffer('perfetto')
79 |
80 | super().enable(item)
81 |
82 | def _enable(self, item):
83 |
84 | if item in self.enabled:
85 |
86 | return
87 |
88 | self.enabled.add(item)
89 |
90 | if item == 'sched':
91 |
92 | self.__set_config('''
93 | data_sources: {
94 | config {
95 | name: "linux.process_stats"
96 | target_buffer: 1
97 | process_stats_config {
98 | scan_all_processes_on_start: true
99 | }
100 | }
101 | }
102 | data_sources: {
103 | config {
104 | name: "linux.ftrace"
105 | ftrace_config {
106 | ftrace_events: "sched/sched_switch"
107 | ftrace_events: "power/suspend_resume"
108 | ftrace_events: "sched/sched_wakeup"
109 | ftrace_events: "sched/sched_wakeup_new"
110 | ftrace_events: "sched/sched_waking"
111 | ftrace_events: "sched/sched_process_exit"
112 | ftrace_events: "sched/sched_process_free"
113 | ftrace_events: "task/task_newtask"
114 | ftrace_events: "task/task_rename"
115 | ftrace_events: "sched/sched_blocked_reason"
116 | buffer_size_kb: 2048
117 | drain_period_ms: 250
118 | }
119 | }
120 | }'''.strip() + '\n')
121 |
122 | elif item == 'cpu_freq':
123 |
124 | self.__set_config('''
125 | data_sources: {
126 | config {
127 | name: "linux.ftrace"
128 | ftrace_config {
129 | ftrace_events: "power/cpu_frequency"
130 | ftrace_events: "power/suspend_resume"
131 | buffer_size_kb: 2048
132 | drain_period_ms: 250
133 | }
134 | }
135 | }'''.strip() + '\n')
136 |
137 | elif item == 'cpu_idle':
138 |
139 | self.__set_config('''
140 | data_sources: {
141 | config {
142 | name: "linux.ftrace"
143 | ftrace_config {
144 | ftrace_events: "power/cpu_idle"
145 | ftrace_events: "power/suspend_resume"
146 | buffer_size_kb: 2048
147 | drain_period_ms: 250
148 | }
149 | }
150 | }'''.strip() + '\n')
151 |
152 | else:
153 | self.enabled.remove(item)
154 |
155 | def _trace(self, duration):
156 |
157 | executor = Adb_executor()
158 |
159 | if not executor.connect():
160 |
161 | sys.exit('ERROR: Adb device not found.')
162 |
163 | executor.exec('setprop persist.traced.enable 1') # enable trace service
164 |
165 | # save config file
166 |
167 | config = self.__get_config(duration)
168 |
169 | with open(self.__get_path('loc_config'), 'w') as file:
170 |
171 | file.write(config)
172 |
173 | # start tracing
174 |
175 | print('Start tracing...')
176 |
177 | ret_a = executor.exec('ls -l %s' % self.__get_path('dev_trace'))
178 |
179 | execute_su = 'su -c' if CONFIG['EXEC_WITH_SU'] else ''
180 |
181 | os.system('adb shell %s perfetto --txt -c - -o %s < %s' % (execute_su, self.__get_path('dev_trace'), self.__get_path('loc_config')))
182 |
183 | #FIXME: temporary use os.system, exec cant run well on OPPO reno4
184 | #executor.exec('perfetto --txt -c - -o %s <= 100000 and len(results) % 100 == 0:
233 |
234 | print('\rProcessing... (%d)' % len(results), end='', flush=True)
235 |
236 | results.append(obj.__dict__.copy())
237 |
238 | if len(results) >= 100000:
239 |
240 | print('\rOK. (%d) ' % len(results)) # Use space to overwrite exsit text
241 |
242 | return results
243 |
    def get_trace_info(self):
        """Parse global metadata (time range and cpu list) from the trace.

        Returns a dict with 'start_time', 'end_time', 'duration' (trace
        clock units) and 'cpu_list'. Times come from the counter table,
        falling back to the sched table when no counters were recorded;
        exits the process when neither table is usable.
        """

        trace_info = {}

        # Get trace time info

        query = 'select min(ts) as start_time, max(ts) as end_time from counter'

        df = self.processor.query(query).as_pandas_dataframe()

        # If counter is empty, try to get trace time from sched table

        if df.iloc[0]['start_time'] is None or df.iloc[0]['end_time'] is None:

            query = 'select min(ts) as start_time, max(ts_end) as end_time from sched'

            df = self.processor.query(query).as_pandas_dataframe()

            if df.iloc[0]['start_time'] is None or df.iloc[0]['end_time'] is None:

                sys.exit('ERROR: Failed to parse trace start / end time from perfetto trace.')

        trace_info['start_time'] = int(df.iloc[0]['start_time'])
        trace_info['end_time'] = int(df.iloc[0]['end_time'])
        trace_info['duration'] = trace_info['end_time'] - trace_info['start_time']

        # Get trace cpu info

        query = 'select distinct(cpu) as cpu from cpu_counter_track order by cpu asc'

        df = self.processor.query(query).as_pandas_dataframe()

        # If cpu_counter_track is empty, fall back to the sched table for the cpu list

        if not df['cpu'].tolist():

            query = 'select distinct(cpu) as cpu from sched order by cpu asc'

            df = self.processor.query(query).as_pandas_dataframe()

            if not df['cpu'].tolist():

                sys.exit('ERROR: Failed to parse cpu list from perfetto trace.')

        trace_info['cpu_list'] = df['cpu'].tolist()

        # Done

        return trace_info
--------------------------------------------------------------------------------
/framework/sources/procfs.py:
--------------------------------------------------------------------------------
1 | from framework.sources.basefs import Basefs
2 |
class Procfs(Basefs):
    """Data source that samples nodes under /proc."""

    def __init__(self):

        super().__init__()

    def name(self):
        """Source identifier used by the framework registry."""

        return 'procfs'

    def metrics(self):
        """Declare the metrics this source can sample and their parsers."""

        metrics = {

            'stat': {
                'type': 'DATA',
                'profiles': [
                    {'nodes': '/proc/stat', 'parser': self.__parse_stat},
                ]
            },

        }

        return metrics

    def __parse_stat(self, output):
        """Parse per-cpu jiffy counters from a /proc/stat dump.

        Returns {'cpu': {cpu_id: {field: jiffies}}} with one entry per
        'cpuN' line; the aggregate 'cpu ' line is skipped. Guest fields
        default to 0 on old kernels that do not report them.
        """

        stat = {
            'cpu': dict()
        }

        # Field order as documented in proc(5) for the /proc/stat cpu lines.
        fields = ('user', 'nice', 'system', 'idle', 'iowait',
                  'irq', 'softirq', 'steal', 'guest', 'guest_nice')

        for line in output.split('\n'):

            # Per-cpu rows are 'cpuN ...'; the length guard avoids an
            # IndexError on a bare 'cpu' line, and line[3] != ' ' skips
            # the aggregate 'cpu' row.
            if line.startswith('cpu') and len(line) > 3 and line[3] != ' ':

                args = line.split()

                cpu_id = args[0].replace('cpu', '')

                values = [int(v) for v in args[1:11]]

                # Kernels older than 2.6.33 omit guest/guest_nice; pad.
                values += [0] * (len(fields) - len(values))

                stat['cpu'][cpu_id] = dict(zip(fields, values))

        return stat
--------------------------------------------------------------------------------
/framework/sources/profiler.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import time
4 | import click
5 | import json
6 | import pandas
7 |
8 | from framework.source import Source
9 | from framework.executors.adb_executor import Adb_executor
10 |
class Profiler(Source):
    """Data source backed by Snapdragon Profiler realtime CSV exports.

    The profiler run itself is driven manually by the user; this source
    records device timestamps around the run (_pre_trace) and parses the
    exported CSV afterwards (_parse).
    """

    # Register new metrics here.
    #
    # 'matcher' is a regex matched against the CSV 'Metric' column.
    # 'indexer' extracts a per-instance index such as the cpu number;
    # True means "reuse the matcher's capture group".
    #
    # NOTE: literal parentheses in metric names must be escaped — left
    # unescaped they are parsed as regex capture groups, so a pattern
    # like '^Read Total (Bytes/sec)$' would only ever match the string
    # 'Read Total Bytes/sec' and never the real metric name.

    metrics = {
        # CPU
        'cpu_branch_miss': {'matcher': '^CPU ([0-9]*) Branch Misses$', 'indexer': True},
        'cpu_cache_miss': {'matcher': '^CPU ([0-9]*) Cache Misses$', 'indexer': True},
        'cpu_cache_miss_r': {'matcher': '^CPU ([0-9]*) Cache Miss Ratio$', 'indexer': True},
        'cpu_cache_refs': {'matcher': '^CPU ([0-9]*) Cache Refs$', 'indexer': True},
        'cpu_clock': {'matcher': '^CPU ([0-9]*) Clock$', 'indexer': True},
        'cpu_cs': {'matcher': '^CPU ([0-9]*) Context Switches$', 'indexer': True},
        'cpu_freq': {'matcher': '^CPU ([0-9]*) Frequency$', 'indexer': True},
        'cpu_load': {'matcher': '^CPU ([0-9]*) Load$', 'indexer': True},
        'cpu_util': {'matcher': '^CPU ([0-9]*) % Utilization$', 'indexer': True},
        'cpu_cycles': {'matcher': '^CPU ([0-9]*) Cycles$', 'indexer': True},
        'cpu_inst': {'matcher': '^CPU ([0-9]*) Instructions$', 'indexer': True},
        'cpu_page_faults': {'matcher': '^CPU ([0-9]*) Page Faults$', 'indexer': True},
        # GPU
        'gpu_clock_per_sec': {'matcher': '^Clocks / Second$'},
        'gpu_bus_busy': {'matcher': '^GPU % Bus Busy$'},
        'gpu_util': {'matcher': '^GPU % Utilization$'},
        'gpu_freq': {'matcher': '^GPU Frequency$'},
        'gpu_temp': {'matcher': '^GPU Temperature$'},
        'gpu_bytes_per_frag': {'matcher': '^Avg Bytes / Fragment$'},
        'gpu_bytes_per_vert': {'matcher': '^Avg Bytes / Vertex$'},
        'gpu_total_read': {'matcher': r'^Read Total \(Bytes/sec\)$'},
        'gpu_sp_mem_read': {'matcher': r'^SP Memory Read \(Bytes/Second\)$'},
        'gpu_texture_mem_read': {'matcher': r'^Texture Memory Read BW \(Bytes/Second\)$'},
        'gpu_vertex_mem_read': {'matcher': r'^Vertex Memory Read \(Bytes/Second\)$'},
        'gpu_total_write': {'matcher': r'^Write Total \(Bytes/sec\)$'},
    }

    def __init__(self):
        """Set up empty bookkeeping; data is loaded later in _parse."""

        super().__init__()

        # Device timestamps (us) captured around the manual profiler run.
        self.trace_info = {
            'boot_time': None,
            'start_time': None,
            'end_time': None
        }

        # Parsed CSV as a DataFrame (set by _parse).
        self.data = None

        # trace_info as reloaded from disk (set by _parse).
        self.info = None

    def init_workspace(self, workspace):
        """Derive the data/info file paths inside the workspace."""

        super().init_workspace(workspace)

        self.data_file = self.get_workspace() + 'profiler.csv'

        self.info_file = self.get_workspace() + 'trace_info.json'

    def __get_config(self, duration):
        """No device-side config: the profiler is driven manually."""

        return None

    def _name(self):
        """Source identifier used by the framework registry."""

        return 'profiler'

    def _enable(self, item):
        """Nothing to enable; metric selection happens at parse time."""

        pass

    def _trace(self, duration):
        """Just wait out the run: tracing happens in Snapdragon Profiler."""

        time.sleep(duration)

    def _pre_trace(self, duration):
        """Ask the user to start the profiler and record device timestamps."""

        # Run snapdragon profiler manually.

        print('Please open snapdragon profiler and start realtime tracing...')

        confirm = click.confirm('Continue: Is the tracing started?', default=True)

        if not confirm:

            sys.exit('ERROR: Profiler data source canceled.')

        # Get start time.

        executor = Adb_executor()

        if not executor.connect():

            sys.exit('ERROR: Adb device not found.')

        # One shell round-trip: uptime (s) and wall-clock time (s) together.
        results = executor.exec('cat /proc/uptime | awk \'{print $1}\'; date +%s.%N')

        data = results.split()

        self.trace_info['start_time'] = int(float(data[1]) * 1000000) # us

        self.trace_info['boot_time'] = self.trace_info['start_time'] - int(float(data[0]) * 1000000) # us

        self.trace_info['end_time'] = self.trace_info['start_time'] + duration * 1000000

    def _post_trace(self, duration):
        """Ask the user to export the CSV, then persist trace_info to disk."""

        # Get end time.

        executor = Adb_executor()

        if not executor.connect():

            sys.exit('ERROR: Adb device not found.')

        # Stop snapdragon profiler manually.

        confirm = True

        while (confirm):

            print('Please stop snapdragon profiler and export the data file to the following path:')

            print(os.path.abspath(self.data_file))

            confirm = click.confirm('Continue: Is the file exported?', default=True)

            if confirm:

                # Check data file is there.

                if os.path.exists(self.data_file):

                    # Create info file.

                    with open(self.info_file, 'w') as file:

                        json.dump(self.trace_info, file)

                    break

                else:
                    print('WARNING: File not found, please check export path.')

            else:
                print('WARNING: Profiler data source canceled.')

    def _parse(self):
        """Load the exported CSV, clip it to the run window, rebase time."""

        # Load data & info file.

        self.data = pandas.read_csv(self.data_file)

        with open(self.info_file, 'r') as file:

            self.info = json.load(file)

        # Only keep the data between start_time and end_time

        self.data.drop(columns=['Timestamp'], inplace=True)

        self.data.rename(columns = {'Process': 'process', 'Metric': 'metric', 'TimestampRaw': 'timestamp', 'Value': 'value'}, inplace=True)

        self.data = self.data[(self.data.timestamp >= self.info['start_time']) & (self.data.timestamp <= self.info['end_time'])]

        # Rebase to boot time; us -> ns (presumably to match the other
        # sources' trace clock — TODO confirm).
        self.data.timestamp = (self.data.timestamp - self.info['boot_time']) * 1000

    def get_raw_metric(self, raw_metric):
        """Return rows whose metric name equals raw_metric, as dicts."""

        return self.data[self.data.metric==raw_metric].to_dict(orient='records')

    def get_metrics(self, metrics_name, raise_exception = True):
        """Return a DataFrame for one registered metric.

        Columns: optional 'index' (from the indexer capture group),
        'timestamp' and a value column named after metrics_name. Raises
        when data is not loaded, or when the metric is unknown and
        raise_exception is True (else returns None).
        """

        # Check metrics is valid.

        if self.data is None:

            raise Exception('Metrics data used before loaded.')

        if metrics_name not in self.metrics:

            if raise_exception:

                raise Exception('Metrics not supported: %s' % metrics_name)

            return None

        # Generate results

        matcher = self.metrics[metrics_name]['matcher']

        dataset = self.data[self.data.metric.str.match(matcher)]

        results = pandas.DataFrame()

        if 'indexer' in self.metrics[metrics_name]:

            indexer = self.metrics[metrics_name]['indexer']

            if indexer is True:

                indexer = matcher

            if indexer is not None:

                results['index'] = dataset.metric.str.extract(indexer, expand = False)

        results['timestamp'] = dataset.timestamp

        results[metrics_name] = dataset.value

        results.reset_index(drop=True, inplace=True)

        return results
221 |
--------------------------------------------------------------------------------
/framework/sources/simpleperf.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import time
4 | import pandas
5 |
6 | from framework.source import Source
7 | from framework.executors.adb_executor import Adb_executor
8 | from framework.config import CONFIG
9 |
class Simpleperf(Source):
    """Data source that samples PMU counters via Android's simpleperf.

    Runs `simpleperf stat` on the device (globally per-core, or attached
    to a single app), pulls the text report back, and parses it into a
    DataFrame of per-interval counter deltas.
    """

    def __init__(self):

        super().__init__()

        # PMU event names passed to `simpleperf stat -e`.
        self.events = set()

        self.period = 500 # use 500ms as default interval

        # Package name for app mode; None selects global (-a) mode.
        self.app = None

        # Parsed result DataFrame, filled by _parse().
        self.df = None

    def invoke_sources(self):
        """Also trace sched_process_exec via ftrace, so the simpleperf
        start timestamp can be recovered in __get_start_timestamp."""

        self.ftrace = self.invoke_source('ftrace', 'sched:sched_process_exec')

    def _name(self):
        """Source identifier used by the framework registry."""

        return 'simpleperf'

    def _enable(self, item):
        """Each enabled item is a PMU event name."""

        self.events.add(item)

    def _trace(self, duration):
        """Run `simpleperf stat` on the device for `duration` seconds and
        pull the report file into the workspace."""

        executor = Adb_executor()

        if not executor.connect():

            sys.exit('ERROR: Adb device not found.')

        local_file = self.get_workspace() + 'simpleperf.data'

        remote_file = CONFIG['REMOTE_ROOT'] + '/simpleperf/simpleperf.data'

        print('Start simpleperf...')

        # Wait for tracing on

        ret = self.ftrace.wait_trace_on()

        if not ret:

            sys.exit('ERROR: Wait for ftrace on timeout.')

        # Start simpleperf

        executor.exec('mkdir ' + os.path.dirname(remote_file))

        if self.app: # App Mode
            # NOTE: remove --use-devfreq-counters for compatibility
            errors = executor.exec('simpleperf stat --app %s -e %s --interval %d --duration %d -o %s' % (self.app, ','.join(self.events), self.period, duration, remote_file))
        else: # Global Mode
            # NOTE: remove --use-devfreq-counters for compatibility
            errors = executor.exec('simpleperf stat -a -e %s --per-core --interval %d --duration %d -o %s' % (','.join(self.events), self.period, duration, remote_file))

        # Presumably executor.exec returns the command's error output: any
        # non-empty result other than the benign multiplexing notice aborts.
        if errors:

            if 'multiplexing' in errors:
                print('WARNING: Simpleperf: Some events only run a subset of enabled time, using hardware counter multiplexing.')
            else:
                sys.exit('ERROR: Simpleperf error: ' + errors.strip())

        # Fetch perf data back

        print('Fetching simpleperf data file...')

        executor.pull(remote_file, local_file)

        print('Done. (%s)' % local_file)

    def _parse(self):
        """Load parsed results, using a pickle cache next to the raw file."""

        raw_file = self.get_workspace() + 'simpleperf.data'

        pkl_file = self.get_workspace() + 'simpleperf.pkl'

        if not os.path.exists(pkl_file):

            self.df = self.__load_raw_file(raw_file)

            print('Saving simpleperf data to pickle...')

            self.df.to_pickle(pkl_file)

            # Also export an .xlsx copy for manual inspection.
            self.df.to_excel(pkl_file[:-4] + '.xlsx')

        else:
            print('Loading simpleperf data from pickle...')

            self.df = self.__load_pkl_file(pkl_file)

    def __get_start_timestamp(self):
        """Recover simpleperf's start time from the ftrace exec event.

        Expects exactly one sched_process_exec record for the simpleperf
        binary; returns its timestamp plus a fixed 27ms startup offset.
        """

        df = self.ftrace.get_data(task='simpleperf', function='sched_process_exec')

        if df.empty:

            # Fallback to search all task
            df = self.ftrace.get_data(function='sched_process_exec')

            df = df[df['data'].str.startswith('filename=/system/bin/simpleperf')]

        if len(df) != 1:

            sys.exit('ERROR: Can\'t get simpleperf timestamp data.')

        ts = df.iloc[0].timestamp

        ts_fix = 27 * 1000000 # adjust real start time for 27ms

        return ts + ts_fix

    def __load_raw_file(self, file_path):
        """Parse a `simpleperf stat` text report into a DataFrame.

        The report is a sequence of interval blocks: a '#' header line,
        counter rows, then a 'Total test time:' line carrying the elapsed
        time used to timestamp the whole block. Cumulative counts are
        converted to per-interval deltas and normalized by the fraction
        of time each counter was actually scheduled on the PMU.
        """

        results = {
            'timestamp': list(),
            'cpu': list(),
            'event': list(),
            'count': list(),
            'count_normalize': list(),
            'runtime_percent': list(),
            'remark': list(),
        }

        start_time = self.__get_start_timestamp()

        # Last cumulative count per (event, cpu), for delta computation.
        prev_count = dict()

        with open(file_path, 'r') as file:

            lines = file.readlines()

            for line in lines:

                if not line.strip():

                    continue

                if line.lstrip()[0] == '#': # Confirm data format by parsing the headline

                    columns = line.split()

                    per_cpu = True if columns[1] == 'cpu' else False # If we have per cpu data (app mode or global mode)

                elif line.lstrip()[0].isnumeric():

                    # NOTE(review): assumes a '#' header line precedes the
                    # first data row, otherwise per_cpu is unbound here.

                    if per_cpu:

                        l_parts = line.split(None, 4)

                    else:

                        l_parts = line.split(None, 3)

                        # Keep the field layout uniform: slot 0 is the cpu.
                        l_parts.insert(0, None)

                    cpu = l_parts[0]

                    count = int(l_parts[1].replace(',', ''))

                    event = l_parts[2]

                    r_parts = l_parts[4].rsplit(None, 1)

                    if len(r_parts) == 1: # count/runtime data may not exist

                        r_parts.insert(0, None)

                    remark = r_parts[0]

                    # e.g. '(97.32%)' -> 0.9732
                    runtime_percent = float(r_parts[1][1:-2]) / 100

                    results['cpu'].append(cpu)

                    results['event'].append(event)

                    results['remark'].append(remark)

                    results['runtime_percent'].append(runtime_percent)

                    if event not in prev_count:

                        prev_count[event] = dict()

                    if cpu not in prev_count[event]:

                        prev_count[event][cpu] = 0

                    # Counters are cumulative; store the per-interval delta.
                    count_delta = count - prev_count[event][cpu]

                    results['count'].append(count_delta)

                    count_normalize = int(count_delta / runtime_percent)

                    results['count_normalize'].append(count_normalize)

                    prev_count[event][cpu] = count

                elif line[:16] == 'Total test time:':

                    # Back-fill the timestamp for all rows of this interval.
                    cells = len(results['event']) - len(results['timestamp'])

                    timestamp = int(float(line[17:].split()[0]) * 1000000000) + start_time

                    results['timestamp'].extend([timestamp] * cells)

        df = pandas.DataFrame(results)

        df.sort_values(['timestamp', 'event', 'cpu'], inplace=True)

        df.reset_index(drop=True, inplace=True)

        return df

    def __load_pkl_file(self, file_path):
        """Load a previously parsed DataFrame from its pickle cache."""

        return pandas.read_pickle(file_path)

    def set_period(self, period):
        """Override the sampling interval (ms); falsy values keep default."""

        if period:

            self.period = period

    def set_app(self, app):
        """Select app mode by package name; falsy values keep global mode."""

        if app:

            self.app = app

    def get_data(self, event = None, cpu = None, start_time = None, end_time = None):
        """Return a filtered copy of the parsed DataFrame.

        event may be a single name or a list of names; cpu and the time
        bounds are optional inclusive filters.
        """

        df = self.df.copy()

        if type(event) is str:

            df = df[df.event == event]

        if type(event) is list:

            df = df[df.event.isin(event)]

        if cpu is not None:

            df = df[df.cpu == cpu]

        if start_time is not None:

            df = df[df.timestamp >= start_time]

        if end_time is not None:

            df = df[df.timestamp <= end_time]

        return df
269 |
--------------------------------------------------------------------------------
/framework/sources/sysfs.py:
--------------------------------------------------------------------------------
1 | from framework.sources.basefs import Basefs
2 |
class Sysfs(Basefs):
    """Data source that samples device nodes under /sys (plus a few /proc
    clock nodes). Each metric lists alternative node profiles for the
    devices seen so far; profile selection is handled by Basefs."""

    def __init__(self):

        super().__init__()

    def name(self):
        """Source identifier used by the framework registry."""

        return 'sysfs'

    def metrics(self):
        """Declare sampled metrics and their per-device node profiles.

        'DATA' metrics are sampled repeatedly, 'INFO' metrics are read
        once. All frequency parsers normalize to kHz (per the trailing
        comments on each profile).
        """

        metrics = {

            'cpu_freq': {
                'type': 'DATA',
                'profiles': [
                    {'nodes': '/sys/devices/system/cpu/cpu?/cpufreq/scaling_cur_freq', 'parser': lambda x, y: self.batch_nodes_parser(x, y, int)}, # FindX2, Mate40 (kHz)
                ]
            },

            'cpu_freq_table': {
                'type': 'INFO',
                'profiles': [
                    {'nodes': '/sys/devices/system/cpu/cpu?/cpufreq/scaling_available_frequencies', 'parser': lambda x, y: self.batch_nodes_parser(x, y, lambda z: [int(freq) for freq in z.split()])}, # FindX2, Mate40 (kHz)
                ]
            },

            'gpu_freq': {
                'type': 'DATA',
                'profiles': [
                    {'nodes': '/sys/kernel/gpu/gpu_clock', 'parser': lambda x : int(x) * 1000}, # FindX2 (kHz)
                    {'nodes': '/sys/class/kgsl/kgsl-3d0/devfreq/cur_freq', 'parser': lambda x : int(int(x) / 1000)}, # FindX2 (kHz)
                    {'nodes': '/sys/class/devfreq/gpufreq/cur_freq', 'parser': lambda x : int(int(x) / 1000)}, # Mate40 (kHz)
                    {'nodes': '/sys/devices/platform/1c500000.mali/cur_freq', 'parser': lambda x : int(x)}, # Pixel6Pro (kHz)
                    {'nodes': '/sys/kernel/ged/hal/current_freqency', 'parser': lambda x : int(x.split()[1])}, # M? (kHz) # 'freqency' spelling mistake is in the kernel node name
                    {'nodes': '/sys/class/devfreq/13000000.mali/cur_freq', 'parser': lambda x : int(int(x) / 1000)}, # M? (kHz) # Seems never change
                ]
            },

            'gpu_freq_table': {
                'type': 'INFO',
                'profiles': [
                    {'nodes': '/sys/kernel/gpu/gpu_freq_table', 'parser': lambda x : [int(freq) * 1000 for freq in x.split()]}, # FindX2 (kHz)
                    {'nodes': '/sys/class/devfreq/gpufreq/available_frequencies', 'parser': lambda x : [int(int(freq) / 1000) for freq in x.split()]}, # Mate40 (kHz)
                    {'nodes': '/sys/devices/platform/1c500000.mali/available_frequencies', 'parser': lambda x : [int(freq) for freq in x.split()]}, # Pixel6Pro (kHz)
                    {'nodes': '/sys/class/devfreq/13000000.mali/available_frequencies', 'parser': lambda x : [int(int(freq) / 1000) for freq in x.split()]}, # M? (kHz)
                ]
            },

            'ddr_freq': {
                'type': 'DATA',
                'profiles': [
                    {'nodes': '/sys/devices/platform/1c00f000.dvfsrc/helio-dvfsrc/dvfsrc_dump', 'parser': self.__parse_helio_ddr_freq}, # M*9000 (kHz)
                    {'nodes': '/sys/kernel/debug/clk/measure_only_mccc_clk/clk_measure', 'parser': lambda x : int(int(x) / 1000)}, # FindX2 (kHz)
                    {'nodes': '/proc/clk/mc_cc_debug_mux/clk_measure', 'parser': lambda x : int(int(x) / 1000)}, # 8+ (kHz)
                    {'nodes': '/sys/class/devfreq/ddrfreq/cur_freq', 'parser': lambda x : int(int(x) / 1000)}, # Mate40 (kHz)
                    {'nodes': '/sys/class/devfreq/17000010.devfreq_mif/cur_freq', 'parser': lambda x : int(x)}, # Pixel6Pro (kHz)
                    {'nodes': '/sys/class/devfreq/mtk-dvfsrc-devfreq/cur_freq', 'parser': lambda x : int(int(x) / 1000)}, # M? (kHz)
                    {'nodes': '/sys/devices/system/cpu/bus_dcvs/DDR/cur_freq', 'parser': lambda x : int(x)}, # 8GEN1 (kHz)


                ]
            },

            'ddr_freq_table': {
                'type': 'INFO',
                'profiles': [
                    {'nodes': '/sys/class/devfreq/ddrfreq/available_frequencies', 'parser': lambda x : [int(int(freq) / 1000) for freq in x.split()]}, # Mate40 (kHz)
                    {'nodes': '/sys/class/devfreq/17000010.devfreq_mif/available_frequencies', 'parser': lambda x : [int(freq) for freq in x.split()]}, # Pixel6Pro (kHz)
                    {'nodes': '/sys/class/devfreq/mtk-dvfsrc-devfreq/available_frequencies', 'parser': lambda x : [int(int(freq) / 1000) for freq in x.split()]}, # M? (kHz)
                    {'nodes': '/sys/devices/system/cpu/bus_dcvs/DDR/available_frequencies', 'parser': lambda x : [int(freq) for freq in x.split()]}, # 8GEN1 (kHz)
                ]
            },

            'dsu_freq': {
                'type': 'DATA',
                'profiles': [
                    {'nodes': '/sys/class/devfreq/18590000.qcom,devfreq-l3:qcom,cpu?-cpu-l3-lat/cur_freq', 'parser': lambda x, y: self.batch_nodes_parser(x, y, lambda z: int(int(z) / 1000))}, # FindX2 (kHz)
                ]
            },

            'thermal_zone': {
                'type': 'DATA',
                'profiles': [
                    {'nodes': '/sys/class/thermal/thermal_zone*/temp', 'parser': lambda x, y: self.batch_nodes_parser(x, y, lambda z: int(z) if z.lstrip('-').isdigit() else 0)}, # FindX2
                    {'nodes': '/sys/devices/virtual/thermal/thermal_zone*/temp', 'parser': lambda x, y: self.batch_nodes_parser(x, y, lambda z: int(z) if z.lstrip('-').isdigit() else 0)}, # Mate40Pro
                ]
            },
        }

        return metrics

    def __parse_helio_ddr_freq(self, output):
        """Extract the DDR frequency (kHz) from a MediaTek dvfsrc dump.

        Scans for a line starting with 'DDR' and ending with 'khz' and
        returns its third whitespace-separated token as an int (assumes
        the frequency sits at that position — TODO confirm against a
        dvfsrc_dump sample). Returns None implicitly when no line matches.
        """

        lines = output.split('\n')

        for line in lines:

            if line[:3] == 'DDR' and line[-3:] == 'khz':

                args = line.split()

                ddr_freq = int(args[2])

                return ddr_freq
109 |
--------------------------------------------------------------------------------
/libs/instruments/tracecat.tracetemplate:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kernel-cyrus/tracecat/c692f2ad91a014f22739e828eee0015957b5e4cd/libs/instruments/tracecat.tracetemplate
--------------------------------------------------------------------------------
/libs/perfetto/linux-amd64/trace_processor_shell:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kernel-cyrus/tracecat/c692f2ad91a014f22739e828eee0015957b5e4cd/libs/perfetto/linux-amd64/trace_processor_shell
--------------------------------------------------------------------------------
/libs/perfetto/mac-amd64/trace_processor_shell:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kernel-cyrus/tracecat/c692f2ad91a014f22739e828eee0015957b5e4cd/libs/perfetto/mac-amd64/trace_processor_shell
--------------------------------------------------------------------------------
/libs/perfetto/windows-amd64/trace_processor_shell.exe:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kernel-cyrus/tracecat/c692f2ad91a014f22739e828eee0015957b5e4cd/libs/perfetto/windows-amd64/trace_processor_shell.exe
--------------------------------------------------------------------------------
/modules/app_load/app_load_module.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import pandas
3 |
4 | from framework.module import Module
5 | from framework.helpers import sub_slices, get_time, pick_next_window, get_unique_list
6 |
class App_load_module(Module):
    """Per-process cpu usage on each core, parsed from a perfetto trace."""

    def __init__(self):

        super().__init__()

    def get_name(self):
        """Module identifier used on the command line."""

        return 'app_load'

    def get_desc(self):
        """One-line description shown in the module list."""

        return 'Process cpu usage on each core. (Parse from trace)'

    def get_help(self):
        """Usage examples (user-facing text, kept verbatim)."""

        text = '''
某个进程的CPU占用率

> tracecat "trace:app_load" # 抓取perfetto trace
> tracecat "parse:app_load" # 解析app_load
> tracecat "parse:app_load(100ms)" # 以100ms粒度解析app_load
> tracecat "chart:app_load" # 显示所有process
> tracecat "chart:app_load(1532)" # 显示所有pid为1532的进程各核占用率
> tracecat "chart:app_load(pubg)" # 显示名字包含pubg的进程各核占用率

* 不建议长时间抓取,因为生成的trace文件可能过大'''

        return text

    def invoke_sources(self):
        """Request the perfetto source with sched events enabled."""

        self.perfetto = self.invoke_source('perfetto', 'sched')

    def __get_process_list(self):
        """All processes seen in the trace: (upid, pid, name) rows."""

        results = self.perfetto.query('select upid, pid, name from process')

        return results

    def __get_sched_slices(self):
        """All scheduling slices joined with thread/process ids, ordered by
        start time; utid 0 (presumably the idle task) is excluded."""

        return self.perfetto.query('select s.cpu cpu_id, s.ts start_time, s.ts_end end_time, s.dur duration, t.utid, t.tid, p.upid, p.pid from sched as s join thread as t on t.utid=s.utid join process as p on p.upid=t.upid where s.utid > 0 order by s.ts asc')

    def do_parse(self, params):
        """Aggregate per-process cpu time into fixed time windows.

        params[0] optionally sets the window size in ms (default 1000ms).
        Produces one row per (window, process) with cpu_load_N percentage
        columns for every cpu in the trace.
        """

        results = list()

        window = None

        # Window length in trace-clock units (presumably ns, matching
        # perfetto 'ts' — confirm against get_trace_info).
        window_time = get_time(params[0], 'ms') * 1000000 if params else 1000 * 1000000

        trace_info = self.perfetto.get_trace_info()

        process_list = self.__get_process_list()

        sched_slices = self.__get_sched_slices()

        while window := pick_next_window(window, trace_info['start_time'], trace_info['end_time'], window_time, True):

            # init window records

            records = dict()

            for proc in process_list:

                records[proc['upid']] = {
                    'timestamp': window['end'],
                    'proc_id': proc['pid'],
                    'proc_name': proc['name']
                }

                for cpu_id in trace_info['cpu_list']:

                    records[proc['upid']]['cpu_time_' + str(cpu_id)] = 0
                    records[proc['upid']]['cpu_load_' + str(cpu_id)] = 0

            # get each process cpu load

            slices = sub_slices(sched_slices, window['start'], window['end'])

            for row in slices:

                # NOTE(review): assumes every upid in sched also appears in
                # the process table — a missing one would raise KeyError.
                records[row['upid']]['cpu_time_' + str(row['cpu_id'])] += row['duration']

            for record in records.values():

                for cpu_id in trace_info['cpu_list']:

                    # Percentage truncated to two decimal places.
                    record['cpu_load_' + str(cpu_id)] = int((record['cpu_time_' + str(cpu_id)] / window_time) * 10000) / 100

            # append to results
            results += records.values()

        columns = ['timestamp', 'proc_id', 'proc_name'] + ['cpu_load_' + str(cpu_id) for cpu_id in trace_info['cpu_list']]

        return pandas.DataFrame(results, columns = columns)

    def __get_proc_list(self, df):
        """Unique (proc_id, proc_name) pairs from the parsed dataframe,
        sorted by pid."""

        proc_list = get_unique_list(df, {'proc_id': int, 'proc_name': str}, skip_none = True) # None means interrupt, skip it.

        return sorted(proc_list, key = lambda i: i['proc_id'])

    def __print_proc_list(self, proc_list):
        """Pretty-print pid / name pairs for user selection."""

        print('Process:')

        for proc in proc_list:

            print('\t' + str(proc['proc_id']) + '\t' + proc['proc_name'])

    def __search_proc(self, proc_list, proc_id = None, proc_name = None):
        """Filter by exact pid, or by substring of the process name."""

        if proc_id:

            return [proc for proc in proc_list if proc['proc_id'] == proc_id]

        elif proc_name:

            return [proc for proc in proc_list if proc_name in proc['proc_name']]

        else:
            return None

    def do_chart(self, params, df):
        """Plot per-core load for one process, selected by pid or name."""

        # find proc_id

        proc_list = self.__get_proc_list(df)

        if not params:

            self.__print_proc_list(proc_list)

            sys.exit('Please input process id or process name filter.')

        param = params[0]

        if param.isdigit():

            proc_id = int(param)

            results = self.__search_proc(proc_list, proc_id = proc_id)

            if not results:

                sys.exit('ERROR: Process not found.')

        else:

            results = self.__search_proc(proc_list, proc_name = param)

            if not results:

                sys.exit('ERROR: Process not found.')

            if len(results) > 1:

                self.__print_proc_list(results)

                sys.exit('Found multiple result, please specify process id or an unique process name.')

            proc_id = results[0]['proc_id']

        # plot chart

        self.plotter.plot(df[df.proc_id == proc_id], 'app cpu load', x='timestamp', y=[col for col in df.columns if 'cpu_load_' in col], kind='line', marker='.')
--------------------------------------------------------------------------------
/modules/cpu_freq/cpu_freq_module.py:
--------------------------------------------------------------------------------
1 | import pandas
2 |
3 | from framework.module import Module
4 |
class Cpu_freq_module(Module):
    """Per-core CPU frequency parsed from a perfetto trace."""

    def __init__(self):
        super().__init__()

    def get_name(self):
        """Module identifier used on the command line."""
        return 'cpu_freq'

    def get_desc(self):
        """One-line description shown in the module list."""
        return 'CPU frequency for each core. (Parse from trace)'

    def get_help(self):
        """Usage examples (user-facing text, kept verbatim)."""
        text = '''
从perfetto的trace中解析CPU频率

> tracecat "trace:cpu_freq" # 抓取perfetto trace
> tracecat "parse:cpu_freq" # 解析cpu_freq
> tracecat "chart:cpu_freq" # 显示所有cpu的频率曲线
> tracecat "chart:cpu_freq(0)" # 只显示cpu 0的频率曲线
> tracecat "chart:cpu_freq(0,4,7)" # 显示cpu0,4,7的频率曲线(cluster)'''
        return text

    def invoke_sources(self):
        """Request the perfetto source with cpu_freq events enabled."""
        self.perfetto = self.invoke_source('perfetto', 'cpu_freq')

    def __query_freq_points(self):
        """Fetch (cpu_id, timestamp, cpu_freq) rows from the trace."""
        return self.perfetto.query('select t.cpu cpu_id, c.ts timestamp, c.value as cpu_freq from counter as c left join cpu_counter_track as t on c.track_id = t.id where t.name = "cpufreq" order by t.cpu asc, c.ts asc')

    def do_parse(self, params):
        """Build the result dataframe; params are unused."""
        rows = self.__query_freq_points()
        return pandas.DataFrame(rows, columns = ['cpu_id', 'timestamp', 'cpu_freq'])

    def do_chart(self, params, df):
        """Plot one frequency line per selected cpu."""
        self.plotter.plot_index_chart(params, df, 'cpu freq', index='cpu_id', x='timestamp', y='cpu_freq', kind='line', drawstyle='steps-post', marker='.')
--------------------------------------------------------------------------------
/modules/cpu_freq2/cpu_freq2_module.py:
--------------------------------------------------------------------------------
1 | import pandas
2 |
3 | from framework.module import Module
4 | from framework.helpers import create_seq_list, create_seq_dict, get_time
5 |
class Cpu_freq2_module(Module):
    """Per-core CPU frequency sampled from sysfs at a fixed period."""

    def __init__(self):
        super().__init__()

    def get_name(self):
        """Module identifier used on the command line."""
        return 'cpu_freq2'

    def get_desc(self):
        """One-line description shown in the module list."""
        return 'CPU frequency for each core. (Sample from sysfs)'

    def get_help(self):
        """Usage examples (user-facing text, kept verbatim)."""
        text = '''
从sysfs采样CPU频率

> tracecat "trace:cpu_freq2" # 以500ms粒度采样(默认)
> tracecat "trace:cpu_freq2(100ms)" # 以100ms粒度采样(模块设置)
> tracecat "trace:cpu_freq2" -s 100ms # 以100ms粒度采样(全局设置)
> tracecat "parse:cpu_freq2" # 解析
> tracecat "chart:cpu_freq2" # 显示所有cpu的频率曲线
> tracecat "chart:cpu_freq2(0)" # 只显示cpu 0的频率曲线
> tracecat "chart:cpu_freq2(0,4,7)" # 显示cpu0,4,7的频率曲线(cluster)'''
        return text

    def invoke_sources(self):
        """Request the sysfs source with the cpu_freq metric enabled."""
        self.sysfs = self.invoke_source('sysfs', 'cpu_freq')

    def do_trace(self, params):
        """Configure the sampling period from params[0] (e.g. '100ms')."""
        if params:
            period = get_time(params[0], 'ms')
        else:
            period = None
        self.sysfs.config('cpu_freq', {'period': period})

    def do_parse(self, params):
        """Reshape sampled rows into one frequency column per cpu."""
        samples = self.sysfs.get_metrics('cpu_freq')
        cpu_ids = sorted(samples[0]['data'].keys()) if samples else []
        columns = create_seq_list('timestamp', 'cpu_freq_', cpu_ids)
        table = create_seq_dict('timestamp', 'cpu_freq_', cpu_ids, list)
        for sample in samples:
            table['timestamp'].append(sample['time'])
            for cpu_id, freq in sample['data'].items():
                table['cpu_freq_' + str(cpu_id)].append(freq)
        return pandas.DataFrame(table, columns = columns)

    def do_chart(self, params, df):
        """Plot one frequency curve per selected cpu."""
        self.plotter.plot_paral_chart(params, df, 'cpu freq', x='timestamp', y_prefixer='cpu_freq_', kind='line', drawstyle='steps-post', marker='.')
70 |
--------------------------------------------------------------------------------
/modules/cpu_freq_stat/cpu_freq_stat_module.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import pandas
3 |
4 | from framework.module import Module
5 | from framework.helpers import create_duration_column
6 |
class Cpu_freq_stat_module(Module):
    """Time-share statistics per cpu frequency point (and C-STATE when available)."""

    def __init__(self):

        super().__init__()

    def get_name(self):

        return 'cpu_freq_stat'

    def get_desc(self):

        return 'CPU frequency / idle state statistics. (Based on cpu_freq / cpu_idle module result.)'

    def get_help(self):

        text = '''
统计cpu各频点及C-STATE运行时间占比(基于cpu_freq, cpu_idle)

> tracecat "trace:cpu_freq,cpu_idle,cpu_freq_stat"   # 抓取
> tracecat "parse:cpu_freq,cpu_idle,cpu_freq_stat"   # 解析
> tracecat "chart:cpu_freq_stat"                     # 生成柱状图

* 如果未抓取cpu_idle,则只解析频点的时间占比,不包含C-STATE信息'''

        return text

    def invoke_sources(self):

        # Static per-cpu frequency table, used to show never-visited freq points as 0%.
        self.sysfs = self.invoke_source('sysfs', 'cpu_freq_table')

    def invoke_results(self):

        self.cpu_freq = self.invoke_result('cpu_freq')

        # cpu_idle is optional: without it only per-frequency time share is produced.
        self.cpu_idle = self.invoke_result('cpu_idle', return_when_fail = True)

    def __get_trace_time(self):

        # The trace window is bounded by the first / last cpu_freq sample.
        return self.cpu_freq.timestamp.min(), self.cpu_freq.timestamp.max()

    def __get_cpu_list(self):

        return sorted(self.cpu_freq.cpu_id.unique())

    def __get_cpu_freq_subset(self):

        results = self.cpu_freq.copy()

        results['cpu_idle'] = None

        return results[['cpu_id', 'timestamp', 'cpu_freq', 'cpu_idle']]

    def __get_cpu_idle_subset(self):

        results = self.cpu_idle.copy()

        results['cpu_freq'] = None

        return results[['cpu_id', 'timestamp', 'cpu_freq', 'cpu_idle']]

    def do_parse(self, params):
        """Merge freq / idle events, turn them into durations, and compute time-share percent."""

        # Combine two dataset

        dataset = self.__get_cpu_freq_subset()

        if self.cpu_idle is not None:

            dataset = pandas.concat([dataset, self.__get_cpu_idle_subset()])

        dataset.sort_values(by=['cpu_id', 'timestamp'], inplace=True)

        # fillna(method='ffill') is deprecated; ffill() is the supported spelling.
        dataset.ffill(inplace=True)

        dataset.dropna(inplace=True)

        dataset.reset_index(inplace=True)

        # Create duration column

        start_time, end_time = self.__get_trace_time()

        duration = end_time - start_time

        # DataFrame.append was removed in pandas 2.0: collect per-cpu frames, concat once.
        per_cpu = [create_duration_column(dataset[dataset.cpu_id == cpu_id].copy(), end_time=end_time)
                   for cpu_id in self.__get_cpu_list()]

        if per_cpu:
            results = pandas.concat(per_cpu)
        else:
            results = pandas.DataFrame(columns=['cpu_id', 'cpu_freq', 'cpu_idle', 'duration'])

        results = results[results['duration'] >= 0]

        results = results[['cpu_id', 'cpu_freq', 'cpu_idle', 'duration']]

        # Create statistics

        cpu_freq_table = self.sysfs.get_metrics('cpu_freq_table', None)

        stats = results.groupby(['cpu_id', 'cpu_freq', 'cpu_idle'], dropna=False).sum().reset_index()

        # Add missing freq points if we have freq table

        if cpu_freq_table:

            missing = list()

            for cpu_id in self.__get_cpu_list():

                for freq in cpu_freq_table[str(cpu_id)]:

                    if stats[(stats.cpu_id == cpu_id) & (stats.cpu_freq == freq)].empty:

                        missing.append({'cpu_id': cpu_id, 'cpu_freq': freq, 'cpu_idle': 0, 'duration': 0})

            if missing:

                stats = pandas.concat([stats, pandas.DataFrame(missing)], ignore_index=True)

        # Deterministic row order regardless of whether missing points were added.
        stats = stats.sort_values(by=['cpu_id', 'cpu_freq', 'cpu_idle']).reset_index()

        # Create percent column

        stats['percent'] = stats['duration'] / duration

        return stats[['cpu_id', 'cpu_freq', 'cpu_idle', 'duration', 'percent']]

    def do_chart(self, params, df):

        if not params:

            sys.exit('ERROR: You need specify a cpu id.')

        # Bug fix: the original only assigned cpu_id when exactly one param was given,
        # raising NameError for multiple params; always use the first param.
        cpu_id = int(params[0])

        df = df[df.cpu_id == cpu_id]

        if df.empty:

            sys.exit('ERROR: cpu id not found.')

        pivot_df = df.fillna('')

        pivot_df = pandas.pivot_table(data=pivot_df, index=['cpu_freq'], columns=['cpu_idle'], values=['percent'])

        pivot_df.columns.set_names(['percent', 'cpu_idle'], inplace=True)

        self.plotter.plot(pivot_df, 'cpu%s' % cpu_id, kind='bar', stacked=True)
--------------------------------------------------------------------------------
/modules/cpu_freq_stat2/cpu_freq_stat2_module.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import pandas
3 |
4 | from framework.module import Module
5 | from framework.helpers import create_duration_column
6 |
class Cpu_freq_stat2_module(Module):
    """Time-share statistics per cpu frequency point, based on cpu_freq2 samples."""

    def __init__(self):

        super().__init__()

    def get_name(self):

        return 'cpu_freq_stat2'

    def get_desc(self):

        return 'CPU frequency statistics. (Based on cpu_freq2 module result.)'

    def get_help(self):

        text = '''
统计cpu各频点运行时间占比(基于cpu_freq2)

> tracecat "trace:cpu_freq2,cpu_freq_stat2"   # 抓取
> tracecat "parse:cpu_freq2,cpu_freq_stat2"   # 解析
> tracecat "chart:cpu_freq_stat2"             # 生成柱状图'''

        return text

    def invoke_sources(self):

        self.sysfs = self.invoke_source('sysfs', 'cpu_freq_table')

    def invoke_results(self):

        self.cpu_freq = self.invoke_result('cpu_freq2')

    def __get_cpu_list(self):

        # cpu ids are recovered from the 'cpu_freq_<id>' column names (kept as strings).
        return sorted(col.replace('cpu_freq_', '') for col in self.cpu_freq.columns if 'cpu_freq_' in col)

    def __get_cpu_freq(self, cpu_id):
        """Return this cpu's samples as generic ['cpu_freq', 'duration'] rows."""

        _, end_time = self.__get_trace_time()

        # rename() returns a new frame, so the original's extra .copy() was dead code,
        # as was the no-op 'timestamp' self-assignment.
        cpu_freq = self.cpu_freq.rename(columns = {'cpu_freq_' + cpu_id: 'cpu_freq'})

        cpu_freq = create_duration_column(cpu_freq, end_time)

        return cpu_freq[['cpu_freq', 'duration']]

    def __get_trace_time(self):

        return self.cpu_freq.timestamp.min(), self.cpu_freq.timestamp.max()

    def do_parse(self, params):
        """Aggregate per-cpu sample durations per frequency into time-share percent."""

        frames = list()

        start_time, end_time = self.__get_trace_time()

        duration = end_time - start_time

        cpu_freq_table = self.sysfs.get_metrics('cpu_freq_table', None)

        for cpu_id in self.__get_cpu_list():

            cpu_freq = self.__get_cpu_freq(cpu_id)

            stats = cpu_freq.groupby(['cpu_freq']).sum().reset_index()

            # Add freq points never observed so they appear as 0% bars.
            # (DataFrame.append was removed in pandas 2.0; build rows, concat once.)
            if cpu_freq_table:

                missing = [{'cpu_freq': freq, 'duration': 0}
                           for freq in cpu_freq_table[cpu_id]
                           if stats[stats.cpu_freq == freq].empty]

                if missing:

                    stats = pandas.concat([stats, pandas.DataFrame(missing)], ignore_index=True)

            # Add columns

            stats['cpu_id'] = cpu_id

            stats['percent'] = stats['duration'] / duration

            stats = stats.sort_values(by=['cpu_freq'])

            frames.append(stats)

        if frames:
            result = pandas.concat(frames)
        else:
            result = pandas.DataFrame(columns=['cpu_id', 'cpu_freq', 'percent'])

        result.reset_index(inplace=True)

        return result[['cpu_id', 'cpu_freq', 'percent']]

    def do_chart(self, params, df):

        if not params:

            sys.exit('ERROR: You need specify a cpu id.')

        # Bug fix: the original left cpu_id unassigned (NameError) when more than one
        # param was given; always use the first param (kept as string, matching parse).
        cpu_id = params[0]

        df = df[df.cpu_id == cpu_id]

        if df.empty:

            sys.exit('ERROR: cpu id not found.')

        self.plotter.plot(df, 'cpu%s' % cpu_id, x='cpu_freq', y='percent', kind='bar', color='orange')
131 |
--------------------------------------------------------------------------------
/modules/cpu_idle/cpu_idle_module.py:
--------------------------------------------------------------------------------
1 | import pandas
2 |
3 | from framework.module import Module
4 |
class Cpu_idle_module(Module):
    """Per-core CPU idle state (C-STATE) parsed from a perfetto trace."""

    def __init__(self):
        super().__init__()

    def get_name(self):
        return 'cpu_idle'

    def get_desc(self):
        return 'CPU idle state (C-STATE) for each core. (Parse from trace)'

    def get_help(self):
        return '''
从perfetto的trace中解析CPU idle state (C-STATE)

> tracecat "trace:cpu_idle"      # 抓取perfetto trace
> tracecat "parse:cpu_idle"      # 解析cpu_idle
> tracecat "chart:cpu_idle"      # 显示所有cpu的idle state曲线
> tracecat "chart:cpu_idle(0)"   # 显示cpu 0的idle state曲线'''

    def invoke_sources(self):
        self.perfetto = self.invoke_source('perfetto', 'cpu_idle')

    def __get_idle_points(self):
        # Join counter samples onto their cpu track; rows come back per-cpu, time-ascending.
        sql = 'select t.cpu cpu_id, c.ts timestamp, c.value as cpu_idle from counter as c left join cpu_counter_track as t on c.track_id = t.id where t.name = "cpuidle" order by t.cpu asc, c.ts asc'
        return self.perfetto.query(sql)

    def do_parse(self, params):
        """Return a DataFrame of per-cpu idle-state change points."""
        return pandas.DataFrame(self.__get_idle_points(), columns = ['cpu_id', 'timestamp', 'cpu_idle'])

    def do_chart(self, params, df):
        self.plotter.plot_index_chart(params, df, 'cpu idle', index='cpu_id', x='timestamp', y='cpu_idle', kind='line', drawstyle='steps-post', marker='.')
--------------------------------------------------------------------------------
/modules/cpu_load/cpu_load_module.py:
--------------------------------------------------------------------------------
1 | import pandas
2 |
3 | from framework.module import Module
4 | from framework.helpers import get_slices, sub_slices, get_slices_usage, get_time, pick_next_window, create_seq_list, create_seq_dict
5 |
class Cpu_load_module(Module):
    # Computes per-core CPU load from perfetto sched slices, as busy-time share per window.

    def __init__(self):

        super().__init__()

    def get_name(self):

        return 'cpu_load'

    def get_desc(self):

        return 'CPU load for each core. (Parse from trace)'

    def get_help(self):

        text = '''
从perfetto的trace中解析CPU占用率

> tracecat "trace:cpu_load"             # 抓取perfetto trace
> tracecat "parse:cpu_load"             # 以1s粒度解析占用率
> tracecat "parse:cpu_load(100ms)"      # 以100ms粒度解析占用率
> tracecat "chart:cpu_load"             # 显示各cpu占用率
> tracecat "chart:cpu_load(0)"          # 只显示cpu 0的占用率
> tracecat "chart:cpu_load(0-4,5-6,7)"  # 显示平均占用率

* 不建议长时间抓取,因为生成的trace文件可能过大,长时间抓取请使用cpu_load2'''

        return text

    def invoke_sources(self):

        # Scheduling slices come from the perfetto 'sched' category.
        self.perfetto = self.invoke_source('perfetto', 'sched')

    def __get_sched_slices(self, cpu_id):
        """Return busy slices for one cpu; utid > 0 excludes the idle task."""

        results = self.perfetto.query('select ts start_time, ts_end end_time, dur duration from sched where cpu = %s and utid > 0 order by ts asc' % cpu_id)

        return get_slices(results, 'start_time', 'end_time', 'duration')

    def do_parse(self, params):
        """Window the trace and emit one 'cpu_load_<id>' percentage column per cpu.

        Optional params[0] is the window size (default 1s); times are in ns.
        """

        window = None

        # get_time returns ms; scale to ns to match perfetto timestamps.
        window_time = get_time(params[0], 'ms') * 1000000 if params else 1000 * 1000000

        trace_info = self.perfetto.get_trace_info()

        cpu_ids = trace_info['cpu_list']

        columns = create_seq_list('timestamp', 'cpu_load_', cpu_ids)

        results = create_seq_dict('timestamp', 'cpu_load_', cpu_ids, list)

        for cpu_id in cpu_ids:

            column = 'cpu_load_' + str(cpu_id)

            sched_slices = self.__get_sched_slices(cpu_id)

            # window resets to None after each cpu's walk, so the walk restarts per cpu.
            while window := pick_next_window(window, trace_info['start_time'], trace_info['end_time'], window_time, True):

                window_slices = sub_slices(sched_slices, window['start'], window['end'])

                cpu_load = get_slices_usage(window_slices, window['start'], window['end'])

                # Truncate (not round) to two decimal places.
                results[column].append(int(cpu_load * 10000) / 100)

                if len(results['timestamp']) == window['id']: # only append in first loop

                    results['timestamp'].append(window['end'])

        return pandas.DataFrame(results, columns = columns)

    def do_chart(self, params, df):

        self.plotter.plot_paral_chart(params, df, 'cpu load', x='timestamp', y_prefixer='cpu_load_', kind='line', marker='.')
83 |
--------------------------------------------------------------------------------
/modules/cpu_load2/cpu_load2_module.py:
--------------------------------------------------------------------------------
1 | import pandas
2 |
3 | from framework.module import Module
4 | from framework.helpers import create_seq_list, create_seq_dict, get_time
5 |
class Cpu_load2_module(Module):
    """Per-core CPU load computed from periodic /proc/stat samples."""

    def __init__(self):
        super().__init__()

    def get_name(self):
        return 'cpu_load2'

    def get_desc(self):
        return 'CPU load for each core. (Sample from procfs)'

    def get_help(self):
        return '''
从procfs采样CPU占用率

> tracecat "trace:cpu_load2"             # 以500ms粒度采样(默认)
> tracecat "trace:cpu_load2(100ms)"      # 以100ms粒度采样(模块设置)
> tracecat "trace:cpu_load2" -s 100ms    # 以100ms粒度采样(全局设置)
> tracecat "parse:cpu_load2"             # 解析
> tracecat "chart:cpu_load2"             # 显示各cpu占用率
> tracecat "chart:cpu_load2(0)"          # 只显示cpu 0的占用率
> tracecat "chart:cpu_load2(0-4,5-6,7)"  # 显示平均占用率'''

    def invoke_sources(self):
        self.procfs = self.invoke_source('procfs', 'stat')

    def do_trace(self, params):
        period = get_time(params[0], 'ms') if params else None
        self.procfs.config('stat', {'period': period})

    def do_parse(self, params):
        """Derive load percentages from consecutive /proc/stat counter deltas."""
        samples = self.procfs.get_metrics('stat')

        core_ids = sorted(samples[0]['data']['cpu'].keys()) if samples else []

        columns = create_seq_list('timestamp', 'cpu_load_', core_ids)
        table = create_seq_dict('timestamp', 'cpu_load_', core_ids, list)

        previous = None

        for sample in samples:
            if previous:
                loads = dict()
                usable = True

                for core_id, cur in sample['data']['cpu'].items():
                    prev = previous['data']['cpu'][core_id]

                    prev_total = sum(prev.values())
                    cur_total = sum(cur.values())

                    # Zero total delta means counters did not advance; drop the whole sample.
                    if cur_total - prev_total == 0:
                        usable = False
                        break

                    busy_delta = (cur_total - cur['idle']) - (prev_total - prev['idle'])
                    loads['cpu_load_' + str(core_id)] = busy_delta / (cur_total - prev_total) * 100

                if usable:
                    table['timestamp'].append(sample['time'])
                    for key, value in loads.items():
                        table[key].append(value)

            # Original assigned this in both branches; one unconditional copy is equivalent.
            previous = sample.copy()

        return pandas.DataFrame(table, columns = columns)

    def do_chart(self, params, df):
        self.plotter.plot_paral_chart(params, df, 'cpu load', x='timestamp', y_prefixer='cpu_load_', kind='line', marker='.')
102 |
--------------------------------------------------------------------------------
/modules/cpu_load_summary/cpu_load_summary_module.py:
--------------------------------------------------------------------------------
1 | import pandas
2 |
3 | from framework.module import Module
4 | from framework.helpers import get_slices, sub_slices, get_slices_usage, get_time, pick_next_window, create_seq_list, create_seq_dict
5 |
class Cpu_load_summary_module(Module):
    """Max / min / avg CPU load per core, computed from cpu_load(2) results."""

    def __init__(self):
        super().__init__()

    def get_name(self):
        return 'cpu_load_summary'

    def get_desc(self):
        return 'CPU max / min / avg load for each core. (Calculate from cpu_load result)'

    def get_help(self):
        return '''
统计该场景cpu占用率的最大、最小、平均值

> tracecat "trace:cpu_load"                   # 先要抓取cpu_load或者cpu_load2
> tracecat "parse:cpu_load,cpu_load_summary"  # 从cpu_load或cpu_load2的解析结果中计算统计结果
> tracecat "chart:cpu_load_summary"           # 显示柱状图'''

    def invoke_results(self):
        # Accepts the result of either the trace-based or sampling-based load module.
        self.cpu_load = self.invoke_result(['cpu_load', 'cpu_load2'])

    def do_parse(self, params):
        """Summarize each 'cpu_load_<id>' column into min / max / mean rows."""
        rows = [
            {
                'cpu_id': column.replace('cpu_load_', ''),
                'min_load': self.cpu_load[column].min(),
                'max_load': self.cpu_load[column].max(),
                'avg_load': self.cpu_load[column].mean(),
            }
            for column in self.cpu_load.columns if 'cpu_load_' in column
        ]
        return pandas.DataFrame(rows)

    def do_chart(self, params, df):
        self.plotter.plot(df, 'cpu load', kind='bar')
55 |
--------------------------------------------------------------------------------
/modules/ddr_freq/ddr_freq_module.py:
--------------------------------------------------------------------------------
1 | import pandas
2 |
3 | from framework.module import Module
4 | from framework.helpers import get_time
5 |
class Ddr_freq_module(Module):
    """DDR frequency sampled from sysfs."""

    def __init__(self):

        super().__init__()

    def get_name(self):

        return 'ddr_freq'

    def get_desc(self):

        return 'DDR frequency. (Sample from sysfs)'

    def get_help(self):

        # Bug fix: the chart line previously said "GPU频率曲线" (copy-paste from gpu_freq).
        text = '''
从sysfs采样DDR频率

> tracecat "trace:ddr_freq"            # 以500ms粒度采样(默认)
> tracecat "trace:ddr_freq(100ms)"     # 以100ms粒度采样(模块设置)
> tracecat "trace:ddr_freq" -s 100ms   # 以100ms粒度采样(全局设置)
> tracecat "parse:ddr_freq"            # 解析
> tracecat "chart:ddr_freq"            # 显示DDR频率曲线'''

        return text

    def invoke_sources(self):

        self.sysfs = self.invoke_source('sysfs', 'ddr_freq')

    def do_trace(self, params):

        # None keeps the source's default sampling period.
        period = get_time(params[0], 'ms') if params else None

        self.sysfs.config('ddr_freq', {'period': period})

    def do_parse(self, params):
        """Return sampled rows renamed to (timestamp, ddr_freq)."""

        ddr_freqs = self.sysfs.get_metrics('ddr_freq')

        return pandas.DataFrame(ddr_freqs).rename(columns = {'time': 'timestamp', 'data': 'ddr_freq'})

    def do_chart(self, params, df):

        self.plotter.plot(df, 'ddr freq', x='timestamp', y='ddr_freq', kind='line', drawstyle='steps-post', marker='.')
--------------------------------------------------------------------------------
/modules/ddr_freq_stat/ddr_freq_stat_module.py:
--------------------------------------------------------------------------------
1 | from framework.module import Module
2 | from framework.helpers import create_duration_column
3 |
class Ddr_freq_stat_module(Module):
    """Time-share statistics per DDR frequency point, based on ddr_freq results."""

    def __init__(self):

        super().__init__()

    def get_name(self):

        return 'ddr_freq_stat'

    def get_desc(self):

        return 'DDR frequency statistics. (Based on ddr_freq module result.)'

    def get_help(self):

        text = '''
统计ddr各频点运行时间占比(基于ddr_freq)

> tracecat "trace:ddr_freq,ddr_freq_stat"   # 抓取
> tracecat "parse:ddr_freq,ddr_freq_stat"   # 解析
> tracecat "chart:ddr_freq_stat"            # 生成柱状图'''

        return text

    def invoke_sources(self):

        # Static frequency table, used to show never-visited freq points as 0%.
        self.sysfs = self.invoke_source('sysfs', 'ddr_freq_table')

    def invoke_results(self):

        self.ddr_freq = self.invoke_result('ddr_freq')

    def __get_ddr_freq(self):
        """Return samples as ['ddr_freq', 'duration'] rows."""

        # Only the trace end is needed to close the last sample's duration.
        _, end_time = self.__get_trace_time()

        ddr_freq = create_duration_column(self.ddr_freq.copy(), end_time)

        return ddr_freq[['ddr_freq', 'duration']]

    def __get_trace_time(self):

        return self.ddr_freq.timestamp.min(), self.ddr_freq.timestamp.max()

    def do_parse(self, params):
        """Aggregate sample durations per frequency and convert to time-share percent."""

        # Local import: this module previously used DataFrame.append (removed in
        # pandas 2.0) and had no pandas import at file top.
        import pandas

        start_time, end_time = self.__get_trace_time()

        duration = end_time - start_time

        ddr_freq = self.__get_ddr_freq()

        ddr_freq_table = self.sysfs.get_metrics('ddr_freq_table', None)

        stats = ddr_freq.groupby(['ddr_freq']).sum().reset_index()

        # Add freq points

        if ddr_freq_table:

            missing = [{'ddr_freq': freq, 'duration': 0} for freq in ddr_freq_table if stats[stats.ddr_freq == freq].empty]

            if missing:

                stats = pandas.concat([stats, pandas.DataFrame(missing)], ignore_index=True)

        stats = stats.sort_values(by=['ddr_freq'])

        stats['percent'] = stats['duration'] / duration

        return stats[['ddr_freq', 'percent']]

    def do_chart(self, params, df):

        self.plotter.plot(df, x='ddr_freq', y='percent', kind='bar', color='orange')
--------------------------------------------------------------------------------
/modules/dsu_freq/dsu_freq_module.py:
--------------------------------------------------------------------------------
import pandas

from framework.module import Module
from framework.helpers import create_seq_list, create_seq_dict, get_time
5 |
class Dsu_freq_module(Module):
    """DSU (shared CPU cluster unit) frequency sampled from sysfs."""

    def __init__(self):

        super().__init__()

    def get_name(self):

        return 'dsu_freq'

    def get_desc(self):

        return 'CPU DSU frequency for each cluster. (Sample from sysfs)'

    def get_help(self):

        # Typo fix: example previously read "(100m)"; siblings all use "(100ms)".
        text = '''
从sysfs采样DSU频率

> tracecat "trace:dsu_freq"            # 以500ms粒度采样(默认)
> tracecat "trace:dsu_freq(100ms)"     # 以100ms粒度采样(模块设置)
> tracecat "trace:dsu_freq" -s 100ms   # 以100ms粒度采样(全局设置)
> tracecat "parse:dsu_freq"            # 解析
> tracecat "chart:dsu_freq"            # 显示DSU频率曲线'''

        return text

    def invoke_sources(self):

        self.sysfs = self.invoke_source('sysfs', 'dsu_freq')

    def do_trace(self, params):

        # get_time was referenced here but never imported (NameError on any
        # "trace:dsu_freq(...)" invocation); the helpers import now provides it.
        period = get_time(params[0], 'ms') if params else None

        self.sysfs.config('dsu_freq', {'period': period})

    def do_parse(self, params):
        """Collapse per-entry readings to a single frequency: the max across clusters."""

        results = list()

        dsu_freqs = self.sysfs.get_metrics('dsu_freq')

        for row in dsu_freqs:

            results.append({
                'timestamp': row['time'],
                'dsu_freq': max(row['data'].values())
            })

        return pandas.DataFrame(results)

    def do_chart(self, params, df):

        self.plotter.plot(df, 'dsu freq', x='timestamp', y='dsu_freq', kind='line', drawstyle='steps-post', marker='.')
61 |
--------------------------------------------------------------------------------
/modules/gpu_freq/gpu_freq_module.py:
--------------------------------------------------------------------------------
1 | import pandas
2 |
3 | from framework.module import Module
4 | from framework.helpers import get_time
5 |
class Gpu_freq_module(Module):
    """GPU frequency sampled from sysfs at a configurable period."""

    def __init__(self):
        super().__init__()

    def get_name(self):
        return 'gpu_freq'

    def get_desc(self):
        return 'GPU frequency. (Sample from sysfs)'

    def get_help(self):
        return '''
从sysfs采样GPU频率

> tracecat "trace:gpu_freq"            # 以500ms粒度采样(默认)
> tracecat "trace:gpu_freq(100ms)"     # 以100ms粒度采样(模块设置)
> tracecat "trace:gpu_freq" -s 100ms   # 以100ms粒度采样(全局设置)
> tracecat "parse:gpu_freq"            # 解析
> tracecat "chart:gpu_freq"            # 显示GPU频率曲线'''

    def invoke_sources(self):
        self.sysfs = self.invoke_source('sysfs', 'gpu_freq')

    def do_trace(self, params):
        # Without a module param the source keeps its own default sampling period.
        sample_period = get_time(params[0], 'ms') if params else None
        self.sysfs.config('gpu_freq', {'period': sample_period})

    def do_parse(self, params):
        """Return sampled rows renamed to (timestamp, gpu_freq)."""
        samples = self.sysfs.get_metrics('gpu_freq')
        frame = pandas.DataFrame(samples)
        return frame.rename(columns = {'time': 'timestamp', 'data': 'gpu_freq'})

    def do_chart(self, params, df):
        self.plotter.plot(df, 'gpu freq', x='timestamp', y='gpu_freq', kind='line', drawstyle='steps-post', marker='.')
--------------------------------------------------------------------------------
/modules/gpu_freq_stat/gpu_freq_stat_module.py:
--------------------------------------------------------------------------------
1 | from framework.module import Module
2 | from framework.helpers import create_duration_column
3 |
class Gpu_freq_stat_module(Module):
    """Time-share statistics per GPU frequency point, based on gpu_freq results."""

    def __init__(self):

        super().__init__()

    def get_name(self):

        return 'gpu_freq_stat'

    def get_desc(self):

        # Bug fix: the description previously said "DDR frequency statistics"
        # (copy-paste from ddr_freq_stat); this module is about the GPU.
        return 'GPU frequency statistics. (Based on gpu_freq module result.)'

    def get_help(self):

        text = '''
统计gpu各频点运行时间占比(基于gpu_freq)

> tracecat "trace:gpu_freq,gpu_freq_stat"   # 抓取
> tracecat "parse:gpu_freq,gpu_freq_stat"   # 解析
> tracecat "chart:gpu_freq_stat"            # 生成柱状图'''

        return text

    def invoke_sources(self):

        # Static frequency table, used to show never-visited freq points as 0%.
        self.sysfs = self.invoke_source('sysfs', 'gpu_freq_table')

    def invoke_results(self):

        self.gpu_freq = self.invoke_result('gpu_freq')

    def __get_gpu_freq(self):
        """Return samples as ['gpu_freq', 'duration'] rows."""

        # Only the trace end is needed to close the last sample's duration.
        _, end_time = self.__get_trace_time()

        gpu_freq = create_duration_column(self.gpu_freq.copy(), end_time)

        return gpu_freq[['gpu_freq', 'duration']]

    def __get_trace_time(self):

        return self.gpu_freq.timestamp.min(), self.gpu_freq.timestamp.max()

    def do_parse(self, params):
        """Aggregate sample durations per frequency and convert to time-share percent."""

        # Local import: this module previously used DataFrame.append (removed in
        # pandas 2.0) and had no pandas import at file top.
        import pandas

        start_time, end_time = self.__get_trace_time()

        duration = end_time - start_time

        gpu_freq = self.__get_gpu_freq()

        gpu_freq_table = self.sysfs.get_metrics('gpu_freq_table', None)

        stats = gpu_freq.groupby(['gpu_freq']).sum().reset_index()

        # Add freq points

        if gpu_freq_table:

            missing = [{'gpu_freq': freq, 'duration': 0} for freq in gpu_freq_table if stats[stats.gpu_freq == freq].empty]

            if missing:

                stats = pandas.concat([stats, pandas.DataFrame(missing)], ignore_index=True)

        stats = stats.sort_values(by=['gpu_freq'])

        stats['percent'] = stats['duration'] / duration

        return stats[['gpu_freq', 'percent']]

    def do_chart(self, params, df):

        self.plotter.plot(df, x='gpu_freq', y='percent', kind='bar', color='orange')
--------------------------------------------------------------------------------
/modules/ios_app_load/ios_app_load_module.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import pandas
3 |
4 | from framework.module import Module
5 | from framework.helpers import sub_slices, get_time, pick_next_window, get_unique_list
6 |
class Ios_app_load_module(Module):
    # Per-process, per-core CPU load for iOS, parsed from an Instruments trace.

    def __init__(self):

        super().__init__()

    def get_name(self):

        return 'ios_app_load'

    def get_desc(self):

        return 'Process cpu usage on each core, for iPhone. (Parse from trace)'

    def get_help(self):

        text = '''
iPhone某个进程的CPU占用率

> tracecat "trace:ios_app_load"         # 抓取instruments trace
> tracecat "parse:ios_app_load"         # 解析app_load
> tracecat "parse:ios_app_load(100ms)"  # 以100ms粒度解析app_load
> tracecat "chart:ios_app_load"         # 显示所有process
> tracecat "chart:ios_app_load(1532)"   # 显示所有pid为1532的进程各核占用率
> tracecat "chart:ios_app_load(pubg)"   # 显示名字包含pubg的进程各核占用率

* 需要在MacOS运行,需要安装xcode软件'''

        return text

    def invoke_sources(self):

        self.instruments = self.invoke_source('instruments')

    def do_parse(self, params):
        """Window the trace and emit per-process rows with one 'cpu_load_<id>' column per core.

        Optional params[0] is the window size in ms (default 1s); internal times are in ns.
        """

        results = list()

        window = None

        # get_time returns ms; scale to ns to match the trace timestamps.
        window_time = get_time(params[0], 'ms') * 1000000 if params else 1000 * 1000000

        trace_info = self.instruments.get_trace_info()

        process_list = self.instruments.get_process_list()

        sched_slices = self.instruments.get_cpu_state().to_dict(orient='records')

        while window := pick_next_window(window, trace_info['start_time'], trace_info['end_time'], window_time, True):

            # init window records

            # One record per process, keyed by process_id, zeroed for every core.
            records = dict()

            for proc in process_list:

                records[proc['process_id']] = {
                    'timestamp': window['end'],
                    'proc_id': proc['process_id'],
                    'proc_name': proc['process_name']
                }

                for cpu_id in trace_info['cpu_list']:

                    records[proc['process_id']]['cpu_time_' + str(cpu_id)] = 0
                    records[proc['process_id']]['cpu_load_' + str(cpu_id)] = 0

            # get each process cpu load

            # Accumulate per-core busy time from the slices that overlap this window.
            slices = sub_slices(sched_slices, window['start'], window['end'])

            for row in slices:

                records[row['process_id']]['cpu_time_' + str(row['cpu_id'])] += row['duration']

            for record in records.values():

                for cpu_id in trace_info['cpu_list']:

                    # Busy-time share of the window, truncated to two decimal places.
                    record['cpu_load_' + str(cpu_id)] = int((record['cpu_time_' + str(cpu_id)] / window_time) * 10000) / 100

            # append to results
            results += records.values()

        # cpu_time_* columns are intentionally dropped; only load percentages are kept.
        columns = ['timestamp', 'proc_id', 'proc_name'] + ['cpu_load_' + str(cpu_id) for cpu_id in trace_info['cpu_list']]

        return pandas.DataFrame(results, columns = columns)

    def __get_proc_list(self, df):
        """Return unique (proc_id, proc_name) pairs sorted numerically by pid."""

        proc_list = get_unique_list(df, {'proc_id': str, 'proc_name': str}, skip_none = True) # None means interrupt, skip it.

        return sorted(proc_list, key = lambda i: int(i['proc_id']))

    def __print_proc_list(self, proc_list):
        # Console helper so the user can pick a pid / name for the chart filter.

        print('Process:')

        for proc in proc_list:

            print('\t' + proc['proc_id'] + '\t' + proc['proc_name'])

    def __search_proc(self, proc_list, proc_id = None, proc_name = None):
        """Filter by exact pid, or by substring of the process name; None if neither given."""

        if proc_id:

            return [proc for proc in proc_list if proc['proc_id'] == proc_id]

        elif proc_name:

            return [proc for proc in proc_list if proc_name in proc['proc_name']]

        else:
            return None

    def do_chart(self, params, df):
        """Chart one process's per-core load; params[0] is a pid or a name filter."""

        # find proc_id

        proc_list = self.__get_proc_list(df)

        if not params:

            self.__print_proc_list(proc_list)

            sys.exit('Please input process id or process name filter.')

        param = params[0]

        # An all-digit param is treated as a pid, anything else as a name substring.
        if param.isdigit():

            proc_id = param

            results = self.__search_proc(proc_list, proc_id = proc_id)

            if not results:

                sys.exit('ERROR: Process not found.')

        else:

            results = self.__search_proc(proc_list, proc_name = param)

            if not results:

                sys.exit('ERROR: Process not found.')

            if len(results) > 1:

                self.__print_proc_list(results)

                sys.exit('Found multiple result, please specify process id or an unique process name.')

            proc_id = results[0]['proc_id']

        # plot chart

        self.plotter.plot(df[df.proc_id == proc_id], 'app cpu load', x='timestamp', y=[col for col in df.columns if 'cpu_load_' in col], kind='line', marker='.')
--------------------------------------------------------------------------------
/modules/ios_cpu_freq/ios_cpu_freq_module.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import pandas
3 |
4 | from framework.module import Module
5 |
6 | #FIXME: freq, idle dur config for different device
7 | #FIXME: auto find profiler thread id
8 | #FIXME: select run mode
9 |
10 | class Ios_cpu_freq_module(Module):
11 |
    def __init__(self):

        super().__init__()

        # this config is for iPhone12

        # Max frequencies for the small (efficiency) and large (performance) clusters;
        # assumed MHz -- TODO confirm against device data.
        self.sm_max_freq = 1823
        self.lg_max_freq = 2998

        # Observed idle-loop slice durations used to infer the current frequency
        # (see idlehack_mode); lg list is empty, so large cores are not calibrated.
        self.sm_idle_dur = [166, 167, 208, 209, 250, 291, 292, 333, 334, 375, 416, 417, 458, 459, 500, 541, 542, 583, 584]
        self.lg_idle_dur = []
23 |
24 | def get_desc(self):
25 |
26 | return 'CPU frequency for each core, for iPhone. (Hack from trace)'
27 |
28 | def get_name(self):
29 |
30 | return 'ios_cpu_freq'
31 |
32 | def get_help(self):
33 |
34 | text = '''
35 | iPhone CPU频率(Hack方式,实验功能)
36 |
37 | * 不建议使用'''
38 |
39 | return text
40 |
41 | def invoke_sources(self):
42 |
43 | self.instruments = self.invoke_source('instruments')
44 |
45 | def __get_cpu_state(self):
46 |
47 | return self.instruments.get_cpu_state().to_dict(orient='records')
48 |
49 | def __get_thread_data(self, thread_id):
50 |
51 | df_cpu_state = self.instruments.get_cpu_state()
52 |
53 | return df_cpu_state.loc[df_cpu_state.thread_id == thread_id].to_dict(orient='records')
54 |
55 | def do_parse(self, params):
56 |
57 | return self.idlehack_mode()
58 |
59 | def do_chart(self, params, df):
60 |
61 | self.plotter.plot(df[df.cpu_id <= 3], 'cpu freq', x='timestamp', y='cpu_freq', label='cpu_freq_sm', drawstyle='steps-post', marker='.')
62 |
63 | def idlehack_mode(self):
64 |
65 | results = list()
66 |
67 | matching = dict()
68 |
69 | cpu_state = self.__get_cpu_state()
70 |
71 | for row in cpu_state:
72 |
73 | if row['cpu_state'] == 'Handling Interrupt':
74 |
75 | matching[row['cpu_id']] = True
76 |
77 | elif row['cpu_id'] in matching and matching[row['cpu_id']] and row['cpu_state'] == 'Running' and row['process_id'] == '0' and row['priority'] == 0:
78 |
79 | if row['cpu_id'] in [0, 1, 2, 3] and row['duration'] in self.sm_idle_dur:
80 |
81 | results.append({
82 | 'cpu_id': row['cpu_id'],
83 | 'timestamp': row['start_time'],
84 | 'cpu_freq': int(167 / row['duration'] * self.sm_max_freq)
85 | })
86 |
87 | elif row['cpu_id'] in [4, 5] and row['duration'] in self.lg_idle_dur:
88 |
89 | results.append({
90 | 'cpu_id': row['cpu_id'],
91 | 'timestamp': row['start_time'],
92 | 'cpu_freq': int(0 / row['duration'] * self.lg_max_freq)
93 | })
94 |
95 | else:
96 | matching[row['cpu_id']] = False
97 |
98 | else:
99 | matching[row['cpu_id']] = False
100 |
101 | return pandas.DataFrame(results, columns = ['cpu_id', 'timestamp', 'cpu_freq'])
102 |
103 | def profiler_mode(self):
104 |
105 | results = list()
106 |
107 | cpu_state = self.__get_cpu_state()
108 |
109 | # find profiler tid
110 |
111 | thread_id = '0x2026f2' # FIXME: find a way to get profiler tid
112 |
113 | thread_data = self.__get_thread_data(thread_id)
114 |
115 | if not thread_data:
116 |
117 | sys.exit('Profiler thread not found.')
118 |
119 | # Get running time
120 |
121 | min_sm_time = 0
122 | max_sm_time = 0
123 | min_lg_time = 0
124 | max_lg_time = 0
125 |
126 | run = {
127 | 'start_time': thread_data[0]['start_time'],
128 | 'end_time': thread_data[0]['end_time'],
129 | 'duration': 0,
130 | 'clusters': []
131 | }
132 |
133 | gap_time = 8 * 1000 * 1000
134 |
135 | for data in thread_data:
136 |
137 | cluster = 'sm' if data['cpu_id'] in [0, 1, 2, 3] else 'lg'
138 |
139 | # Append new run
140 | if data['start_time'] > run['end_time'] + gap_time:
141 |
142 | record = {
143 | 'timestamp': run['end_time'],
144 | 'sm_time': run['duration'] if 'lg' not in run['clusters'] else 0,
145 | 'lg_time': run['duration'] if 'sm' not in run['clusters'] else 0
146 | }
147 |
148 | min_sm_time = record['sm_time'] if record['sm_time'] and (not min_sm_time or record['sm_time'] < min_sm_time) else min_sm_time
149 | max_sm_time = record['sm_time'] if record['sm_time'] and (not max_sm_time or record['sm_time'] > max_sm_time) else max_sm_time
150 | min_lg_time = record['lg_time'] if record['lg_time'] and (not min_lg_time or record['lg_time'] < min_lg_time) else min_lg_time
151 | max_lg_time = record['lg_time'] if record['lg_time'] and (not max_lg_time or record['lg_time'] > max_lg_time) else max_lg_time
152 |
153 | results.append(record)
154 |
155 | run['start_time'] = data['start_time']
156 | run['end_time'] = data['end_time']
157 | run['duration'] = data['duration']
158 | run['clusters'] = [cluster]
159 |
160 | # Run is going
161 | else:
162 | run['end_time'] = data['end_time']
163 | run['duration'] += data['duration']
164 | run['clusters'].append(cluster)
165 |
166 | # Write freq data
167 |
168 | for record in results:
169 |
170 | record['sm_freq'] = int(self.sm_max_freq * min_sm_time / record['sm_time']) if record['sm_time'] else 0
171 | record['lg_freq'] = int(self.lg_max_freq * min_lg_time / record['lg_time']) if record['lg_time'] else 0
172 |
173 | return pandas.DataFrame(results, columns=['timestamp', 'sm_time', 'lg_time', 'sm_freq', 'lg_freq'])
174 |
--------------------------------------------------------------------------------
/modules/ios_cpu_load/ios_cpu_load_module.py:
--------------------------------------------------------------------------------
1 | import pandas
2 |
3 | from framework.module import Module
4 | from framework.helpers import sub_slices, get_slices_usage, get_time, pick_next_window, create_seq_list, create_seq_dict
5 |
class Ios_cpu_load_module(Module):
    """Per-core CPU load for iPhone, computed from Instruments cpu-state slices."""

    def __init__(self):

        super().__init__()

    def get_name(self):

        return 'ios_cpu_load'

    def get_desc(self):

        return 'CPU load for each core, for iPhone. (Parse from trace)'

    def get_help(self):

        text = '''
        iPhone CPU占用率

        > tracecat "trace:ios_cpu_load"              # 抓取instruments trace
        > tracecat "parse:ios_cpu_load"              # 以1s粒度解析占用率
        > tracecat "parse:ios_cpu_load(100ms)"       # 以100ms粒度解析占用率
        > tracecat "chart:ios_cpu_load"              # 显示各cpu占用率
        > tracecat "chart:ios_cpu_load(0)"           # 只显示cpu 0的占用率
        > tracecat "chart:ios_cpu_load(0-4,5-6,7)"   # 显示平均占用率

        * 需要在MacOS运行,需要安装xcode软件'''

        return text

    def invoke_sources(self):

        self.instruments = self.invoke_source('instruments')

    def __get_sched_slices(self, cpu_id):
        """All cpu-state slices for one core, as a list of record dicts."""

        df_cpu_state = self.instruments.get_cpu_state()

        on_this_cpu = df_cpu_state.loc[df_cpu_state.cpu_id == cpu_id]

        return on_this_cpu[['cpu_id', 'start_time', 'end_time', 'duration']].to_dict(orient='records')

    def do_parse(self, params):
        """Compute the busy ratio of every core over fixed time windows."""

        # Window size: user-supplied (ms, converted to ns) or 1s by default.
        if params:
            window_time = get_time(params[0], 'ms') * 1000000
        else:
            window_time = 1000 * 1000000

        trace_info = self.instruments.get_trace_info()

        cpu_ids = trace_info['cpu_list']

        columns = create_seq_list('timestamp', 'cpu_load_', cpu_ids)
        results = create_seq_dict('timestamp', 'cpu_load_', cpu_ids, list)

        window = None

        for cpu_id in cpu_ids:

            column = 'cpu_load_' + str(cpu_id)

            sched_slices = self.__get_sched_slices(cpu_id)

            while True:

                window = pick_next_window(window, trace_info['start_time'], trace_info['end_time'], window_time, True)

                if not window:
                    break

                busy_slices = sub_slices(sched_slices, window['start'], window['end'])

                usage = get_slices_usage(busy_slices, window['start'], window['end'])

                # Keep two decimal places (truncated, not rounded).
                results[column].append(int(usage * 10000) / 100)

                # The timestamp column is only filled while walking the first
                # cpu; later cpus revisit the same window ids.
                if len(results['timestamp']) == window['id']:
                    results['timestamp'].append(window['end'])

        return pandas.DataFrame(results, columns = columns)

    def do_chart(self, params, df):
        """Draw per-core (or grouped) load curves."""

        self.plot_paral_chart(params, df, 'cpu load', x='timestamp', y_prefixer='cpu_load_', kind='line', marker='.')
83 |
--------------------------------------------------------------------------------
/modules/profiler/profiler_module.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import pandas
4 |
5 | from framework.module import Module
6 | from framework.sources.profiler import Profiler
7 |
class Profiler_module(Module):
    """Expose Qualcomm Snapdragon Profiler CSV exports as tracecat metrics.

    The profiler itself is driven manually on the PC side; this module only
    parses the exported CSV. Because each invocation handles a single metric,
    save/export/load are overridden to use metric-specific file names that
    are decided in do_parse().
    """

    def __init__(self):

        super().__init__()

        # Metric-specific output paths, filled in by do_parse(). Both start
        # as None so save()/export() degrade gracefully (delegating a None
        # path to the base class) even when do_parse() has not run yet.
        self.pickle_file = None

        self.excel_file = None

    def get_name(self):

        return 'profiler'

    def get_desc(self):

        return 'Qualcomm Snapdragon Profiler data'

    def get_help(self):
        """Build the help text, appending the table of supported metrics."""

        text = '''
        半自动方式抓取、解析SnapdragonProfiler提供的所有数据
        (需要PC端安装高通Profiler工具)

        > tracecat "trace:profiler(cpu_branch_miss),profiler(cpu_cache_miss)"
          抓取cpu_branch_miss, cpu_cache_miss,开始后命令行会进入等待,请手动运行
          profiler,并在profiler中启动这些数据的抓取,然后在命令行按y继续。抓取结束
          后,命令行再次进入等待,请手动停止profiler,并将结果导出到./runs/xxx/prof
          iler/profiler.csv,然后按y继续。
        > tracecat "parse:profiler(cpu_branch_miss),profiler(cpu_cache_miss)"
          解析出cpu_branch_miss, cpu_cache_miss, cpu_clock
        > tracecat "chart:profiler(cpu_branch_miss)"
          显示cpu_branch_miss的图表

        当前支持的Metrics:
        ------------------------\n'''

        for metric, params in Profiler.metrics.items():

            text += '    ' + metric.ljust(24) + params['matcher'].replace('^', '').replace('$', '') + '\n'

        return text.rstrip()

    def invoke_sources(self):

        self.profiler = self.invoke_source('profiler')

    # Hijack module::save -- always save to the metric-specific pickle path.
    def save(self, pickle_file = None):

        return super().save(self.pickle_file)

    # Hijack module::export -- always export to the metric-specific excel path.
    def export(self, excel_file = None):

        return super().export(self.excel_file)

    # Hijack module::load -- without an explicit path return a dummy frame so
    # chart()'s not-empty check passes; do_chart() loads the real file itself.
    def load(self, pickle_file = None):

        if not pickle_file:

            self.results = pandas.DataFrame(['dummy'])

            return self.results

        return super().load(pickle_file)

    def do_parse(self, params):
        """Parse one metric (params[0]) out of the profiler CSV export."""

        if not params:

            sys.exit('Please specify a metrics for profiler module')

        metrics = params[0]

        # Route save()/export() to per-metric files.
        self.pickle_file = self.workspace + self.get_name() + '_' + metrics + '.pkl'

        self.excel_file = self.workspace + self.get_name() + '_' + metrics + '.xlsx'

        results = self.profiler.get_metrics(metrics)

        return pandas.DataFrame(results)

    def do_chart(self, params, df):
        """Plot one metric (params[0]); loads its per-metric pickle itself."""

        # load pickle file

        if not params:

            sys.exit('Please specify a metrics for profiler module')

        metrics = params[0]

        pickle_file = self.workspace + self.get_name() + '_' + metrics + '.pkl'

        df = self.load(pickle_file)

        # draw chart

        if 'index' in df.columns:

            # Indexed data (e.g. per-core series) gets one line per index.
            self.plotter.plot_index_chart(params[1:], df, metrics, index='index', x='timestamp', y=metrics, kind='line', marker='.')

        else:

            self.plotter.plot(df, metrics, x='timestamp', y=metrics, kind='line', marker='.')
113 |
--------------------------------------------------------------------------------
/modules/simpleperf/simpleperf_module.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import pandas
3 |
4 | from framework.module import Module
5 | from framework.helpers import get_time
6 |
class Simpleperf_module(Module):
    """Collect and chart `simpleperf stat` event counters.

    Global mode reports per-cpu counts plus a per-event total; app mode
    (package name before the first '|') reports totals only. See get_help()
    for the CLI syntax.
    """

    def __init__(self):

        super().__init__()

    def get_name(self):

        return 'simpleperf'

    def get_desc(self):

        return 'Statistics simpleperf events.'

    def get_help(self):

        text = '''
        从simpleperf stat统计simpleperf events,支持全局采样和APP采样两种模式

        全局采样:
        > adb shell simpleperf list                                        # 获取手机支持的所有event
        > tracecat "trace:simpleperf(cache-misses,cpu-cycles)"             # 以500ms粒度全局采样(默认)
        > tracecat "trace:simpleperf(cache-misses,cpu-cycles|100ms)"       # 以100ms粒度全局采样
        * 全局采样包括各个cpu的单独统计数据

        APP采样:
        > adb shell pm list package                                        # 获取所有APP包名
        > tracecat "trace:simpleperf(com.android.dialer|cache-misses|100ms)"# 以100ms粒度只采样APP:com.android.dialer
        * APP采样只包括所有cpu的总和数据,不包括单独cpu的数据

        解析和显示:
        > tracecat "parse:simpleperf"                                      # 解析所有抓取的event
        > tracecat "parse:simpleperf(cache-misses,cpu-cycles)"             # 解析部分抓取的event
        > tracecat "chart:simpleperf"                                      # 显示所有event的曲线
        > tracecat "chart:simpleperf(cache-misses,cpu-cycles)"             # 显示部分event的曲线
        > tracecat "chart:simpleperf(cache-misses(cpu0),cpu-cycles(cpu0))" # 显示某个核的event的曲线'''

        return text

    def invoke_sources(self):

        self.simpleperf = self.invoke_source('simpleperf')

    def __parse_params(self, params):
        """Split CLI params into (app, events, period).

        Supported shapes:
          ['ev1', 'ev2']        -> events only
          ['app|ev1', 'ev2']    -> app prefix on the first entry
          ['ev1', 'ev2|100ms']  -> period suffix on the last entry
          ['a|b'] single entry  -> b starting with a digit means b is a
                                   period, otherwise a is an app name

        Note: `events` aliases `params`, so the app/period markers are
        stripped from the caller's list in place.
        """

        app = None

        period = None

        events = params if params else None

        if events:

            # Disambiguate a lone "a|b" entry (see docstring).
            mode = None

            if '|' in events[0] and len(events) == 1:

                parts = events[0].split('|')

                if len(parts) == 2:

                    mode = 'PERIOD' if parts[1][0].isdigit() else 'APP'

            # Strip the app prefix from the first entry.
            if '|' in events[0] and mode != 'PERIOD':

                parts = events[0].split('|', 1)

                events[0] = parts[1].strip()

                app = parts[0]

            # Strip the period suffix from the last entry.
            if '|' in events[-1] and mode != 'APP':

                parts = events[-1].rsplit('|', 1)

                events[-1] = parts[-2].strip()

                period = get_time(parts[-1].strip(), 'ms')

        return app, events, period

    def do_trace(self, params):
        """Configure the simpleperf source from the CLI params."""

        app, events, period = self.__parse_params(params)

        if app:

            self.simpleperf.set_app(app)

        if events:

            self.simpleperf.enable(events)

        if period:

            self.simpleperf.set_period(period)

    def __check_complete(self, results):
        """Abort unless every column holds one value per timestamp row."""

        for col in results.values():

            if len(col) != len(results['timestamp']):

                sys.exit('ERROR: Simpleperf data broken, parse failed.')

    def __parse_global_format(self, df):
        """Pivot app-mode rows (no per-cpu data) into one column per event.

        Rows arrive in sampling cycles; a new cycle is detected either by a
        >10ms timestamp jump or by an event repeating, and every finished
        cycle must have filled each column exactly once.
        """

        results = {'timestamp': list()}

        prev_ts = 0

        for idx, row in df.iterrows():

            data = dict(row)

            # Per-cpu rows must not appear in this format.
            if data['cpu'] is not None:

                sys.exit('ERROR: Simpleperf data broken, parse failed.')

            # Create event column

            event = data['event']

            if event not in results:

                results[event] = list()

            # Prev cycle should be complete if timestamp gap met.

            if row['timestamp'] - prev_ts > 10 * 1000000:

                self.__check_complete(results)

                prev_ts = data['timestamp']

            # Prev cycle should be complete if new row appended.

            if len(results[event]) == len(results['timestamp']):

                self.__check_complete(results)

                results['timestamp'].append(data['timestamp'])

            # Append new record

            results[event].append(data['count_normalize'])

        return pandas.DataFrame(results)

    def __parse_percpu_format(self, df):
        """Pivot global-mode rows into event(cpuN) columns plus a summed
        per-event total column.

        Cycle detection works as in __parse_global_format.
        """

        results = {'timestamp': list()}

        prev_ts = 0

        for idx, row in df.iterrows():

            data = dict(row)

            # This format requires a cpu id on every row.
            if data['cpu'] is None:

                sys.exit('ERROR: Simpleperf data broken, parse failed.')

            # Create event column

            event = data['event']

            if event not in results:

                results[event] = list()

            # Create event(cpu) column

            column = '%s(cpu%s)' % (data['event'], str(data['cpu']))

            if column not in results:

                results[column] = list()

            # Prev cycle should be complete if timestamp gap met.

            if row['timestamp'] - prev_ts > 10 * 1000000:

                self.__check_complete(results)

                prev_ts = data['timestamp']

            # Prev cycle should be complete if new row appended.

            if len(results[column]) == len(results['timestamp']):

                self.__check_complete(results)

                results['timestamp'].append(data['timestamp'])

            # Append new record

            results[column].append(data['count_normalize'])

            # Accumulate the per-event total across cpus for this cycle.
            if len(results[event]) < len(results[column]):

                results[event].append(0)

            results[event][-1] += data['count_normalize']

        return pandas.DataFrame(results)

    def do_parse(self, params):
        """Parse captured counters; the pivot format is chosen by whether
        the first row carries per-cpu data."""

        dummy, events, dummy = self.__parse_params(params)

        df = self.simpleperf.get_data(event = events)

        if not df.empty and df['cpu'].iloc[0] is None:

            return self.__parse_global_format(df)

        else:

            return self.__parse_percpu_format(df)

    def do_chart(self, params, df):
        """Plot all event columns, or only those whose name starts with one
        of the requested events."""

        dummy, events, dummy = self.__parse_params(params)

        columns = df.columns

        if events:

            columns = ['timestamp']

            for col in df.columns:

                for event in events:

                    if col.startswith(event):

                        columns.append(col)

        self.plotter.plot(df[columns], 'simpleperf events', x='timestamp', kind='line', marker='.')
247 |
--------------------------------------------------------------------------------
/modules/thermal_zone/thermal_zone_module.py:
--------------------------------------------------------------------------------
1 | import pandas
2 |
3 | from framework.module import Module
4 | from framework.helpers import create_seq_list, create_seq_dict, get_time
5 |
class Thermal_zone_module(Module):
    """Thermal zone temperatures sampled from sysfs."""

    def __init__(self):

        super().__init__()

    def get_name(self):

        return 'thermal_zone'

    def get_desc(self):

        return 'Thermal zone temperature. (Sample from sysfs)'

    def get_help(self):

        text = '''
        从sysfs采样thermal信息

        > tracecat "trace:thermal_zone"                # 以500ms粒度采样所有thermal节点(默认)
        > tracecat "trace:thermal_zone(0,1,2)" -s 1s   # 以1s粒度采样0,1,2三个zone(设置全局采样频率为1s)
        > tracecat "trace:thermal_zone(0,1,2|1s)"      # 以1s粒度采样0,1,2三个zone(设置模块采样频率为1s)
        > tracecat "parse:thermal_zone"                # 解析
        > tracecat "chart:thermal_zone"                # 显示所有thermal曲线
        > tracecat "chart:thermal_zone(0,1,2)"         # 显示0,1,2三个zone曲线

        * 由于大部分手机thermal节点比较多,建议尽量降低采样频率(>500ms)'''

        return text

    def invoke_sources(self):

        self.sysfs = self.invoke_source('sysfs', 'thermal_zone')

    def __parse_params(self, params):
        """Split CLI params into (zone id list, sample period in ms).

        Accepted forms: ['0','1'], ['0','1|1s'] (period attached to the last
        id, stripped from the caller's list in place), or a lone '1s'/'500ms'
        which is a period only (all zones).
        """

        period = None

        zone_ids = params if params else None

        if not zone_ids:
            return zone_ids, period

        last = zone_ids[-1]

        if '|' in last:

            # The final entry may carry "zone|period".
            pieces = last.split('|')

            zone_ids[-1] = pieces[0].strip()

            period = get_time(pieces[1].strip(), 'ms')

        elif len(zone_ids) == 1 and last[-1] == 's':

            # A single time-looking argument is a period, not a zone id.
            period = get_time(last.strip(), 'ms')

            zone_ids = None

        return zone_ids, period

    def do_trace(self, params):
        """Push the zone filter and sample period down to the sysfs source."""

        zone_ids, period = self.__parse_params(params)

        self.sysfs.config('thermal_zone', {'filter': zone_ids, 'period': period})

    def do_parse(self, params):
        """Pivot the sampled rows into one thermal_zone_<id> column per zone."""

        zones = self.sysfs.get_metrics('thermal_zone')

        # The first sample defines the set of zone ids.
        zone_ids = sorted(zones[0]['data'].keys()) if zones else []

        columns = create_seq_list('timestamp', 'thermal_zone_', zone_ids)
        results = create_seq_dict('timestamp', 'thermal_zone_', zone_ids, list)

        for sample in zones:

            results['timestamp'].append(sample['time'])

            for zone_id, temperature in sample['data'].items():

                results['thermal_zone_' + str(zone_id)].append(temperature)

        return pandas.DataFrame(results, columns = columns)

    def do_chart(self, params, df):
        """Draw all (or the selected) thermal zone curves."""

        self.plotter.plot_paral_chart(params, df, 'thermal zone', x='timestamp', y_prefixer='thermal_zone_', kind='line', marker='.')
95 |
--------------------------------------------------------------------------------
/scripts/run_all.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import platform
4 |
if __name__ == '__main__':

    # Batch driver: re-run a tracecat parse/chart command line against every
    # run directory under ./runs.

    if len(sys.argv) <= 1:

        print('Tracecat batch parse or chart script.')

        print('Examples:')

        print('> run_all "parse:cpu_load,cpu_freq,cpu_freq_stat"')
        print('> run_all "chart:cpu_load,cpu_freq,cpu_freq_stat"')

        sys.exit()

    # Frozen (PyInstaller) builds ship a tracecat executable; source
    # checkouts go through the interpreter.
    if getattr(sys, 'frozen', False):

        command = 'tracecat' if platform.system() == 'Windows' else './tracecat'

    else:
        command = 'python ./tracecat.py'

    runline = sys.argv[1]

    for file_name in os.listdir('./runs'):

        # Each run lives in its own directory; skip stray files.
        if os.path.isfile('./runs/' + file_name):

            continue

        if 'parse:' in runline:

            # Quote the run name so directories with spaces still work.
            os.system('%s "%s" "%s"' % (command, runline, file_name))

        elif 'chart:' in runline:

            # Charts are exported one module at a time.
            modules = runline.replace('chart:', '').split(',')

            for module in modules:

                os.system('%s "chart:%s" "%s" --export 1280,720' % (command, module, file_name))
        else:

            sys.exit('Error command format.')
47 |
--------------------------------------------------------------------------------
/scripts/self_test.sh:
--------------------------------------------------------------------------------
# Tracecat self-test: three end-to-end trace + parse cycles against a
# connected Android device. Each trace runs for 5 seconds (-d); -s sets the
# global sample period.

# Test 1: basic module set; per-module periods for simpleperf/thermal_zone.
python3 tracecat.py "trace:cpu_load,cpu_freq,cpu_idle,cpu_load2,cpu_freq2,cpu_freq_stat,cpu_freq_stat2,ddr_freq,ddr_freq_stat,gpu_freq,gpu_freq_stat,simpleperf(cache-misses|500ms),thermal_zone(0,1,2,3|1s)" -d 5s -s 100ms
python3 tracecat.py "parse:cpu_load,cpu_freq,cpu_idle,cpu_load2,cpu_freq2,cpu_freq_stat,cpu_freq_stat2,ddr_freq,ddr_freq_stat,gpu_freq,gpu_freq_stat,simpleperf,thermal_zone"

# Let the device settle before the next capture.
sleep 5

# Test 2: per-module sample periods and the full simpleperf event list.
python3 tracecat.py "trace:cpu_load,cpu_freq,cpu_idle,cpu_load2(10ms),cpu_freq2,cpu_freq_stat,cpu_freq_stat2,ddr_freq(20ms),ddr_freq_stat,gpu_freq(30ms),gpu_freq_stat,simpleperf(branch-misses,bus-cycles,cache-misses,cache-references,cpu-cycles,instructions,stalled-cycles-backend,stalled-cycles-frontend,branch-load-misses,branch-loads,dTLB-load-misses,dTLB-loads,iTLB-load-misses,iTLB-loads,L1-dcache-load-misses,L1-dcache-loads,L1-icache-load-misses,L1-icache-loads,raw-ldst-spec,raw-dp-spec,raw-ase-spec,raw-sve-inst-spec,raw-vfp-spec,raw-pc-write-spec,raw-br-pred,raw-op-spec),thermal_zone(1s)" -d 5s -s 100ms
python3 tracecat.py "parse:cpu_load,cpu_freq,cpu_idle,cpu_load2,cpu_freq2,cpu_freq_stat,cpu_freq_stat2,ddr_freq,ddr_freq_stat,gpu_freq,gpu_freq_stat,simpleperf,thermal_zone"

# Let the device settle before the next capture.
sleep 5

# Test 3: app-scoped simpleperf capture (com.android.systemui), explicit
# thermal zone list, 1s global sampling.
python3 tracecat.py "trace:cpu_load,cpu_freq,cpu_idle,cpu_load2(10ms),cpu_freq2,cpu_freq_stat,cpu_freq_stat2,ddr_freq(20ms),ddr_freq_stat,gpu_freq(30ms),gpu_freq_stat,simpleperf(com.android.systemui|branch-misses,bus-cycles,cache-misses,cache-references,cpu-cycles,instructions,stalled-cycles-backend,stalled-cycles-frontend,branch-load-misses,branch-loads,dTLB-load-misses,dTLB-loads,iTLB-load-misses,iTLB-loads,L1-dcache-load-misses,L1-dcache-loads,L1-icache-load-misses,L1-icache-loads,raw-ldst-spec,raw-dp-spec,raw-ase-spec,raw-sve-inst-spec,raw-vfp-spec,raw-pc-write-spec,raw-br-pred,raw-op-spec),thermal_zone(1,2,3)" -d 5s -s 1s
python3 tracecat.py "parse:cpu_load,cpu_freq,cpu_idle,cpu_load2,cpu_freq2,cpu_freq_stat,cpu_freq_stat2,ddr_freq,ddr_freq_stat,gpu_freq,gpu_freq_stat,simpleperf,thermal_zone"
--------------------------------------------------------------------------------
/tracecat.py:
--------------------------------------------------------------------------------
1 | #!./venv/bin/python3
2 |
3 | import os
4 | import sys
5 | import time
6 | import datetime
7 | import matplotlib.pyplot as plt
8 | import pandas
9 | import threading
10 | import argparse
11 |
12 | from framework.config import VERSION, CONFIG
13 | from framework.objects import SOURCES, MODULES
14 | from framework.helpers import get_element, get_time, log_current_command
15 |
16 | class Tracecat():
17 |
    # Name -> object registries, populated in __init__. NOTE(review): as
    # class attributes these dicts are shared by every Tracecat instance --
    # harmless for the single-instance CLI, but worth knowing.
    modules = dict()

    sources = dict()
21 |
22 | def __init__(self):
23 |
24 | for module in MODULES:
25 |
26 | self.modules[module.get_name()] = module
27 |
28 | for source in SOURCES:
29 |
30 | self.sources[source.get_name()] = source
31 |
32 | def __get_run_name(self, run_type):
33 |
34 | if run_type == 'NEW':
35 |
36 | now = datetime.datetime.now()
37 |
38 | return now.strftime('%Y%m%d_%H%M%S')
39 |
40 | elif run_type == 'LAST':
41 |
42 | dirs = dict()
43 |
44 | for dname in next(os.walk('./runs'))[1]:
45 |
46 | ctime = os.path.getctime('./runs/' + dname)
47 |
48 | dirs[ctime] = dname
49 |
50 | if not dirs:
51 |
52 | return None
53 |
54 | return dirs[max(dirs.keys())]
55 |
56 | else:
57 |
58 | raise Exception('Invalid run_type.')
59 |
60 | def __get_module(self, module_name):
61 |
62 | if module_name not in self.modules:
63 |
64 | sys.exit('ERROR: Module not found: %s' % module_name)
65 |
66 | return self.modules[module_name]
67 |
68 | def trace(self, mod_list, run_name, options):
69 |
70 | threads = list()
71 |
72 | duration = options['duration']
73 |
74 | if not duration:
75 |
76 | sys.exit('ERROR: You forgot passing option: --duration ')
77 |
78 | duration = get_time(duration, 's')
79 |
80 | if duration < 1:
81 |
82 | sys.exit('ERROR: [duration] should at least be 1s.')
83 |
84 | # Init a run
85 |
86 | if not run_name:
87 |
88 | run_name = self.__get_run_name('NEW')
89 |
90 | run_folder = './runs/%s/' % run_name
91 |
92 | if os.path.exists(run_folder):
93 |
94 | sys.exit('ERROR: Path already exsist: ' + run_folder)
95 |
96 | print('Using: %s' % run_folder)
97 |
98 | # Invoke sources from module
99 |
100 | for mod in mod_list:
101 |
102 | module = self.__get_module(mod['module'])
103 |
104 | module.init_sources(self.sources)
105 |
106 | module.invoke_sources()
107 |
108 | module.trace(mod['params'])
109 |
110 | # Invoke sources from source
111 |
112 | for source in self.sources.values():
113 |
114 | if source.is_enable():
115 |
116 | source.init_invoke(self.sources)
117 |
118 | source.invoke_sources()
119 |
120 | # Create data source threads
121 |
122 | for source in self.sources.values():
123 |
124 | if source.is_enable():
125 |
126 | source_folder = run_folder + source.get_name() + '/'
127 |
128 | source.init_workspace(source_folder)
129 |
130 | thread = threading.Thread(target=source.trace, args=(duration,))
131 |
132 | threads.append({
133 | 'name': source.get_name(),
134 | 'thread': thread
135 | })
136 |
137 | # Call pre trace
138 |
139 | source.pre_trace(duration)
140 |
141 | # Run data source threads
142 |
143 | for thread in threads:
144 |
145 | print('Create thread: ' + thread['name'])
146 |
147 | thread['thread'].start()
148 |
149 | # Wati for completion
150 |
151 | for thread in threads:
152 |
153 | thread['thread'].join()
154 |
155 | print('Thread finished: ' + thread['name'])
156 |
157 | # Call post trace
158 |
159 | for source in self.sources.values():
160 |
161 | if source.is_enable():
162 |
163 | source.post_trace(duration)
164 |
165 | log_current_command(run_folder + '/log.txt')
166 |
167 | def parse(self, mod_list, run_name, options):
168 |
169 | # Init a run
170 |
171 | if not run_name:
172 |
173 | run_name = self.__get_run_name('LAST')
174 |
175 | if not run_name:
176 |
177 | sys.exit('ERROR: Trace data not found, maybe you need run trace first.')
178 |
179 | run_folder = './runs/%s/' % run_name
180 |
181 | if not os.path.exists(run_folder):
182 |
183 | sys.exit('ERROR: Path not exsist: ' + run_folder)
184 |
185 | print('Using: %s' % run_folder)
186 |
187 | # Invoke sources
188 |
189 | for mod in mod_list:
190 |
191 | module = self.__get_module(mod['module'])
192 |
193 | module.init_sources(self.sources)
194 |
195 | module.invoke_sources()
196 |
197 | for source in self.sources.values():
198 |
199 | if source.is_enable():
200 |
201 | source.init_invoke(self.sources)
202 |
203 | source.invoke_sources()
204 |
205 | # Run source parsing
206 |
207 | for source in self.sources.values():
208 |
209 | if not source.is_enable():
210 |
211 | continue
212 |
213 | source_folder = run_folder + source.get_name() + '/'
214 |
215 | if not os.path.exists(source_folder):
216 |
217 | sys.exit('ERROR: Path not found: %s' % source_folder)
218 |
219 | source.init_workspace(source_folder)
220 |
221 | source.parse()
222 |
223 | # Run module parsing
224 |
225 | module_folder = run_folder + 'modules/'
226 |
227 | if not os.path.exists(module_folder):
228 |
229 | os.makedirs(module_folder)
230 |
231 | for mod in mod_list:
232 |
233 | module = self.__get_module(mod['module'])
234 |
235 | print('Parsing %s...' % module.get_name())
236 |
237 | module.init_workspace(module_folder)
238 |
239 | module.invoke_results()
240 |
241 | module.parse(mod['params'])
242 |
243 | print('Saving pickle file...')
244 |
245 | file_path = module.save()
246 |
247 | if file_path:
248 |
249 | print('Pickle file saved: %s' % file_path)
250 |
251 | print('Export excel file...')
252 |
253 | file_path = module.export()
254 |
255 | if file_path:
256 |
257 | if file_path[-4:] == '.csv':
258 |
259 | print('WARNING: Too many data, fallback to csv format.')
260 |
261 | print('Excel file saved: %s' % file_path)
262 |
263 | log_current_command(run_folder + '/log.txt')
264 |
265 | print('Done.')
266 |
267 | def chart(self, mod_list, run_name, options):
268 |
269 | # Init a run
270 |
271 | if not run_name:
272 |
273 | run_name = self.__get_run_name('LAST')
274 |
275 | if not run_name:
276 |
277 | sys.exit('ERROR: Trace data not found, maybe you need run trace first.')
278 |
279 | run_folder = './runs/%s/' % run_name
280 |
281 | if not os.path.exists(run_folder):
282 |
283 | sys.exit('ERROR: Path not exsist. ' + run_folder)
284 |
285 | print('Using: %s' % run_folder)
286 |
287 | # Plot charts
288 |
289 | axis = plt.gca()
290 |
291 | figure = plt.gcf()
292 |
293 | title = ''
294 |
295 | module_folder = run_folder + 'modules/'
296 |
297 | for idx, mod in enumerate(mod_list):
298 |
299 | module = self.__get_module(mod['module'])
300 |
301 | print('Loading %s...' % module.get_name())
302 |
303 | module.init_workspace(module_folder)
304 |
305 | result = module.load()
306 |
307 | if result.empty:
308 |
309 | sys.exit('ERROR: Nothing to plot.')
310 |
311 | print('Ploting %s...' % module.get_name())
312 |
313 | module.init_plotter(ax=axis, secondary_y=idx>0)
314 |
315 | module.chart(mod['params'])
316 |
317 | title += ' / ' + module.get_name() if title else module.get_name()
318 |
319 | axis.set_title(title)
320 |
321 | plt.gcf().canvas.set_window_title(run_name + ' - ' + title)
322 |
323 | plt.tight_layout()
324 |
325 | export = options['export']
326 |
327 | if export is not None:
328 |
329 | if not export:
330 |
331 | image_size = None
332 |
333 | else:
334 |
335 | image_args = export.split(',')
336 |
337 | if len(image_args) != 2 or not image_args[0].isdigit() or not image_args[1].isdigit():
338 |
339 | sys.exit('ERROR: Invalid [image_size] format, please type ,. (e.g. 1024,768)')
340 |
341 | image_size = {'width': int(image_args[0]), 'height': int(image_args[1])}
342 |
343 | dpi = 100
344 |
345 | image_file = ''
346 |
347 | for mod in mod_list:
348 |
349 | if image_file:
350 |
351 | image_file += '_'
352 |
353 | image_file += mod['module']
354 |
355 | if mod['params']:
356 |
357 | param = '(' + ','.join(mod['params']) + ')'
358 |
359 | param = param.replace('/', '_')
360 |
361 | image_file += param
362 |
363 | image_file = module_folder + image_file + '.png'
364 |
365 | if image_size:
366 |
367 | figure.set_size_inches(image_size['width'] / dpi, image_size['height'] / dpi)
368 |
369 | plt.savefig(image_file, dpi=dpi)
370 |
371 | else:
372 | plt.savefig(image_file)
373 |
374 | print('Save chart as image: %s' % image_file)
375 |
376 | log_current_command(run_folder + '/log.txt')
377 |
378 | print('Done.')
379 |
380 | else:
381 |
382 | start = time.time()
383 |
384 | plt.show()
385 |
386 | end = time.time()
387 |
388 | if end - start < 1:
389 |
390 | print('ERROR: It seems something wrong with the plot, please check the error message.')
391 |
392 | print('To solve this issue, you may run: "sudo apt install python3-tk" and try again.')
393 |
394 | else:
395 | print('Done.')
396 |
397 | def print_help(self):
398 |
399 | text = '\nTracecat %s\n' % VERSION
400 | text += '=========================\n'
401 |
402 | text += '\nBasic Format:\n'
403 | text += ' tracecat "trace:,..." \n'
404 | text += ' --duration, -d