├── .coveragerc
├── .github
│   ├── dependabot.yml
│   └── workflows
│       ├── codeql-analysis.yml
│       ├── pypi-publish.yml
│       └── smoketest.yml
├── .gitignore
├── CHANGELOG.md
├── Dockerfile
├── LICENSE
├── MANIFEST.in
├── README.md
├── README_en.md
├── docs
│   ├── .nojekyll
│   ├── README.md
│   ├── _coverpage.md
│   ├── _sidebar.md
│   ├── index.html
│   ├── pages
│   │   ├── 0_what_is_it.md
│   │   ├── 1_where_can_it_be_used.md
│   │   ├── 2_how_to_use_it.md
│   │   ├── 3_how_it_works.md
│   │   ├── 4_roadmap.md
│   │   └── 5_others.md
│   └── pics
│       ├── brand.svg
│       ├── ssim_trend.png
│       ├── stage.png
│       └── stagesepx.svg
├── example
│   ├── README.md
│   ├── compare_videos.py
│   ├── cut_and_classify.py
│   ├── mini.py
│   ├── old
│   │   ├── README.md
│   │   ├── classify.py
│   │   ├── classify_with_svm.py
│   │   ├── cut.py
│   │   ├── mini.py
│   │   └── multi_video.py
│   ├── range_check.py
│   ├── stable.py
│   ├── train_and_predict
│   │   ├── README.md
│   │   ├── cut.py
│   │   ├── predict.py
│   │   └── train.py
│   └── with_keras.py
├── pyrightconfig.json
├── setup.py
├── stagesepx
│   ├── __init__.py
│   ├── api.py
│   ├── classifier
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── keras.py
│   │   ├── ssim.py
│   │   └── svm.py
│   ├── cli.py
│   ├── constants.py
│   ├── cutter
│   │   ├── __init__.py
│   │   ├── cut_range.py
│   │   ├── cut_result.py
│   │   └── cutter.py
│   ├── hook.py
│   ├── reporter.py
│   ├── template
│   │   └── report.html
│   ├── toolbox.py
│   └── video.py
└── test
    ├── README.md
    ├── min_run_config.json
    ├── run_config.json
    ├── run_config_with_model.json
    ├── test_api.py
    ├── test_classifier.py
    ├── test_cli.py
    ├── test_cutter.py
    ├── test_diff.py
    ├── test_hook.py
    ├── test_toolbox.py
    └── test_video.py
/.coveragerc:
--------------------------------------------------------------------------------
1 | [report]
2 | exclude_lines =
3 | raise NotImplementedError
4 |
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 | updates:
3 | - package-ecosystem: pip
4 | directory: "/"
5 | schedule:
6 | interval: monthly
7 | time: "11:00"
8 | open-pull-requests-limit: 10
9 | versioning-strategy: lockfile-only
10 |
--------------------------------------------------------------------------------
/.github/workflows/codeql-analysis.yml:
--------------------------------------------------------------------------------
1 | # For most projects, this workflow file will not need changing; you simply need
2 | # to commit it to your repository.
3 | #
4 | # You may wish to alter this file to override the set of languages analyzed,
5 | # or to provide custom queries or build logic.
6 | #
7 | # ******** NOTE ********
8 | # We have attempted to detect the languages in your repository. Please check
9 | # the `language` matrix defined below to confirm you have the correct set of
10 | # supported CodeQL languages.
11 | #
12 | name: "CodeQL"
13 |
14 | on:
15 | push:
16 | branches: [ master ]
17 | pull_request:
18 | # The branches below must be a subset of the branches above
19 | branches: [ master ]
20 | schedule:
21 | - cron: '00 02 * * *'
22 |
23 | jobs:
24 | analyze:
25 | name: Analyze
26 | runs-on: ubuntu-latest
27 |
28 | strategy:
29 | fail-fast: false
30 | matrix:
31 | language: [ 'python' ]
32 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
33 | # Learn more:
34 | # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
35 |
36 | steps:
37 | - name: Checkout repository
38 | uses: actions/checkout@v2
39 |
40 | # Initializes the CodeQL tools for scanning.
41 | - name: Initialize CodeQL
42 | uses: github/codeql-action/init@v1
43 | with:
44 | languages: ${{ matrix.language }}
45 | # If you wish to specify custom queries, you can do so here or in a config file.
46 | # By default, queries listed here will override any specified in a config file.
47 | # Prefix the list here with "+" to use these queries and those in the config file.
48 | # queries: ./path/to/local/query, your-org/your-repo/queries@main
49 |
50 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
51 | # If this step fails, then you should remove it and run the build manually (see below)
52 | - name: Autobuild
53 | uses: github/codeql-action/autobuild@v1
54 |
55 | # ℹ️ Command-line programs to run using the OS shell.
56 | # 📚 https://git.io/JvXDl
57 |
58 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
59 | # and modify them (or add more) to build your code if your project
60 | # uses a compiled language
61 |
62 | #- run: |
63 | # make bootstrap
64 | # make release
65 |
66 | - name: Perform CodeQL Analysis
67 | uses: github/codeql-action/analyze@v1
68 |
--------------------------------------------------------------------------------
/.github/workflows/pypi-publish.yml:
--------------------------------------------------------------------------------
1 | name: PyPI Publish
2 |
3 | on:
4 | push:
5 | tags:
6 | - 'v*'
7 |
8 | jobs:
9 | pypi-publish:
10 | name: Upload release to PyPI
11 | runs-on: ubuntu-latest
12 | environment:
13 | name: pypi
14 | url: https://pypi.org/p/stagesepx
15 | permissions:
16 | id-token: write # IMPORTANT: this permission is mandatory for trusted publishing
17 | contents: read
18 | steps:
19 | - name: Checkout
20 | uses: actions/checkout@v4
21 | - name: Set up Python
22 | uses: actions/setup-python@v4
23 | - name: Build sdist
24 | run: |
25 | python -m pip install --upgrade pip
26 | python setup.py sdist
27 |
28 | - name: Publish package distributions to PyPI
29 | uses: pypa/gh-action-pypi-publish@release/v1
30 |
--------------------------------------------------------------------------------
/.github/workflows/smoketest.yml:
--------------------------------------------------------------------------------
1 | name: smoketest
2 |
3 | on:
4 | push:
5 | pull_request:
6 | schedule:
7 | - cron: '0 23 * * 6'
8 |
9 | jobs:
10 | build:
11 |
12 | runs-on: ubuntu-22.04
13 | strategy:
14 | max-parallel: 5
15 | matrix:
16 | python-version: [ '3.8', '3.9', '3.10', '3.11', '3.12' ]
17 |
18 | steps:
19 | - uses: actions/checkout@v1
20 | - name: Set up Python ${{ matrix.python-version }}
21 | uses: actions/setup-python@v1
22 | with:
23 | python-version: ${{ matrix.python-version }}
24 | - name: Install dependencies
25 | run: |
26 | sudo apt-get update
27 | sudo apt-get install -y ffmpeg
28 | pip install .
29 | pip install tensorflow
30 | - name: Download test resource
31 | run: |
32 | curl -o demo.mp4 https://raw.githubusercontent.com/williamfzc/stagesep2-sample/master/videos/demo.mp4
33 | curl -o demo.jpg https://raw.githubusercontent.com/williamfzc/stagesep2-sample/master/pictures/amazon.jpg
34 | - name: Generate coverage report
35 | run: |
36 | pip install pytest
37 | pip install pytest-cov
38 | pytest --cov=./stagesepx --cov-report=xml
39 | env:
40 | PYTHONPATH: .
41 | - name: Upload coverage to Codecov
42 | uses: codecov/codecov-action@v3
43 | with:
44 | file: ./coverage.xml
45 | flags: unittests
46 | name: codecov-umbrella
47 | - name: Test example
48 | run: |
49 | cd example
50 | python cut_and_classify.py
51 | python mini.py
52 | python stable.py
53 | cd ..
54 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 | MANIFEST
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *.cover
47 | .hypothesis/
48 | .pytest_cache/
49 |
50 | # Translations
51 | *.mo
52 | *.pot
53 |
54 | # Django stuff:
55 | *.log
56 | local_settings.py
57 | db.sqlite3
58 |
59 | # Flask stuff:
60 | instance/
61 | .webassets-cache
62 |
63 | # Scrapy stuff:
64 | .scrapy
65 |
66 | # Sphinx documentation
67 | docs/_build/
68 |
69 | # PyBuilder
70 | target/
71 |
72 | # Jupyter Notebook
73 | .ipynb_checkpoints
74 |
75 | # pyenv
76 | .python-version
77 |
78 | # celery beat schedule file
79 | celerybeat-schedule
80 |
81 | # SageMath parsed files
82 | *.sage.py
83 |
84 | # Environments
85 | .env
86 | .venv
87 | env/
88 | venv/
89 | ENV/
90 | env.bak/
91 | venv.bak/
92 |
93 | # Spyder project settings
94 | .spyderproject
95 | .spyproject
96 |
97 | # Rope project settings
98 | .ropeproject
99 |
100 | # mkdocs documentation
101 | /site
102 |
103 | # mypy
104 | .mypy_cache/
105 |
106 | .idea/
107 | *.mp4
108 |
109 | example/*.mp4
110 | example/*.html
111 | example/*.h5
112 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.7-slim
2 |
3 | USER root
4 |
5 | RUN apt-get update \
6 | # opencv
7 | && apt-get install -y libglib2.0 libsm6 libxrender1 libxext-dev \
8 | # ffmpeg
9 | && apt-get install -y ffmpeg \
10 | && apt-get clean \
11 | && rm -rf /var/lib/apt/lists/*
12 |
13 | WORKDIR /root
14 |
15 | COPY . .
16 |
17 | RUN pip install --no-cache-dir .
18 |
19 | WORKDIR /usr/src/app
20 |
21 | CMD ["bash"]
22 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2019 williamfzc
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include LICENSE
2 | include stagesepx/template/*.html
3 | recursive-exclude * __pycache__
4 | recursive-exclude * *.pyc
5 | recursive-exclude * *.pyo
6 | prune example*
7 | prune test*
8 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | stage sep(aration) x
6 |
7 | detect stages in video automatically
8 |
9 |
10 | ---
11 |
12 |
13 | | Type | Status |
14 | |----------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
15 | | package version | [](https://badge.fury.io/py/stagesepx) |
16 | | python version |  |
17 | | auto test |  |
18 | | code maintainability | [](https://codeclimate.com/github/williamfzc/stagesepx/maintainability) |
19 | | code coverage | [](https://codecov.io/gh/williamfzc/stagesepx) |
20 | | code style | [](https://github.com/psf/black) |
21 | | stat | [](https://pepy.tech/project/stagesepx) [](https://pepy.tech/project/stagesepx) [](https://pepy.tech/project/stagesepx) |
22 |
23 |
24 | ---
25 |
26 | > 2022/08/13: Since 0.18.0, ffmpeg is bundled via [imageio_ffmpeg](https://github.com/imageio/imageio-ffmpeg) (please evaluate its LICENSE implications yourself). This release works around the opencv version and M1 chip limitations, and a separate ffmpeg installation is no longer required. See [#178](https://github.com/williamfzc/stagesepx/issues/178).
27 | >
28 | > 2022/03/30: We do not officially maintain any group chats such as WeChat or QQ groups; please communicate publicly via issues. No donations are needed; do not trust any money-related messages.
29 | >
30 | > 2021/12/15: Since 0.16.0, stagesepx provides partial support for functional testing. See [#158](https://github.com/williamfzc/stagesepx/issues/158)
31 | >
32 | > 2020/05/21: The project has reached a fairly stable state and has been adopted as a formal tool by several leading companies and teams. Please report bugs and suggestions via [issue](https://github.com/williamfzc/stagesepx/issues) first; thanks to everyone who has supported this project. Developers interested in improving it and teams that have adopted it successfully are welcome to build it together :)
33 |
34 | ---
35 |
36 | > [English README here](./README_en.md)
37 |
38 | This video shows the complete startup process of an app:
39 |
40 |
41 |
42 | Feed the video to stagesepx and it will automatically analyze and decompose it into all of its stages, including the transition processes with their durations and how long each stable stage lasts:
43 |
44 |
45 |
46 | From this you can get the exact time consumption of each stage.
47 |
48 | ### Cross-platform
49 |
50 | Naturally it is cross-platform, e.g. the web. In fact, any platform:
51 |
52 |
53 |
54 |
55 |
56 | ### High accuracy
57 |
58 | Accuracy consistent with the video itself. Take a stopwatch as an example:
59 |
60 |
61 |
62 | As you can see, there is almost no difference from the stopwatch. **Note that accuracy here means stagesepx can faithfully reproduce the data and behavior of the video itself. For the phenomenon being recorded (e.g. which state appears at which moment), accuracy largely depends on the video itself, such as its fps and resolution.**
63 |
64 | ### Fully decoupled & programmable
65 |
66 | If you would rather process the raw data yourself for further development instead of reading a report, just drop the report part. You will get a Python object to use however you like. It offers a rich API, e.g. converting to a dictionary:
67 |
68 | ```text
69 | {
70 | "data": [{
71 | "data": null,
72 | "frame_id": 1,
73 | "stage": "0",
74 | "timestamp": 0.0,
75 | "video_path": "../demo.mp4"
76 | }, {
77 | "data": null,
78 | "frame_id": 2,
79 | "stage": "0",
80 | "timestamp": 0.04,
81 | "video_path": "../demo.mp4"
82 | }, {
83 | "data": null,
84 | "frame_id": 3,
85 | "stage": "0",
86 | "timestamp": 0.08,
87 | "video_path": "../demo.mp4"
88 | }, {
89 |
90 | ...
91 | ```
92 |
93 | From this dictionary we can tell, for each frame:
94 |
95 | - which stage it was classified into
96 | - its timestamp
97 | - its frame id
98 | - ...
99 |
100 | You are free to do anything with this data, whether saving it or handing it to the next piece of code. A minimal sketch follows.
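A minimal sketch of this decoupled flow (following example/mini.py; `../demo.mp4` is a placeholder path):

```python
from stagesepx.cutter import VideoCutter
from stagesepx.classifier import SVMClassifier

video_path = "../demo.mp4"

# cut the video into stable/unstable ranges and sample 5 frames per stable stage
cutter = VideoCutter()
res = cutter.cut(video_path)
stable, unstable = res.get_range()
data_home = res.pick_and_save(stable, 5)

# train on the sampled frames, then classify every frame within the stable ranges
cl = SVMClassifier()
cl.load(data_home)
cl.train()
classify_result = cl.classify(video_path, stable)

# no Reporter involved: the dictionary shown above, ready for your own code
print(classify_result.to_dict())
```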
101 |
102 | ### Full automation & scaling
103 |
104 | - Being programmable, it naturally evolves toward fully replacing manual work. This is its most powerful trait;
105 | - it lets you train a model on your own training set and use a neural network for large-scale, fully automated measurement of specific stage durations;
106 | - this approach can be applied widely across business iterations and, combined with continuous integration, effectively reduces manual effort;
107 | - some directions for reference:
108 | - build high-frequency performance regression tests for your app and form a benchmark
109 | - extend the model to build inspection capabilities for a series of similar apps (mini programs, mini games, etc.)
110 | - ...
111 |
112 | For details, see [applying stagesepx to real business](https://github.com/williamfzc/work_with_stagesepx).
113 |
114 | ---
115 |
116 | - no pre-training or learning required in standard mode
117 | - less code required
118 | - highly configurable for different scenarios
119 | - integrates with other frameworks to fit your business
120 | - all you need is a video
121 |
122 | ## Getting started
123 |
124 | ### Production use
125 |
126 | > For real production use, a full Python script is recommended over the command line for better programmability. Complete production examples live in a separate repo: [portal](https://github.com/williamfzc/work_with_stagesepx).
127 | > Please read [this article](https://blog.csdn.net/wsc106/article/details/107351675) alongside; it resolves roughly 90% of the questions.
128 |
129 | - Configuration:
130 | - [run a demo with 30 lines of code](example/mini.py)
131 | - [why don't the 30 lines have comments](example/stable.py)
132 | - [are there more options](example/cut_and_classify.py)
133 | - Application:
134 | - [I want to understand how this project works in a real scenario](https://github.com/150109514/stagesepx_with_keras)
135 | - [show me real production solutions, ideally spoon-fed examples](https://github.com/williamfzc/work_with_stagesepx)
136 | - [our app is complex, can it handle that](https://testerhome.com/topics/22215)
137 | - [too much hassle; is there an out-of-the-box tool that only needs simple configuration](https://github.com/williamfzc/sepmachine)
138 | - Other:
139 | - [I have a question](https://github.com/williamfzc/stagesepx/issues/new)
140 | - [official documentation (refer to the links above when using it; this document lags behind)](https://williamfzc.github.io/stagesepx/)
141 |
142 | ### Command line
143 |
144 | You can also use it straight from the command line, without writing any script:
145 |
146 | ```bash
147 | stagesepx analyse your_video.mp4 report.html
148 | ```
149 |
150 | On top of this, you can easily build workflows with shell. Take android as an example:
151 |
152 | ```bash
153 | adb shell screenrecord --time-limit 10 /sdcard/demo.mp4
154 | adb pull /sdcard/demo.mp4 .
155 | stagesepx analyse demo.mp4 report.html
156 | ```
157 |
158 | For inaccurate results, see [#46](https://github.com/williamfzc/stagesepx/issues/46).
159 |
160 | ### Config-driven runs (0.15.0)
161 |
162 | Usually, scenario differences require tuning parameters for better results, which forces users to spend some effort on script writing. Since 0.15.0, config-driven runs let users access all capabilities without writing a script, greatly lowering the barrier to entry.
163 |
164 | ```json
165 | {
166 | "output": ".",
167 | "video": {
168 | "path": "./PATH_TO_YOUR/VIDEO.mp4",
169 | "fps": 30
170 | }
171 | }
172 | ```
173 |
174 | Run it from the command line:
175 |
176 | ```bash
177 | stagesepx run YOUR_CONFIG.json
178 | ```
179 |
180 | to get the same effect as a script. For the other configuration options, see [work_with_stagesepx](https://github.com/williamfzc/work_with_stagesepx/tree/master/run_with_config).
181 |
182 | ## Installation
183 |
184 | Standard version (pypi):
185 |
186 | ```bash
187 | pip install stagesepx
188 | ```
189 |
190 | Preview version (github):
191 |
192 | ```bash
193 | pip install --upgrade git+https://github.com/williamfzc/stagesepx.git
194 | ```
195 |
196 | ## FAQ
197 |
198 | In the end I decided to maintain all Q&A on the issue board, since asking and answering is an inherently interactive process. If your question is still unanswered after checking the links below:
199 |
200 | - please [open a new issue](https://github.com/williamfzc/stagesepx/issues/new)
201 | - or follow up under the related issue
202 | - your question will help more people than just you :)
203 |
204 | Question list:
205 |
206 | - [problems during installation?](https://github.com/williamfzc/stagesepx/issues/80)
207 | - [how do I derive app startup time from the charts?](https://github.com/williamfzc/stagesepx/issues/73)
208 | - [too many logs; how do I silence them or export them to a file?](https://github.com/williamfzc/stagesepx/issues/58)
209 | - [my video contains carousels or regions that disturb classification](https://github.com/williamfzc/stagesepx/issues/55)
210 | - [how do I customize the classification result?](https://github.com/williamfzc/stagesepx/issues/48)
211 | - [the computed result is inaccurate / differs from traditional methods](https://github.com/williamfzc/stagesepx/issues/46)
212 | - [OutOfMemoryError occurred](https://github.com/williamfzc/stagesepx/issues/86)
213 | - [the tool cannot meet my business needs](https://github.com/williamfzc/stagesepx/issues/93)
214 | - [why do timestamps in the report differ from reality?](https://github.com/williamfzc/stagesepx/issues/75)
215 | - [my custom model's classification is inaccurate and doesn't match the training set I provided](https://github.com/williamfzc/stagesepx/issues/100)
216 | - ...
217 |
218 | Not only questions: suggestions and ideas can reach me via the issue board as well. We check it every day, so don't worry about slow follow-up.
219 |
220 | ## Related articles
221 |
222 | - [Image classification, AI, and fully automated performance testing](https://testerhome.com/topics/19978)
223 | - [Fully automated TikTok startup speed testing](https://testerhome.com/topics/22215)
224 | - [(MTSC2019) A next-generation speed testing solution based on image classification](https://testerhome.com/topics/21874)
225 |
226 | ## Architecture
227 |
228 |
229 |
230 | ## Contributing
231 |
232 | ### Roadmap
233 |
234 | Before version 1.0, our upcoming work falls into the following parts:
235 |
236 | #### Standardization
237 |
238 | As more and more businesses adopt it, we have started considering whether it can serve as an industry-level solution.
239 |
240 | - [x] accuracy comparison based on lab data (not public)
241 | - [x] [a standardized, production-ready example](https://github.com/williamfzc/work_with_stagesepx)
242 | - [ ] verification of edge cases
243 | - [x] 95%+ code coverage
244 | - [ ] documentation of API parameters
245 |
246 | #### Collecting and developing new requirements
247 |
248 | This part is managed on the issue board.
249 |
250 | ### Contributing code
251 |
252 | Everyone interested is welcome to help build this project. Three required steps:
253 |
254 | - please open an issue describing the feature you plan to build before you start coding, as it may already exist or be in development;
255 | - we strictly follow [Conventional Commits](https://www.conventionalcommits.org/zh-hans/) for commit messages;
256 | - this repo has fairly complete unit tests and CI to safeguard the project's quality, which has been invaluable in past iterations, so please add unit tests for your new code (see the existing cases under `test` for how to write them).
257 |
258 | ### Contact us
259 |
260 | - Email: `fengzc@vip.qq.com`
261 | - QQ: `178894043`
262 |
263 | ## Changelog / History
264 |
265 | see [CHANGELOG.md](CHANGELOG.md)
266 |
267 | ## Thanks
268 |
269 | Thank you [JetBrains](https://www.jetbrains.com/) for supporting the project with free product licenses.
270 |
271 | ## License
272 |
273 | [MIT](LICENSE)
274 |
--------------------------------------------------------------------------------
/README_en.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | stage sep(aration) x
6 |
7 | detect stages in video automatically
8 |
9 |
10 | ---
11 |
12 | | Type | Status |
13 | |----------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
14 | | package version | [](https://badge.fury.io/py/stagesepx) |
15 | | python version |  |
16 | | auto test |  |
17 | | code maintainability | [](https://codeclimate.com/github/williamfzc/stagesepx/maintainability) |
18 | | code coverage | [](https://codecov.io/gh/williamfzc/stagesepx) |
19 | | docker build status |   |
20 | | code style | [](https://github.com/psf/black) |
21 |
22 | ---
23 |
24 | > For English users:
25 | >
26 | > We mainly use Chinese in discussions, so most of the issues/documents are currently written in Chinese.
27 | >
28 | > But don't worry:
29 | > - google translate may be a good helper :)
30 | > - read the code directly (all the code and comments are written in English)
31 | > - feel free to contact us by opening a new issue with your questions
32 | >
33 | > Thanks!
34 |
35 | ---
36 |
37 | This video shows the complete startup process of an app:
38 |
39 | 
40 |
41 | By sending this video to stagesepx, you will get a report like this automatically:
42 |
43 | 
44 |
45 | You can easily get the exact time consumption for each stage. Of course it is cross-platform and can also be used on Android/Web/PC and the like. In fact, any platform:
46 |
47 | 
48 |
49 | 
50 |
51 | And it is precise:
52 |
53 | 
54 |
55 | As you can see, its result is very close to the timer.
56 |
57 | ---
58 |
59 | - Fully automatic, no pre-training required
60 | - Less code required
61 | - Configurable for different scenes
62 | - All you need is a video!
63 |
64 | ## Structure
65 |
66 | 
67 |
68 | ## Quick Start
69 |
70 | > Translation is a work in progress and not ready yet. You can use something like google translate for now. Feel free to open an issue when you are confused.
71 |
72 | - [30 lines demo](example/mini.py)
73 | - [how to use it in production (in Chinese)](https://github.com/williamfzc/stagesepx/blob/master/README.md)
74 | - [demo with all the features (in Chinese)](example/cut_and_classify.py)
75 | - [i have some questions](https://github.com/williamfzc/stagesepx/issues/new)
76 |
77 | ## Installation
78 |
79 | ```bash
80 | pip install stagesepx
81 | ```
82 |
83 | ## License
84 |
85 | [MIT](LICENSE)
86 |
--------------------------------------------------------------------------------
/docs/.nojekyll:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/williamfzc/stagesepx/e3d9538a1f94b77788544af129e7795d85026a54/docs/.nojekyll
--------------------------------------------------------------------------------
/docs/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | stage sep(aration) x
6 |
7 | detect stages in video automatically
8 |
9 |
10 | ---
11 |
12 | | Type | Status |
13 | |----------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
14 | | package version | [](https://badge.fury.io/py/stagesepx) |
15 | | auto test |  |
16 | | code maintainability | [](https://codeclimate.com/github/williamfzc/stagesepx/maintainability) |
17 | | code coverage | [](https://codecov.io/gh/williamfzc/stagesepx) |
18 | | docker build status |   |
19 | | code style | [](https://github.com/psf/black) |
20 |
21 | ---
22 |
23 | > welcome to stagesepx :P
24 |
25 | Please start from the sidebar!
26 |
--------------------------------------------------------------------------------
/docs/_coverpage.md:
--------------------------------------------------------------------------------
1 | # stagesep x
2 |
3 | > detect stages in video automatically
4 |
5 | [GitHub](https://github.com/williamfzc/stagesepx/)
6 | [Documentation](/pages/0_what_is_it)
7 |
--------------------------------------------------------------------------------
/docs/_sidebar.md:
--------------------------------------------------------------------------------
1 | - [Home](/README)
2 | - [About stagesepx](/pages/0_what_is_it)
3 | - [Use cases](/pages/1_where_can_it_be_used)
4 | - [How to use it](/pages/2_how_to_use_it)
5 | - [How it works](/pages/3_how_it_works)
6 | - [Roadmap](/pages/4_roadmap)
7 | - [Others](/pages/5_others)
8 |
--------------------------------------------------------------------------------
/docs/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | stagesep x document
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
25 |
26 |
27 |
28 |
--------------------------------------------------------------------------------
/docs/pages/0_what_is_it.md:
--------------------------------------------------------------------------------
1 | ## What is stagesepx
2 |
3 | A lightweight, fully automatic video analysis tool based on image processing and machine learning. It is highly customizable: it analyzes a video according to your actual needs and splits it into a series of stages. Afterwards you know exactly how many stages the video contains and what happened in each of them. And all of this is done automatically.
4 |
5 | ## What stagesepx can do
6 |
7 | In software engineering, video is a fairly universal way to describe UI behavior. It records what the user did and what happened on screen. For example, the clip below shows opening chrome from the desktop and navigating to the amazon home page:
8 |
9 | [](https://i.loli.net/2019/07/17/5d2e8ed1e9d0b49825.gif)
10 |
11 | stagesepx can **automatically detect** and extract the stable and unstable stages of a video (in this example, stagesepx finds three stable stages: before the tap, during the tap, and after the page finished loading):
12 |
13 | [](https://i.loli.net/2019/07/17/5d2e97c5e3a0e96365.png)
14 |
15 | Then it automatically derives the time range of each stage:
16 |
17 | [](https://i.loli.net/2019/07/17/5d2ea6720c58d44996.png)
18 |
19 | For example, the chart shows that:
20 |
21 | - the video stays in stage 0 until 0.76s
22 | - at 0.76s it switches from stage 0 to stage 1
23 | - at 0.92s it switches from stage 1 to stage 0 and then enters a changing state (when stagesepx cannot assign a frame to a specific class, or the frame lies outside the analyzed range, the frame is marked -1; this usually happens while the page is changing)
24 | - at 1.16s it reaches stage 2
25 | - ...
26 |
27 | In this way we can evaluate each stage of the video in great detail. Watching the video confirms that the detection matches reality exactly.
28 |
29 | While it runs, the powerful snapshot feature makes it easy to see what actually happened in each stage:
30 |
31 | [](https://i.loli.net/2019/07/25/5d3955365dff977571.png)
32 |
33 | **And all of this needs nothing but a video: no pre-built template, no prior learning.**
34 |
35 | ## A friendly report
36 |
37 | Want the duration of each stage? stagesepx has already computed it for you:
38 |
39 | [](https://i.loli.net/2019/07/17/5d2ea67201ac283867.png)
40 |
41 | The snapshot feature gives you an intuitive view of what each stage looks like:
42 |
43 | [](https://i.loli.net/2019/07/25/5d3955365dff977571.png)
44 |
45 | ...
46 |
47 | ## Excellent performance
48 |
49 | On efficiency, we learned from [stagesep2](https://github.com/williamfzc/stagesep2) (it is really slow, which made it hard to use in production) and raised the priority of performance during project planning. For this video, the log shows a stunning runtime of roughly 300 milliseconds (windows7 i7-6700 3.4GHz 16G):
50 |
51 | ```bash
52 | 2019-07-17 10:52:03.429 | INFO | stagesepx.cutter:cut:200 - start cutting: test.mp4
53 | ...
54 | 2019-07-17 10:52:03.792 | INFO | stagesepx.cutter:cut:203 - cut finished: test.mp4
55 | ```
56 |
57 | Besides the usual optimizations on the images themselves, stagesepx relies mainly on a sampling mechanism, i.e. converting a continuous signal in the time or spatial domain into a discrete one. Because the classifier requires high precision, this mechanism is mostly applied to the cutter to speed up the cutting process. The savings in computation are considerable: with a step of 5 frames, it saves 80% of the computation compared to the unoptimized version.
58 |
59 | Of course, sampling introduces some error compared to continuous computation. If your video changes drastically, or you want higher accuracy, you can turn sampling off. A sketch of the sampling switch follows.
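A minimal sketch of the sampling switch (the `step` parameter, as used in the bundled examples; `demo.mp4` is a placeholder path):

```python
from stagesepx.cutter import VideoCutter

# step=5 walks the video 5 frames at a time, saving roughly 80% of the
# computation compared to step=1; step=1 disables sampling entirely
cutter = VideoCutter(step=5)
res = cutter.cut("demo.mp4")
```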
60 |
61 | ## Better robustness
62 |
63 | Another problem with stagesep2 was that it demanded a lot from the video itself and resisted interference poorly. This stems mainly from the modules it used (template matching, OCR, etc.): rotation, resolution, and lighting all affect recognition. And since it depended heavily on pre-made template images, any difference between the template's recording environment and the video easily led to misjudgment.
64 |
65 | SSIM itself is comparatively robust. With the default SSIM classifier, all data (both training and test sets) come from the same video, which guarantees a consistent environment, avoids the influence of differing environments (rotation, lighting, resolution, etc.), and greatly reduces misjudgment.
66 |
67 | ## Guaranteed accuracy
68 |
69 | Accuracy consistent with the video itself. Take a stopwatch as an example:
70 |
71 |
72 |
73 | The computed result matches the actual behavior with an error within 0.01s. (The error decreases further as fps increases.)
74 |
--------------------------------------------------------------------------------
/docs/pages/1_where_can_it_be_used.md:
--------------------------------------------------------------------------------
1 | # Use cases
2 |
3 | !> stagesepx is essentially a video analysis tool. It deals only with videos and is not tied to any particular scenario! Use your imagination freely and let it help you build more.
4 |
5 | ## APP
6 |
7 | - the app startup time measurement mentioned earlier
8 | - likewise, page-switch speed and similar metrics
9 | - beyond performance, you can cut a video with the cutter and then verify functionality with image recognition solutions such as [findit](https://github.com/williamfzc/findit)
10 | - beyond apps, games, which conventional testing methods cannot handle, are its home turf
11 | - ...
12 |
13 | ## Beyond apps?
14 |
15 | - the analysis subject is the video, with no dependency on the runtime
16 | - besides mobile, results for PC and web can be computed the same way
17 | - maybe any video at all?
18 |
19 | [](https://i.loli.net/2019/07/22/5d35a84e3e0df82450.gif)
20 |
21 | You can directly measure how long the pen takes to enter and leave the frame!
22 |
23 | [](https://i.loli.net/2019/07/22/5d35a8858640e67521.png)
24 |
25 | It has no limits!
26 |
27 | ## As AI frontend
28 |
29 | Stagesepx was also designed as a preparation tool for AI processing. It can easily collect resources from videos for further image processing. Here is an example flow:
30 |
31 |
32 |
33 | It offers a direct 'bridge' between videos and image processing, so you do not need to handle the videos yourself.
34 |
35 | See https://github.com/williamfzc/stagesepx/issues/28 for details.
36 |
--------------------------------------------------------------------------------
/docs/pages/2_how_to_use_it.md:
--------------------------------------------------------------------------------
1 | # Usage
2 |
3 | ## Installation
4 |
5 | Python >= 3.6
6 |
7 | ```bash
8 | pip install stagesepx
9 | ```
10 |
11 | ## Quick start
12 |
13 | [example](https://github.com/williamfzc/stagesepx/tree/master/example)
14 |
--------------------------------------------------------------------------------
/docs/pages/3_how_it_works.md:
--------------------------------------------------------------------------------
1 | # How it works
2 |
3 | !> stagesepx consists of three main parts: the cutter, the classifier, and hooks.
4 |
5 | ## Cutter
6 |
7 | ?> The cutter is the most important component of stagesepx and the cornerstone that makes the whole process automatable.
8 |
9 | As the name suggests, the cutter cuts a video into several parts according to certain rules. It handles stage division and sampling, and as the data collector it provides automated data support for other tools (such as AI models). It should offer friendly interfaces or other forms of support for the outside world (including the classifier). For example, the `pick_and_save` method was designed entirely so that the data can be consumed directly by [keras](https://github.com/keras-team/keras).
10 |
11 | The cutter is positioned as a preprocessor that lowers the operating cost and redundancy of the other modules. Once the stable ranges are obtained, we know how many stable stages the video has, can extract the frames of each stable stage, and so on. On this basis you can easily sample images from each stage (in the example, 3 images per stage across 3 stable stages named 0, 1 and 2) and save them for later use (AI training, functional checks, etc.), as the screenshot and the sketch below show:
12 |
13 | [](https://i.loli.net/2019/07/17/5d2ea54271fe256939.png)
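A minimal sketch of the cutter flow above (paths are illustrative):

```python
from stagesepx.cutter import VideoCutter

cutter = VideoCutter()
res = cutter.cut("demo.mp4")
# stable and unstable ranges, detected automatically
stable, unstable = res.get_range()
# 3 images per stable stage, saved under ./cut_result
# (one sub-directory per stage: 0, 1, 2, ...)
res.pick_and_save(stable, 3, "./cut_result")
```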
14 |
15 | ## Classifier
16 |
17 | The classifier was born for the case above. It loads (or, for AI classifiers, learns from) some pre-classified images and classifies frames (images) accordingly.
18 |
19 | For example, after loading the frames of the stable stages above, the classifier can classify the video frame by frame and derive the exact duration of each stage.
20 |
21 |
22 |
23 | The classifier is positioned to perform frame-level, high-accuracy image classification and to make use of the sampling results. It should exist in different forms (e.g. machine learning models) to achieve different classification effects. For example, you can train your AI model on data sampled from the first few videos; once it has converged, you can classify future analyses directly with the trained model, with no sampling step in front. [stagesep2](https://github.com/williamfzc/stagesep2) is essentially a classifier.
24 |
25 | Currently, stagesepx officially provides two different types of classifiers to process the cut results (see the sketch below):
26 |
27 | - the SVM + HoG classifier performs better on videos with complex stages; you can train it with different videos to gradually improve its recognition until it is good enough for production;
28 | - the traditional SSIM classifier needs no training and is fairly lightweight; it is mostly used for simpler videos with fewer stages;
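A minimal sketch of the two built-in classifiers (constructor arguments follow the bundled examples):

```python
from stagesepx.classifier import SSIMClassifier, SVMClassifier

# lightweight, no training step: good for simple videos with few stages
ssim_cl = SSIMClassifier()

# trainable, better on complex stages; needs load() + train() before classify()
svm_cl = SVMClassifier(feature_type="hog")
```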
29 |
30 | ## hook
31 |
32 | Hooks are a concept added in 0.4.2 to support **frame-level** image processing. In other words, every operation on a frame is implemented through a hook. You will find that the image compression and grayscale conversion inside cutter and classifier are both implemented as hooks.
33 |
34 | Take the cutter as an example. At initialization, CompressHook and GreyHook are added to hook_list. From then on, before the cutter processes a frame, the frame first passes through hook_list in order, and the formal analysis only starts after all hooks have finished. For instance, once CompressHook has been added, every frame is compressed by it before being processed.
35 |
36 | This lets us customize the preprocessing behavior very flexibly. Hooks are not limited to influencing the analysis flow: you can write custom hooks for arbitrary per-frame operations. For example, FrameSaveHook saves every frame to a specified location during iteration. You can refer to their implementations to write your own hook and build your own functionality (a usage sketch follows the note below).
37 |
38 | !> Hooks execute strictly in the order they were added, and hooks may interfere with one another. The overwrite parameter controls whether a hook modifies the original frame; see the [full example](https://github.com/williamfzc/stagesepx/blob/master/example/cut_and_classify.py) for usage.
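A usage sketch of the hook mechanism (the FrameSaveHook argument, a target directory, is an assumption based on its description above):

```python
from stagesepx.cutter import VideoCutter
from stagesepx.hook import FrameSaveHook

cutter = VideoCutter()
# save every frame the cutter iterates over into ./frame_dir
cutter.add_hook(FrameSaveHook("./frame_dir"))  # assumed signature
res = cutter.cut("demo.mp4")
```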
39 |
40 | ## Design philosophy and positioning
41 |
42 | ?> stagesepx is positioned as a lightweight, fully automatic video analysis tool based on image processing and machine learning.
43 |
44 | To balance these traits (lightweight AI), we want it to run painlessly on most platforms while guaranteeing results, and to be deployable quickly, which means it cannot have overly demanding dependencies (software or hardware). Besides, deep learning needs a volume of training data that a single video can hardly provide. Given these premises, we dropped the heavyweight deep learning frameworks in favor of [sklearn](https://github.com/scikit-learn/scikit-learn). In fact, for short videos (within 30s), this package is more than sufficient.
45 |
46 | Of course, **this does not mean you cannot classify with deep learning**; in theory deep learning performs much better than ordinary machine learning algorithms. You can use the results produced by `cut.py` to train your deep learning model. See [here](/pages/1_where_can_it_be_used?id=as-ai-frontend).
47 |
48 | Alternatively, you can skip the cutter and train your model on a manually collected training set, then analyze your videos with it. See [here](https://github.com/williamfzc/stagesepx/tree/master/example).
49 |
--------------------------------------------------------------------------------
/docs/pages/4_roadmap.md:
--------------------------------------------------------------------------------
1 | # roadmap
2 |
3 | Future development will revolve around three themes:
4 |
5 | ## Precise speed testing
6 |
7 | As in the example on the home page, it has already landed in several real speed-testing scenarios:
8 |
9 | - startup speed
10 | - page-switch speed
11 | - power on/off
12 | - ...
13 |
14 | ## Functional testing
15 |
16 | - based on the [video comparison feature](https://github.com/williamfzc/stagesepx/tree/master/example), sketched below
17 | - in theory, with nothing abnormal: same operation flow -> same page behavior -> same video -> same (similar) stages
18 | - for functional testing, comparing the videos produced by each regression run tells us whether the page behaves as expected
19 | - on the same basis, you can also use it in compatibility testing to detect whether the UI renders correctly
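A minimal sketch of the comparison idea, following example/compare_videos.py (video paths are placeholders):

```python
from stagesepx.cutter import VideoCutter

cutter = VideoCutter()
res = cutter.cut("baseline.mp4")     # the manually verified recording
res1 = cutter.cut("candidate.mp4")   # a new regression recording

# diff the two cut results stage by stage (version >= 0.4.3)
print(res.diff(res1, frame_count=3))
```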
20 |
21 | ## AI data collection
22 |
23 | - the classifier itself is built on machine learning
24 | - you can use the cutter to extract material from videos to train your own AI model
25 | - or train the model with a dataset you collected by hand
26 | - see [design philosophy and positioning](/pages/3_how_it_works) for details
27 |
28 | ## Or, more ideas?
29 |
30 | Developers with ideas are very welcome to join us; just share your thoughts directly on the [issue board](https://github.com/williamfzc/stagesepx/issues)!
31 |
32 | If you want to join the project and build new features yourself, start from the [issue board](https://github.com/williamfzc/stagesepx/issues) as well. The whole development flow is synced there, so you can follow the project's progress and near-term direction.
33 |
--------------------------------------------------------------------------------
/docs/pages/5_others.md:
--------------------------------------------------------------------------------
1 | # Others
2 |
3 | ## Related projects
4 |
5 | This is the iteration history of the stagesep series; barring surprises, x will be the long-term maintained version.
6 |
7 | - [stagesep2](https://github.com/williamfzc/stagesep2)
8 | - [stagesep (deprecated)](https://github.com/williamfzc/stagesep)
9 |
10 | For details, read: https://testerhome.com/topics/19978
11 |
12 | ## Bug reports
13 |
14 | Understandably, covering every scenario is very hard, especially in a project's early days. If you suspect a bug, first upgrade to the latest version and try again: `pip install --upgrade stagesepx`. If the problem persists, please report it via an issue.
15 |
16 | Suggestions and problems can likewise be fed back to me via issues.
17 |
18 | ## Contributing or joining us
19 |
20 | Developers are welcome to contribute to this project. Before starting, it is best to post your idea as an issue, as it may well already be in development :)
21 |
22 | You can also follow the project's recent development on the [issue board](https://github.com/williamfzc/stagesepx/issues); all new features are synced there first.
23 |
24 | ## Contact
25 |
26 | - For questions and suggestions that are not confidential, please **give feedback via issues whenever possible**. They may well help others running into similar situations.
27 | - fengzc@vip.qq.com
28 |
29 | ## License
30 |
31 | [MIT](https://github.com/williamfzc/stagesepx/blob/master/LICENSE)
32 |
--------------------------------------------------------------------------------
/docs/pics/brand.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
--------------------------------------------------------------------------------
/docs/pics/ssim_trend.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/williamfzc/stagesepx/e3d9538a1f94b77788544af129e7795d85026a54/docs/pics/ssim_trend.png
--------------------------------------------------------------------------------
/docs/pics/stage.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/williamfzc/stagesepx/e3d9538a1f94b77788544af129e7795d85026a54/docs/pics/stage.png
--------------------------------------------------------------------------------
/docs/pics/stagesepx.svg:
--------------------------------------------------------------------------------
1 | [architecture diagram: video -> cutter -> stage image directories (stage1..stageN) -> classifier -> classifier result (object) -> reporter -> html report; the image directories can also feed model training or other programs, optionally with other datasets]
--------------------------------------------------------------------------------
/example/README.md:
--------------------------------------------------------------------------------
1 | # example for stagesepx
2 |
3 | ## Installation
4 |
5 | python3.6 and above are supported.
6 |
7 | ```bash
8 | pip install stagesepx
9 | ```
10 |
11 | You can install from source to get the latest, not-yet-released version:
12 |
13 | ```bash
14 | git clone https://github.com/williamfzc/stagesepx.git
15 | cd stagesepx
16 | pip install .
17 | ```
18 |
19 | ## Quick start
20 |
21 | > If you just want to get to production, take a look at the [real-world example](https://github.com/williamfzc/work_with_stagesepx) first
22 |
23 | ### Command line
24 |
25 | stagesepx can be launched directly from the command line. In this mode you do not need to write any code.
26 |
27 | Try analyzing your first video:
28 |
29 | ```bash
30 | stagesepx one_step demo.mp4
31 | ```
32 |
33 | ### Script
34 |
35 | If you want more customization, or you want to integrate with other programs, you are better off using a script.
36 |
37 | - [mini.py](./mini.py) is an example in under 30 lines. Of course, that also means it cannot cover many features.
38 | - [cut_and_classify.py](./cut_and_classify.py) shows almost all stagesepx usage, with detailed comments.
39 |
40 | Start with the former, then gradually fold in usage from the latter as your needs grow.
41 |
42 | ### docker image
43 |
44 | With docker, we offer a leaner way to run it inside a container. You do not need to worry about complex dependencies, and it integrates more easily with other systems (such as Jenkins).
45 |
46 | Say your video, named `demo.mp4`, sits in the `video_dir` directory:
47 |
48 | ```bash
49 | cd video_dir
50 | ```
51 |
52 | Create and start the container:
53 |
54 | ```bash
55 | docker run \
56 | --rm \
57 | -v ${PWD}:/usr/src/app \
58 | williamfzc/stagesepx \
59 | stagesepx one_step demo.mp4
60 | ```
61 |
62 | Of course you can also use the script mode:
63 |
64 | ```bash
65 | docker run \
66 | --rm \
67 | -v ${PWD}:/usr/src/app \
68 | williamfzc/stagesepx \
69 | python your_script.py
70 | ```
71 |
72 | ## A typical production setup
73 |
74 | In typical cases, there is a fairly universal production setup:
75 |
76 | - video recorder
77 | - varies with your actual situation
78 | - could be hardware (a camera) or software (a screen recorder?)
79 | - provides process recording for the downstream
80 | - automation driver
81 | - usually a UI automation framework
82 | - operates in place of a human during recording
83 | - works with the video recorder to keep feeding finished recordings downstream
84 | - stagesepx
85 | - analyzes the videos one by one and produces the results
86 |
87 | Done this way, the whole thing forms a closed loop. You can combine it with a CI system to build a stable workflow.
88 |
89 | ## Production guide
90 |
91 | After the flow above, you will have a stable, continuously running workflow. At this point the problems you may face are:
92 |
93 | - I don't need that many stages / I want to merge stages
94 | - I still need a human to check the results
95 | - the number of detected stages seems to vary between runs
96 | - ...
97 |
98 | [This example](./train_and_predict) will guide you through these problems and help you land it in real business.
99 |
100 | ## Video comparison (experimental)
101 |
102 | Video comparison is designed for functional-level verification. With it, you can first record a manually verified video, then:
103 |
104 | - verify that behavior is consistent across different resolutions
105 | - verify that behavior is consistent across repeated runs of the flow
106 | - ...
107 |
108 | See [compare_videos.py](./compare_videos.py).
109 |
110 | ## FAQ
111 |
112 | ### Why don't the detected stages match my expectations?
113 |
114 | One premise: human visual perception is actually not very sensitive. In many cases, a state that humans perceive as stable is not truly stable. With a very high threshold, the chance of this rises steadily, and you may wonder why so many stages were produced.
115 |
116 | The immediate fix is to lower the threshold slightly so it is less sensitive. This works well for most cases (such as page switches), where the change is drastic and the computed similarity falls far below the threshold, while noise-like interference still gets filtered out by it.
117 |
118 | If your problem is the opposite, that many stages you expected were not detected automatically, raise the threshold. At this level the machine's judgment is far stronger than a human's.
119 |
120 | ### How do I automatically check that each stage matches my expectations?
121 |
122 | The `contain_image` method built into `VideoCutRange` lets you check whether a frame within a stage contains a particular icon. For example, if your scenario ends when icon A appears, you can check whether icon A appears in the last stable stage to decide whether the scenario ended normally.
123 |
124 | You can verify the cutter's results this way. See [range_check.py](./range_check.py) and the sketch below.
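A minimal sketch (following [range_check.py](./range_check.py); `a.png` is a placeholder template image):

```python
from stagesepx.cutter import VideoCutter

cutter = VideoCutter()
res = cutter.cut("../demo.mp4")
stable, _ = res.get_range()

# does the last stable stage contain the icon that marks the end of the scenario?
match = stable[-1].contain_image("a.png")
# e.g. {'target_point': [550, 915], 'target_sim': 0.98..., 'ok': True}
assert match["ok"]
```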
125 |
126 | ### How do I process the final classification result programmatically instead of the report?
127 |
128 | This confusion arises largely because stagesepx ships with a report system, and the [example](./mini.py) renders the result straight into a report.
129 |
130 | The answer is simple: just process the data that is handed to the reporter; the two are not coupled at all.
131 |
132 | ```python
133 | # the classification result is a list containing ClassifierResult objects
134 | # if you want to plug it into your business and cooperate with other tools, the report alone is of little use
135 | # you can build custom processing on top of classify_result
136 | for each in classify_result:
137 | # its frame id
138 | print(each.frame_id)
139 | # its timestamp
140 | print(each.timestamp)
141 | # which stage it was classified into
142 | print(each.stage)
143 | break
144 | ```
145 |
146 | See the [full example](./cut_and_classify.py) for details.
147 |
148 | ### How do I customize the stage-cutting result?
149 |
150 | Building on the previous question, many people go one step further: the detected stages are objectively logical, but I want to decide the stage split myself.
151 |
152 | Of course you can. For production business this approach is preferred; I doubt you would have the courage to put a fully automatic tool with zero human intervention into production anyway.
153 |
154 | First you need to understand how the cutter and the classifier work (see [HOW_IT_WORKS](https://williamfzc.github.io/stagesepx/#/pages/3_how_it_works)). The stage split is in fact decided by the cutter; in other words, intervening in the cutter's behavior is enough to further control the stage-cutting result.
155 |
156 | A simpler way is to process the cutter's results by hand and train the model with the hand-sorted dataset. That way your model is built entirely from the dataset you picked, as the sketch below shows.
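A minimal sketch of that approach (following [train_and_predict](./train_and_predict); the dataset path is a placeholder):

```python
from stagesepx.classifier import SVMClassifier

cl = SVMClassifier()
cl.load("./my_sorted_dataset")  # cutter output, re-sorted by hand
cl.train()
cl.save_model("model.pkl")      # later runs: cl.load_model("model.pkl")
```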
157 |
--------------------------------------------------------------------------------
/example/compare_videos.py:
--------------------------------------------------------------------------------
1 | from stagesepx.cutter import VideoCutter
2 |
3 | import pprint
4 |
5 | video_path = '../test1.mp4'
6 | another_video_path = '../test2.mp4'
7 |
8 | cutter = VideoCutter()
9 | res = cutter.cut(video_path, compress_rate=0.1)
10 | res1 = cutter.cut(another_video_path, compress_rate=0.1)
11 |
12 | # version >= 0.4.3
13 | pprint.pprint(
14 | res.diff(res1, frame_count=3)
15 | )
16 |
--------------------------------------------------------------------------------
/example/cut_and_classify.py:
--------------------------------------------------------------------------------
1 | from stagesepx.cutter import VideoCutter, VideoCutResult
2 | from stagesepx.classifier import SVMClassifier
3 | from stagesepx.reporter import Reporter
4 | from stagesepx.hook import ExampleHook, CropHook, IgnoreHook
5 | import os
6 |
7 | video = "../demo.mp4"
8 |
9 | from stagesepx.video import VideoObject
10 |
11 | video = VideoObject(
12 | video,
13 | # the fps parameter (>=0.9.0)
14 | # with ffmpeg, re-normalizes the video's fps before loading for more standard behavior
15 | # requires ffmpeg to be installed and on the PATH, i.e. running ffmpeg manually in a shell prints its usual banner
16 | # e.g. fps=30 converts the video into a 30 fps version (the original video is not overwritten)
17 | # fps=30,
18 | )
19 | # preloading (>=0.8.0, consumes some memory)
20 | # preloading the video can speed up analysis considerably
21 | video.load_frames()
22 |
23 | # --- cut ---
24 | cutter = VideoCutter(
25 | # step, default 1; use it to balance efficiency and granularity
26 | # when set to 2, frames are traversed in units of 2
27 | # i.e. every other frame is skipped
28 | step=1,
29 | # default 0.2, i.e. images are scaled down to 0.2x
30 | # mainly to improve computational efficiency
31 | # raise it if you are worried about the analysis quality
32 | compress_rate=0.2,
33 | # or specify the target size directly
34 | # when both the compress rate and the target size are given, the target size wins
35 | # target_size=(200, 400),
36 | )
37 |
38 | # the hook feature (>=0.4.2, https://williamfzc.github.io/stagesepx/#/pages/3_how_it_works?id=hook)
39 | # extremely simple to use: just initialize a hook
40 | hook = ExampleHook()
41 | # then add the hook to the cutter or the classifier
42 | cutter.add_hook(hook)
43 | # multiple hooks are supported; they run in order
44 | # when overwrite is set to true, the hook's changes keep affecting the subsequent analysis
45 | # otherwise hooks operate on a copy of the frame
46 | hook1 = ExampleHook(overwrite=True)
47 | cutter.add_hook(hook1)
48 |
49 | # CropHook (>=0.7.0, used for partial detection)
50 | # it crops each frame so you can analyze only part of the video
51 | # e.g. it neatly solves the carousel problem: https://github.com/williamfzc/stagesepx/issues/55
52 | # it takes two parameters, size and offset, i.e. the crop-region size and its offset
53 | # e.g. suppose you want to crop out the bottom-right quarter of the frame
54 | hook2 = CropHook(
55 | # height 0.5 * height, width 0.5 * width
56 | size=(0.5, 0.5),
57 | # besides ratios, you can also specify absolute lengths
58 | # e.g. to crop a part with height 100 and width 200
59 | # size=(100, 200),
60 | # by default, all coordinates start from the top-left corner
61 | # shifting to the bottom-right means offsetting down by 0.5 * height and right by 0.5 * width
62 | # offset=(0.5, 0.5),
63 | # absolute lengths also work here, same as size
64 | # offset=(100, 100),
65 | overwrite=True,
66 | )
67 | # once initialized, add the hook to the cutter or the classifier
68 | # after adding it, you will find stagesepx only analyzes the cropped region
69 | cutter.add_hook(hook2)
70 |
71 | # for CropHook's use cases, IgnoreHook was added as a further complement (>=0.7.1)
72 | # opposite to CropHook, it masks out part of the frame
73 | # see https://github.com/williamfzc/stagesepx/issues/56
74 | hook3 = IgnoreHook(
75 | # its parameters are parsed the same way as CropHook's, so not repeated here
76 | # unlike CropHook, the region specified here is masked out
77 | size=(0.5, 0.5),
78 | offset=(0.5, 0.5),
79 | overwrite=True,
80 | )
81 | # commented out in this example so it does not affect the result
82 | # cutter.add_hook(hook3)
83 |
84 | # start cutting
85 | res = cutter.cut(
86 | video,
87 | # block splits each frame into sub-blocks compared separately, yielding a more sensitive ssim value
88 | # default 2, i.e. a 2x2 grid; 4 means a 4x4 grid, and so on; 1 means no split, comparing whole images
89 | # note that block raises an error when the frame cannot be divided evenly
90 | block=2,
91 | )
92 |
93 | # you can save your cutter result for use at another time (>=0.4.4)
94 | cut_result = res.dumps()
95 | # or save it directly to a json file
96 | # res.dump('./YOUR_RES.json')
97 | # when you want to use it, read it back with loads
98 | res = VideoCutResult.loads(cut_result)
99 | # or load it directly from a file
100 | # res = VideoCutResult.load('./YOUR_RES.json')
101 |
102 | # res gives you the cut result: the ranges of the stable and the changing states
103 | stable, unstable = res.get_range(
104 | # threshold for deciding whether a stage is stable
105 | # higher is stricter (fewer ranges are judged stable)
106 | # default 0.95 (0-1)
107 | threshold=0.95,
108 | # enhanced detection using psnr
109 | # a 0.5.3 feature, off by default (float, 0-1)
110 | # when set, ranges judged stable get a second check
111 | # e.g. when set to 0.5, the stable-range condition becomes:
112 | # ssim > 0.95 and psnr > 0.5
113 | # see https://github.com/williamfzc/stagesepx/issues/38
114 | psnr_threshold=None,
115 | # limit filters out overly short stages (use it to ignore short-lived changes), off by default
116 | # e.g. with 5, ranges lasting shorter than 5*step are ignored
117 | limit=None,
118 | # offset compensates for the case where barely-changing adjacent frames inside a change are judged stable, cutting one continuous change into several parts
119 | # see https://github.com/williamfzc/stagesepx/issues/16#issuecomment-517916995
120 | # in that example, 165 - 174 is one change, but the tiny change between 166 - 167 cuts the whole process apart
121 | # with offset=2, stagesepx automatically merges stable ranges of length <= 2 inside a change, so the change is presented whole
122 | offset=None,
123 | )
124 |
125 | # thumbnail turns a stage's change process into a single thumbnail, giving an intuitive view of how the stage changed!
126 | # e.g. suppose you want to see what happened in the first unstable stage
127 | # this saves the converted thumbnail into the current directory
128 | # res.thumbnail(unstable[0], to_dir='.')
129 |
130 | # sample the ranges
131 | # sampled images keep their original size for later analysis, but become grayscale
132 | data_home = res.pick_and_save(
133 | # this example samples the stable ranges
134 | stable,
135 | # samples per range; 5 means 5 equally spaced images per stage
136 | # raise this value if machine learning is involved
137 | 5,
138 | # where the samples are saved
139 | # if unspecified, a folder is created at the current location and its path is returned
140 | # './cut_result',
141 | # prune removes duplicate stages (>=0.4.4)
142 | # float (0-1.0); when set to 0.9, two stages with similarity above 0.9 are merged into one class
143 | prune=None,
144 | )
145 |
146 | # --- classify ---
147 |
148 | cl = SVMClassifier(
149 | # HoG is used for feature extraction by default
150 | # you can disable it to train and test on the raw images directly: feature_type='raw'
151 | feature_type="hog",
152 | # default 0.2, i.e. images are scaled down to 0.2x
153 | # mainly to improve computational efficiency
154 | # raise it if you are worried about the analysis quality
155 | compress_rate=0.2,
156 | # or specify the target size directly
157 | # when both the compress rate and the target size are given, the target size wins
158 | # target_size=(200, 400),
159 | )
160 |
161 | # load the data
162 | cl.load(data_home)
163 | # training is required after the data is loaded
164 | cl.train()
165 | # after training you can save the model
166 | # cl.save_model('model.pkl')
167 | # or load an already trained model directly
168 | # cl.load_model('model.pkl')
169 |
170 | # note: if a range is specified for classify,
171 | # only frames within the range are analyzed!
172 | # e.g. only the stable ranges are passed here, so frames outside them are ignored and marked -1
173 | classify_result = cl.classify(
174 | video,
175 | stable,
176 | # step, tune it yourself to balance efficiency and granularity
177 | # default 1, i.e. every frame is checked
178 | step=1,
179 | # default False
180 | # once enabled, your classification result object keeps the image data
181 | # this can speed up later processing, but risks high memory usage
182 | keep_data=False,
183 | )
184 |
185 | # the classification result is a ClassifierResult object
186 | # you can process its data directly or use its built-in methods
187 | # to shape your classification result into whatever you need
188 | data_list = classify_result.data
189 | print(data_list)
190 | # classify_result already provides many methods for reshaping the data
191 | # browse the ClassifierResult object directly for them
192 | cr_dict = classify_result.to_dict()
193 | print(cr_dict)
194 |
195 | # contain_image (>=0.9.1)
196 | # use template matching to compare the final result with your expectation, telling you whether a stage matches it
197 | # fully automated verification can be built on top of this
198 | # res = data_list[0].contain_image(image_path="path/to/your/template/path")
199 | # print(res)
200 | # you will get a result like this:
201 | # {'target_point': [550, 915], 'target_sim': 0.9867244362831116, 'ok': True}
202 |
203 | # --- draw ---
204 | r = Reporter()
205 |
206 | # you can insert some custom data into the report
207 | r.add_extra("data_home", data_home)
208 |
209 | # since 0.3.2, you can add custom content to the report (https://github.com/williamfzc/stagesepx/issues/13)
210 | # r.add_extra('here is title', 'here is content')
211 | r.draw(
212 | classify_result,
213 | report_path=os.path.join(data_home, "report.html"),
214 | # passing unstable marks the corresponding parts as unstable
215 | # this affects the final analysis result
216 | unstable_ranges=unstable,
217 | # a 0.5.3 feature, mostly for debugging
218 | # pass in the cutter's result to append SSIM, MSE and PSNR trend charts at the end of the report
219 | cut_result=res,
220 | )
221 |
--------------------------------------------------------------------------------
/example/mini.py:
--------------------------------------------------------------------------------
1 | """
2 | This is a minimal stagesepx example
3 | A comment for every line can be found in cut_and_classify.py
4 | """
5 | from stagesepx.cutter import VideoCutter
6 | from stagesepx.classifier import SVMClassifier
7 | from stagesepx.reporter import Reporter
8 | from stagesepx.video import VideoObject
9 |
10 | video_path = "../demo.mp4"
11 | video = VideoObject(video_path)
12 | video.load_frames()
13 |
14 | # --- cutter ---
15 | cutter = VideoCutter()
16 | res = cutter.cut(video)
17 | stable, unstable = res.get_range()
18 | data_home = res.pick_and_save(stable, 5)
19 |
20 | # --- classify ---
21 | cl = SVMClassifier()
22 | cl.load(data_home)
23 | cl.train()
24 | classify_result = cl.classify(video, stable)
25 |
26 | # --- draw ---
27 | r = Reporter()
28 | r.draw(classify_result)
29 |
--------------------------------------------------------------------------------
/example/old/README.md:
--------------------------------------------------------------------------------
1 | # OLD VERSION
2 |
3 | The examples here are not recommended
4 |
--------------------------------------------------------------------------------
/example/old/classify.py:
--------------------------------------------------------------------------------
1 | from stagesepx.classifier import SSIMClassifier
2 | from stagesepx.reporter import Reporter
3 |
4 | # running this example requires prerequisite data
5 | # you can start from cut.py
6 |
7 | # the classifier used here is the default SSIM classifier
8 | # more classifiers will be added gradually once things stabilize
9 | cl = SSIMClassifier()
10 | # cut.py generates its data under this path
11 | # if you changed it there, change it here accordingly
12 | data_home = './cut_result'
13 | cl.load(data_home)
14 | # just start the analysis
15 | res = cl.classify(
16 | '../demo.mp4',
17 | # step, tune it yourself to balance efficiency and granularity
18 | # default 1, i.e. every frame is checked
19 | step=1
20 | )
21 |
22 | # the classification result is a list containing ClassifierResult objects
23 | # you can build on it for further development
24 | for each in res:
25 | # its frame id
26 | print(each.frame_id)
27 | # its timestamp
28 | print(each.timestamp)
29 | # which stage it was classified into
30 | print(each.stage)
31 | break
32 |
33 | # for better readability, stagesepx has chart drawing built in
34 | # you can render the analysis result into a chart directly
35 | report = Reporter()
36 | # you can insert some folder paths into the report
37 | # so you can easily inspect the related content from the report
38 | # of course, mind the relative locations of these paths and the report's final location, so they stay accessible
39 | report.add_dir_link(data_home)
40 |
41 | report.draw(
42 | res,
43 | report_path='report.html',
44 | )
45 |
--------------------------------------------------------------------------------
/example/old/classify_with_svm.py:
--------------------------------------------------------------------------------
1 | from stagesepx.classifier import SVMClassifier
2 | from stagesepx.reporter import Reporter
3 |
4 |
5 | # HoG is used for feature extraction by default
6 | # you can disable it to train and test on the raw images directly: feature_type='raw'
7 | cl = SVMClassifier(feature_type='hog')
8 |
9 | # mostly the same flow as the SSIM classifier
10 | # but its data requirements may differ; see the description in cut.py
11 | data_home = './cut_result'
12 | cl.load(data_home)
13 |
14 | # training is required after the data is loaded
15 | cl.train()
16 |
17 | # # after training you can save the model
18 | # cl.save_model('model.pkl')
19 | # # or load an already trained model directly
20 | # cl.load_model('model.pkl')
21 |
22 | # start classifying
23 | res = cl.classify(
24 | '../demo.mp4',
25 | # step, tune it yourself to balance efficiency and granularity
26 | # default 1, i.e. every frame is checked
27 | step=1,
28 | )
29 |
30 | # for better readability, stagesepx has chart drawing built in
31 | # you can render the analysis result into a chart directly
32 | report = Reporter()
33 |
34 | # you can insert some folder paths into the report
35 | # so you can easily inspect the related content from the report
36 | # of course, mind the relative locations of these paths and the report's final location, so they stay accessible
37 | report.add_dir_link(data_home)
38 |
39 | report.draw(
40 | res,
41 | report_path='report.html',
42 | )
43 |
--------------------------------------------------------------------------------
/example/old/cut.py:
--------------------------------------------------------------------------------
1 | from stagesepx.cutter import VideoCutter
2 |
3 | # change this to your video path
4 | video_path = '../demo.mp4'
5 |
6 | cutter = VideoCutter(
7 | # step, default 1; use it to balance efficiency and granularity
8 | # when set to 2, frames are traversed in units of 2
9 | # i.e. every other frame is skipped
10 | step=1,
11 | )
12 |
13 | # start cutting
14 | res = cutter.cut(
15 | video_path,
16 | # default 0.2, i.e. images are scaled down to 0.2x
17 | # mainly to improve computational efficiency
18 | compress_rate=0.2
19 | )
20 |
21 | # res gives you the cut result: the ranges of the stable and the changing states
22 | stable, unstable = res.get_range(
23 | # threshold for deciding whether a stage is stable
24 | # higher is stricter (fewer ranges are judged stable)
25 | # default 0.95 (0-1)
26 | threshold=0.95,
27 | # limit filters out overly short stages (use it to ignore short-lived changes), off by default
28 | # e.g. with 5, ranges lasting shorter than 5*step are ignored
29 | limit=None,
30 | # offset compensates for the case where barely-changing adjacent frames inside a change are judged stable, cutting one continuous change into several parts
31 | # see https://github.com/williamfzc/stagesepx/issues/16#issuecomment-517916995
32 | # in that example, 165 - 174 is one change, but the tiny change between 166 - 167 cuts the whole process apart
33 | # with offset=2, stagesepx automatically merges stable ranges of length <= 2 inside a change, so the change is presented whole
34 | offset=None,
35 | )
36 |
37 | # thumbnail turns a stage's change process into a single thumbnail, giving an intuitive view of how the stage changed!
38 | # e.g. suppose you want to see what happened in the first unstable stage
39 | # this saves the converted thumbnail into the current directory
40 | res.thumbnail(unstable[0], to_dir='.')
41 |
42 | # since all stages are detected automatically, one possible situation is:
43 | # you recorded the same scenario several times, but recording quality and environment may yield a different stage count per video
44 | # based on findit, you can verify the stages directly to make sure their content matches expectations
45 | # e.g. to require that frames of the second stable stage contain a certain image (at path a.png):
46 | # assert stable[1].contain_image('a.png')
47 |
48 | # sample the ranges
49 | data_path = res.pick_and_save(
50 | # this example samples the stable ranges
51 | stable,
52 | # samples per range; 3 means 3 equally spaced images per stage
53 | # raise this value if machine learning is involved
54 | 3,
55 | # where the samples are saved
56 | # if unspecified, a folder is created at the current location and its path is returned
57 | './cut_result',
58 | )
59 | print(f'data saved to {data_path}')
60 |
--------------------------------------------------------------------------------
/example/old/mini.py:
--------------------------------------------------------------------------------
1 | from stagesepx.cutter import VideoCutter
2 | from stagesepx.classifier import SSIMClassifier
3 | from stagesepx.reporter import Reporter
4 |
5 | # cut
6 | video_path = '../demo.mp4'
7 | cutter = VideoCutter()
8 | res = cutter.cut(video_path)
9 | stable = res.get_stable_range()
10 |
11 | # classify
12 | cl = SSIMClassifier()
13 | cl.load(stable)
14 |
15 | res = cl.classify(
16 | video_path,
17 | stable,
18 | )
19 |
20 | # draw
21 | r = Reporter()
22 | r.draw(res)
23 |
--------------------------------------------------------------------------------
/example/old/multi_video.py:
--------------------------------------------------------------------------------
1 | from stagesepx.cutter import VideoCutter
2 | from stagesepx.classifier import SVMClassifier
3 | from stagesepx.reporter import Reporter
4 |
5 | video_list = [
6 | '../demo.mp4',
7 | # just add your other videos here too
8 | ]
9 |
10 | for each_video_path in video_list:
11 | cutter = VideoCutter()
12 | res = cutter.cut(each_video_path)
13 | stable = res.get_stable_range()
14 | data_home = res.pick_and_save(stable, 3)
15 | print(stable)
16 |
17 | # classify
18 | cl = SVMClassifier()
19 | cl.load(data_home)
20 | cl.train()
21 |
22 | # note: if a range is specified for classify,
23 | # only frames within the range are analyzed!
24 | # e.g. only the stable ranges are passed here, so frames outside them are ignored and marked -1
25 | res = cl.classify(
26 | each_video_path,
27 | stable,
28 | # step, tune it yourself to balance efficiency and granularity
29 | # default 1, i.e. every frame is checked
30 | step=1
31 | )
32 |
33 | # for better readability, stagesepx has chart drawing built in
34 | # you can render the analysis result into a chart directly
35 | report = Reporter()
36 | # you can insert some folder paths into the report
37 | # so you can easily inspect the related content from the report
38 | # of course, mind the relative locations of these paths and the report's final location, so they stay accessible
39 | report.add_dir_link(data_home)
40 |
41 | report.draw(res)
42 |
--------------------------------------------------------------------------------
/example/range_check.py:
--------------------------------------------------------------------------------
1 | from stagesepx.cutter import VideoCutter
2 | from stagesepx.classifier import SVMClassifier
3 |
4 | video_path = "../demo.mp4"
5 | amazon_image_path = "../amazon.png"
6 | phone_image_path = "../phone.png"
7 | message_image_path = "../message.png"
8 |
9 | cutter = VideoCutter()
10 | res = cutter.cut(video_path)
11 | stable, _ = res.get_range()
12 |
13 | # check whether the last stage contains the image amazon.png
14 | # this takes a frame from the middle of the stage and runs template matching
15 | # this is not very common though; the most common use is to check the final result rather than intermediates
16 | # note that template matching here is affected by the compress rate
17 | # resolution fitting is applied, but an overly high compress rate can still make the image hard to recognize and cause misjudgment
18 | # normally it is fine
19 | match_result = stable[-1].contain_image(
20 | amazon_image_path, engine_template_scale=(0.5, 2, 5)
21 | )
22 | print(match_result)
23 | # prints: the most likely location, the similarity, and whether the computation completed normally
24 | # {'target_point': [550, 915], 'target_sim': 0.9867244362831116, 'ok': True}
25 |
26 | data_home = res.pick_and_save(stable, 5)
27 | cl = SVMClassifier()
28 | cl.load(data_home)
29 | cl.train()
30 | classify_result = cl.classify(video_path, stable, keep_data=True)
31 | result_dict = classify_result.to_dict()
32 |
33 | final_result: dict = {}
34 |
35 | for each_stage, each_frame_list in result_dict.items():
36 | # you can run target detection on these stages to confirm they match your expectations
37 | # note: a negative stage name means the stage is still changing, i.e. not stable
38 | # e.g. check whether the middle frame of each stage contains a particular image
39 | middle_id: int = int((len(each_frame_list) - 1) / 2)
40 |
41 | # check amazon.png and phone.png respectively (two hand-picked markers)
42 | amazon_image_res = each_frame_list[middle_id].contain_image(
43 | image_path=amazon_image_path,
44 | # template matching relies on findit
45 | # every parameter findit accepts can be passed through as kwargs and takes effect
46 | # see the FindIt object's __init__() and find() for details
47 | engine_template_scale=(0.5, 2, 10),
48 | )
49 | phone_image_res = each_frame_list[middle_id].contain_image(
50 | image_path=phone_image_path, engine_template_scale=(0.5, 2, 10)
51 | )
52 | msg_image_res = each_frame_list[middle_id].contain_image(
53 | image_path=message_image_path, engine_template_scale=(0.5, 2, 10)
54 | )
55 | final_result[each_stage] = {
56 | amazon_image_path: amazon_image_res["target_sim"],
57 | phone_image_path: phone_image_res["target_sim"],
58 | message_image_path: msg_image_res["target_sim"],
59 | }
60 |
61 | # these markers tell you whether each stage matches expectations, for further computation
62 | print(final_result)
63 |
--------------------------------------------------------------------------------
/example/stable.py:
--------------------------------------------------------------------------------
1 | from stagesepx.cutter import VideoCutter
2 | from stagesepx.classifier import SVMClassifier
3 | from stagesepx.reporter import Reporter
4 | from stagesepx.video import VideoObject
5 | import os
6 |
7 | video = "../demo.mp4"
8 |
9 | video = VideoObject(
10 | video,
11 | # preloading, speeds up analysis considerably
12 | pre_load=True,
13 | )
14 |
15 | # --- cut ---
16 | cutter = VideoCutter()
17 |
18 | # start cutting
19 | res = cutter.cut(video)
20 |
21 | # res gives you the cut result: the ranges of the stable and the changing states
22 | stable, unstable = res.get_range(
23 | # threshold for deciding whether a stage is stable
24 | # higher is stricter (fewer ranges are judged stable)
25 | # default 0.95 (0-1)
26 | threshold=0.95,
27 | # offset compensates for the case where barely-changing adjacent frames inside a change are judged stable, cutting one continuous change into several parts
28 | # see https://github.com/williamfzc/stagesepx/issues/16#issuecomment-517916995
29 | # in that example, 165 - 174 is one change, but the tiny change between 166 - 167 cuts the whole process apart
30 | # with offset=2, stagesepx automatically merges stable ranges of length <= 2 inside a change, so the change is presented whole
31 | offset=None,
32 | )
33 |
34 | # sample the ranges
35 | # sampled images keep their original size for later analysis, but become grayscale
36 | data_home = res.pick_and_save(
37 | # this example samples the stable ranges
38 | stable,
39 | # samples per range; 5 means 5 equally spaced images per stage
40 | # raise this value if machine learning is involved
41 | 5,
42 | )
43 |
44 | # --- classify ---
45 | cl = SVMClassifier()
46 |
47 | # load the data
48 | cl.load(data_home)
49 | # training is required after the data is loaded
50 | cl.train()
51 |
52 | # note: if a range is specified for classify,
53 | # only frames within the range are analyzed!
54 | # e.g. only the stable ranges are passed here, so frames outside them are ignored and marked -1
55 | classify_result = cl.classify(video, stable)
56 |
57 | # the classification result is a ClassifierResult object
58 | # you can process its data directly or use its built-in methods
59 | # to shape your classification result into whatever you need
60 | data_list = classify_result.data
61 | print(data_list)
62 | # classify_result already provides many methods for reshaping the data
63 | # browse the ClassifierResult object directly for them
64 | cr_dict = classify_result.to_dict()
65 | print(cr_dict)
66 |
67 | # --- draw ---
68 | r = Reporter()
69 |
70 | r.draw(
71 | classify_result,
72 | report_path=os.path.join(data_home, "report.html"),
73 |     # passing unstable marks the corresponding parts as unstable
74 |     # this affects the final analysis result
75 | unstable_ranges=unstable,
76 |     # a feature added in 0.5.3, mostly for debugging
77 |     # passing the cutter's result appends SSIM, MSE and PSNR trend charts to the end of the report
78 | cut_result=res,
79 | )
80 |
--------------------------------------------------------------------------------
/example/train_and_predict/README.md:
--------------------------------------------------------------------------------
1 | # Building a Stable Video Analysis Service
2 |
3 | This is an example designed for production environments.
4 |
5 | - Collect data (images)
6 | - Manually re-classify the machine-classified images according to your actual needs
7 | - Classified dataset -> trained model
8 | - Apply your model
9 |
10 | Suppose you have two videos, `train.mp4` and `predict.mp4`, used to train and to validate the model respectively.
11 |
12 | ## Data (image) collection
13 |
14 | After running `cut.py`, you will get a `cut_result` folder containing the classified images.
15 |
16 | See [cut.py](./cut.py).
17 |
18 | 
19 |
20 | ## Secondary classification
21 |
22 | Manually re-classify the machine-classified images according to your actual needs. See [issue#48](https://github.com/williamfzc/stagesepx/issues/48) for details.
23 |
24 | ## Model training
25 |
26 | After the steps above, you should have a well-classified training (image) set, stored as a folder, that fits your business needs. Next we will build a model on top of it.
27 |
28 | See [train.py](./train.py) for how to train.
29 |
30 | ## Application
31 |
32 | You can then use the trained model to predict on similar videos.
33 |
34 | See [predict.py](./predict.py) for how to use the model; a combined sketch follows below.
35 |
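36 | For reference, here is a minimal end-to-end sketch combining the steps above (the paths are illustrative assumptions based on the example scripts, not fixed conventions):
37 | 
38 | ```python
39 | from stagesepx.classifier import SVMClassifier
40 | from stagesepx.cutter import VideoCutter
41 | 
42 | # train once on the (re-classified) dataset produced by cut.py
43 | cl = SVMClassifier()
44 | cl.load('./cut_result')
45 | cl.train()
46 | cl.save_model('./model.pkl')
47 | 
48 | # reuse the model on a new, similar video
49 | cl2 = SVMClassifier()
50 | cl2.load_model('./model.pkl')
51 | stable, _ = VideoCutter().cut('predict.mp4').get_range()
52 | classify_result = cl2.classify('predict.mp4', stable)
53 | ```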
--------------------------------------------------------------------------------
/example/train_and_predict/cut.py:
--------------------------------------------------------------------------------
1 | """
2 | This example shows how to collect a training set
3 | """
4 | from stagesepx.cutter import VideoCutter
5 |
6 |
7 | video_path = '../../demo.mp4'
8 |
9 | # --- cut ---
10 | cutter = VideoCutter()
11 | res = cutter.cut(video_path)
12 | stable, unstable = res.get_range()
13 |
14 | res.pick_and_save(
15 | stable,
16 |     # number of samples per range; 5 means 5 frames evenly picked from each stage
17 | 5,
18 |     # where the sampled results are saved
19 | './cut_result',
20 | )
21 |
--------------------------------------------------------------------------------
/example/train_and_predict/predict.py:
--------------------------------------------------------------------------------
1 | """
2 | Use a trained model to build a long-term video analysis workflow
3 | 
4 | After running train.py, you should have a model.pkl model file
5 | """
6 |
7 | from stagesepx.classifier import SVMClassifier
8 | from stagesepx.cutter import VideoCutter
9 | from stagesepx.reporter import Reporter
10 |
11 | TARGET_VIDEO = '../../demo.mp4'
12 |
13 | # cut
14 | # cut is still used here, mainly to handle the changing processes properly
15 | # but this time pick_and_save is not needed, since the classifier will not use the cutter's data
16 | cutter = VideoCutter()
17 | res = cutter.cut(TARGET_VIDEO)
18 | stable, _ = res.get_range()
19 |
20 | # classify
21 | # these parameters must stay consistent with train.py, if you changed anything there
22 | cl = SVMClassifier()
23 | cl.load_model('./model.pkl')
24 |
25 | classify_result = cl.classify(
26 | TARGET_VIDEO,
27 | stable,
28 | )
29 |
30 | r = Reporter()
31 | r.draw(
32 | classify_result,
33 | report_path='report.html',
34 | cut_result=res,
35 | )
36 |
--------------------------------------------------------------------------------
/example/train_and_predict/train.py:
--------------------------------------------------------------------------------
1 | """
2 | This example shows how to train a reusable model
3 | 
4 | After the cut step, you should have a sorted training set folder
5 | We will train the model based on that folder
6 | """
7 | from stagesepx.classifier import SVMClassifier
8 |
9 | DATA_HOME = './cut_result'
10 | cl = SVMClassifier()
11 |
12 | # load data
13 | cl.load(DATA_HOME)
14 | # training is required after the data is loaded
15 | cl.train()
16 | # after training you can save the model
17 | cl.save_model('model.pkl')
18 |
--------------------------------------------------------------------------------
/example/with_keras.py:
--------------------------------------------------------------------------------
1 | """
2 | classify with keras model
3 | """
4 | from keras.models import Sequential
5 |
6 | from stagesepx.cutter import VideoCutter
7 | from stagesepx.classifier.keras import KerasClassifier
8 | from stagesepx.reporter import Reporter
9 | from stagesepx.video import VideoObject
10 |
11 |
12 | video_path = "../demo.mp4"
13 | video = VideoObject(video_path)
14 | video.load_frames()
15 |
16 | # --- cutter ---
17 | cutter = VideoCutter()
18 | res = cutter.cut(video)
19 | stable, unstable = res.get_range()
20 | data_home = res.pick_and_save(stable, 10)
21 |
22 | # --- classify ---
23 | # We recommend reading the code (KerasClassifier) directly for a better understanding,
24 | # and you can actually build your own classifier based on it
25 | class NewKerasClassifier(KerasClassifier):
26 | def create_model(self) -> Sequential:
27 | # overwrite this method to design your own model structure!
28 |
29 | # model = Sequential()
30 | # ...
31 | pass
32 |
33 | def train(self, data_path: str, *_, **__):
34 | # ...
35 | pass
36 |
37 |
38 | # or use the default one
39 | # and then init it
40 | # epochs=1 is just an example
41 | cl = KerasClassifier(epochs=1)
42 |
43 | # train model and save weights
44 | cl.train(data_home)
45 | cl.save_model("keras_model.weights.h5")
46 |
47 | # you had better reuse the trained model to save time,
48 | # since a keras model takes much more time than SVM
49 | # cl.load_model("keras_model.weights.h5")
50 |
51 | classify_result = cl.classify(video, stable, keep_data=True)
52 | result_dict = classify_result.to_dict()
53 |
54 | # --- draw ---
55 | r = Reporter()
56 | r.draw(classify_result)
57 |
--------------------------------------------------------------------------------
/pyrightconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "include": [
3 | "stagesepx"
4 | ],
5 | "reportMissingImports": false
6 | }
7 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup, find_packages
2 | from stagesepx import (
3 | __AUTHOR__,
4 | __AUTHOR_EMAIL__,
5 | __URL__,
6 | __LICENSE__,
7 | __VERSION__,
8 | __PROJECT_NAME__,
9 | __DESCRIPTION__,
10 | )
11 |
12 | setup(
13 | name=__PROJECT_NAME__,
14 | version=__VERSION__,
15 | description=__DESCRIPTION__,
16 | author=__AUTHOR__,
17 | author_email=__AUTHOR_EMAIL__,
18 | url=__URL__,
19 | packages=find_packages(),
20 | include_package_data=True,
21 | license=__LICENSE__,
22 | classifiers=[
23 | "License :: OSI Approved :: MIT License",
24 | "Programming Language :: Python",
25 | "Programming Language :: Python :: 3",
26 | "Programming Language :: Python :: 3.6",
27 | "Programming Language :: Python :: 3.7",
28 | "Programming Language :: Python :: 3.8",
29 | "Programming Language :: Python :: 3.9",
30 | "Programming Language :: Python :: 3.10",
31 | "Programming Language :: Python :: 3.11",
32 | ],
33 | python_requires=">=3.6",
34 | install_requires=[
35 | "opencv-python>=4.1.2.30",
36 | "opencv-contrib-python>=4.1.2.30",
37 | "moviepy==1.0.3",
38 | "imageio>=2.5.0",
39 | "imageio-ffmpeg>=0.4.7",
40 | "numpy>=0.18.0",
41 | "loguru>=0.2.5",
42 | "scikit-image>=0.16.0",
43 | "scikit-learn>=0.21.0",
44 | "pyecharts>=1.3.1",
45 | "findit>=0.5.8",
46 | "Jinja2>=3.0.3",
47 | "MarkupSafe>=2.1.1;python_version>='3.7'",
48 | "MarkupSafe==2.0.1;python_version<'3.7'",
49 | "fire>=0.2.1",
50 | "keras>=2.3.1",
51 | "pydantic==1.*",
52 | ],
53 | entry_points={"console_scripts": ["stagesepx = stagesepx.cli:main"]},
54 | )
55 |
--------------------------------------------------------------------------------
/stagesepx/__init__.py:
--------------------------------------------------------------------------------
1 | # ________ _________ ________ ________ _______ ________ _______ ________ ___ ___
2 | # |\ ____\|\___ ___\\ __ \|\ ____\|\ ___ \ |\ ____\|\ ___ \ |\ __ \ |\ \ / /|
3 | # \ \ \___|\|___ \ \_\ \ \|\ \ \ \___|\ \ __/|\ \ \___|\ \ __/|\ \ \|\ \ \ \ \/ / /
4 | # \ \_____ \ \ \ \ \ \ __ \ \ \ __\ \ \_|/_\ \_____ \ \ \_|/_\ \ ____\ \ \ / /
5 | # \|____|\ \ \ \ \ \ \ \ \ \ \ \|\ \ \ \_|\ \|____|\ \ \ \_|\ \ \ \___| / \/
6 | # ____\_\ \ \ \__\ \ \__\ \__\ \_______\ \_______\____\_\ \ \_______\ \__\ / /\ \
7 | # |\_________\ \|__| \|__|\|__|\|_______|\|_______|\_________\|_______|\|__| /__/ /\ __\
8 | # \|_________| \|_________| |__|/ \|__|
9 | #
10 |
11 | """
12 | MIT License
13 |
14 | Copyright (c) 2019 williamfzc
15 |
16 | Permission is hereby granted, free of charge, to any person obtaining a copy
17 | of this software and associated documentation files (the "Software"), to deal
18 | in the Software without restriction, including without limitation the rights
19 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
20 | copies of the Software, and to permit persons to whom the Software is
21 | furnished to do so, subject to the following conditions:
22 |
23 | The above copyright notice and this permission notice shall be included in all
24 | copies or substantial portions of the Software.
25 |
26 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
27 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
28 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
29 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
30 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
31 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 | SOFTWARE.
33 | """
34 |
35 | __PROJECT_NAME__ = r"stagesepx"
36 | __AUTHOR__ = r"williamfzc"
37 | __AUTHOR_EMAIL__ = r"williamfzc@foxmail.com"
38 | __LICENSE__ = r"MIT"
39 | __URL__ = r"https://github.com/williamfzc/stagesepx"
40 | __VERSION__ = r"0.18.3"
41 | __DESCRIPTION__ = r"detect stages in video automatically"
42 |
--------------------------------------------------------------------------------
/stagesepx/api.py:
--------------------------------------------------------------------------------
1 | """
2 | high level API
3 | """
4 | import os
5 | import typing
6 | import traceback
7 | import tempfile
8 | import json
9 | import pathlib
10 | from enum import Enum
11 | from loguru import logger
12 | from pydantic import BaseModel
13 |
14 | from stagesepx.cutter import VideoCutter
15 | from stagesepx.classifier import SVMClassifier
16 | from stagesepx.hook import BaseHook
17 | from stagesepx.reporter import Reporter
18 | from stagesepx import constants
19 | from stagesepx.video import VideoObject
20 |
21 |
22 | def run(config: typing.Union[dict, str]):
23 | """
24 | run with config
25 |
26 |     :param config: config file path, or a preloaded dict
27 | :return:
28 | """
29 |
30 | class _VideoUserConfig(BaseModel):
31 | path: str
32 | pre_load: bool = True
33 | fps: int = None
34 |
35 | class _CutterUserConfig(BaseModel):
36 | threshold: float = None
37 | frame_count: int = None
38 | offset: int = None
39 | limit: int = None
40 | block: int = None
41 |
42 | # common
43 | compress_rate: float = None
44 | target_size: typing.Tuple[int, int] = None
45 |
46 | class _ClassifierType(Enum):
47 | SVM = "svm"
48 | KERAS = "keras"
49 |
50 | class _ClassifierUserConfig(BaseModel):
51 | boost_mode: bool = None
52 | classifier_type: _ClassifierType = _ClassifierType.SVM
53 | model: str = None
54 |
55 | # common
56 | compress_rate: float = None
57 | target_size: typing.Tuple[int, int] = None
58 |
59 | class _CalcOperatorType(Enum):
60 | BETWEEN = "between"
61 | DISPLAY = "display"
62 |
63 | class _CalcOperator(BaseModel):
64 | name: str
65 | calc_type: _CalcOperatorType
66 | args: dict = dict()
67 |
68 | class _CalcUserConfig(BaseModel):
69 | output: str = None
70 | ignore_error: bool = None
71 | operators: typing.List[_CalcOperator] = None
72 |
73 | class _ExtraUserConfig(BaseModel):
74 | save_train_set: str = None
75 |
76 | class UserConfig(BaseModel):
77 | output: str
78 | video: _VideoUserConfig
79 | cutter: _CutterUserConfig = _CutterUserConfig()
80 | classifier: _ClassifierUserConfig = _ClassifierUserConfig()
81 | calc: _CalcUserConfig = _CalcUserConfig()
82 | extras: _ExtraUserConfig = _ExtraUserConfig()
83 |
84 | if isinstance(config, str):
85 | # path
86 | config_path = pathlib.Path(config)
87 | assert config_path.is_file(), f"no config file found in {config_path}"
88 |
89 | # todo: support different types in the future
90 | assert config_path.as_posix().endswith(
91 | ".json"
92 | ), "config file should be json format"
93 | with open(config_path, encoding=constants.CHARSET) as f:
94 | config = json.load(f)
95 |
96 | config = UserConfig(**config)
97 | logger.info(f"config: {config}")
98 |
99 | # main flow
100 | video = VideoObject(
101 | # fmt: off
102 | path=config.video.path,
103 | fps=config.video.fps,
104 | )
105 | if config.video.pre_load:
106 | video.load_frames()
107 |
108 | # cut
109 | cutter = VideoCutter(
110 | # fmt: off
111 | compress_rate=config.cutter.compress_rate,
112 | target_size=config.cutter.target_size,
113 | )
114 | res = cutter.cut(
115 | # fmt: off
116 | video=video,
117 | block=config.cutter.block,
118 | )
119 | stable, unstable = res.get_range(
120 | # fmt: off
121 | threshold=config.cutter.threshold,
122 | offset=config.cutter.offset,
123 | )
124 |
125 | with tempfile.TemporaryDirectory() as temp_dir:
126 | # classify
127 | if config.classifier.classifier_type is _ClassifierType.SVM:
128 | cl = SVMClassifier(
129 | # fmt: off
130 | compress_rate=config.classifier.compress_rate,
131 | target_size=config.classifier.target_size,
132 | )
133 | elif config.classifier.classifier_type is _ClassifierType.KERAS:
134 | from stagesepx.classifier.keras import KerasClassifier
135 |
136 | cl = KerasClassifier(
137 | # fmt: off
138 | compress_rate=config.classifier.compress_rate,
139 | target_size=config.classifier.target_size,
140 | )
141 | # validation has been applied by pydantic
142 | # so no `else`
143 |
144 | if config.classifier.model:
145 | # no need to retrain
146 | model_path = pathlib.Path(config.classifier.model)
147 |             assert model_path.is_file(), f"file {model_path} does not exist"
148 | cl.load_model(model_path)
149 | else:
150 | # train a new model
151 | train_set_dir = config.extras.save_train_set or temp_dir
152 | os.makedirs(train_set_dir, exist_ok=True)
153 |
154 | res.pick_and_save(
155 | # fmt: off
156 | stable,
157 | frame_count=config.cutter.frame_count,
158 | to_dir=train_set_dir,
159 | )
160 | cl.train(data_path=train_set_dir)
161 |
162 | # start classifying
163 | classify_result = cl.classify(
164 | # fmt: off
165 | video,
166 | stable,
167 | boost_mode=config.classifier.boost_mode,
168 | )
169 |
170 | # calc
171 | def _calc_display() -> dict:
172 | # jsonify
173 | return json.loads(classify_result.dumps())
174 |
175 | def _calc_between(*, from_stage: str = None, to_stage: str = None) -> dict:
176 | assert classify_result.contain(
177 | from_stage
178 | ), f"no stage {from_stage} found in result"
179 | assert classify_result.contain(to_stage), f"no stage {to_stage} found in result"
180 | from_frame = classify_result.last(from_stage)
181 | to_frame = classify_result.first(to_stage)
182 | cost = to_frame.timestamp - from_frame.timestamp
183 | return {
184 | "from": from_frame.frame_id,
185 | "to": to_frame.frame_id,
186 | "cost": cost,
187 | }
188 |
189 | _calc_func_dict = {
190 | _CalcOperatorType.BETWEEN: _calc_between,
191 | _CalcOperatorType.DISPLAY: _calc_display,
192 | }
193 | calc_output = config.calc.output
194 | if calc_output:
195 | output_path = pathlib.Path(calc_output)
196 |             assert not output_path.is_file(), f"file {output_path} already exists"
197 | result = []
198 | for each_calc in config.calc.operators:
199 | func = _calc_func_dict[each_calc.calc_type]
200 | try:
201 | func_ret = func(**each_calc.args)
202 | except Exception as e:
203 | if not config.calc.ignore_error:
204 | raise
205 | logger.warning(e)
206 | func_ret = traceback.format_exc()
207 | calc_ret = {
208 | "name": each_calc.name,
209 | "type": each_calc.calc_type.value,
210 | "result": func_ret,
211 | }
212 | result.append(calc_ret)
213 | with open(output_path, "w", encoding=constants.CHARSET) as f:
214 | json.dump(result, f)
215 |
216 | # draw
217 | r = Reporter()
218 | r.draw(
219 | # fmt: off
220 | classify_result,
221 | report_path=config.output,
222 | )
223 |
224 |
225 | def keras_train(
226 | train_data_path: str,
227 | model_path: str,
228 | # options
229 | epochs: int = 10,
230 | target_size: str = "600x800",
231 | overwrite: bool = False,
232 | **kwargs,
233 | ):
234 | from stagesepx.classifier.keras import KerasClassifier
235 |
236 |     assert not os.path.isfile(model_path), f"file {model_path} already exists"
237 | # handle args
238 | target_size: typing.Sequence[int] = [int(each) for each in target_size.split("x")]
239 |
240 | cl = KerasClassifier(
241 |         # number of epochs
242 | epochs=epochs,
243 |         # keep the dataset resolution consistent
244 | target_size=target_size,
245 | **kwargs,
246 | )
247 | cl.train(train_data_path)
248 | cl.save_model(model_path, overwrite=overwrite)
249 |
250 |
251 | def analyse(
252 | video: typing.Union[str, VideoObject],
253 | output_path: str,
254 | pre_load: bool = True,
255 | threshold: float = 0.98,
256 | offset: int = 3,
257 | boost_mode: bool = True,
258 | ):
259 | """designed for https://github.com/williamfzc/stagesepx/issues/123"""
260 |
261 | if isinstance(video, str):
262 | video = VideoObject(video, pre_load=pre_load)
263 |
264 | cutter = VideoCutter()
265 | res = cutter.cut(video)
266 |
267 | stable, unstable = res.get_range(
268 | threshold=threshold,
269 | offset=offset,
270 | )
271 |
272 | with tempfile.TemporaryDirectory() as temp_dir:
273 | res.pick_and_save(
274 | stable,
275 | 5,
276 | to_dir=temp_dir,
277 | )
278 |
279 | cl = SVMClassifier()
280 | cl.load(temp_dir)
281 | cl.train()
282 | classify_result = cl.classify(video, stable, boost_mode=boost_mode)
283 |
284 | r = Reporter()
285 | r.draw(
286 | classify_result,
287 | report_path=output_path,
288 | unstable_ranges=unstable,
289 | cut_result=res,
290 | )
291 |
292 |
293 | # https://github.com/williamfzc/stagesepx/issues/158
294 | # not a good design, hide this method
295 | # maybe i am wrong
296 | def _diff(
297 | video_before: typing.Union[str, VideoObject],
298 | video_after: typing.Union[str, VideoObject],
299 | pre_hooks: typing.List[BaseHook] = None,
300 | *args,
301 | **kwargs,
302 | ):
303 | cutter = VideoCutter()
304 | if isinstance(video_before, str):
305 | video_before = VideoObject(video_before)
306 | video_before.load_frames()
307 | if isinstance(video_after, str):
308 | video_after = VideoObject(video_after)
309 | video_after.load_frames()
310 |
311 | res = cutter.cut(video_before)
312 | res1 = cutter.cut(video_after)
313 | return res.diff(res1, pre_hooks, *args, **kwargs)
314 |
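315 | # For reference, a minimal config sketch accepted by `run` (keys taken from the
316 | # pydantic models above; the concrete values here are illustrative assumptions,
317 | # not library defaults):
318 | #
319 | # run({
320 | #     "output": "./report.html",
321 | #     "video": {"path": "./demo.mp4", "pre_load": True},
322 | #     "cutter": {"threshold": 0.95, "frame_count": 5},
323 | #     "classifier": {"classifier_type": "svm", "boost_mode": True},
324 | # })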
--------------------------------------------------------------------------------
/stagesepx/classifier/__init__.py:
--------------------------------------------------------------------------------
1 | from stagesepx.classifier.base import SingleClassifierResult, ClassifierResult
2 |
3 | from stagesepx.classifier.ssim import SSIMClassifier
4 | from stagesepx.classifier.svm import SVMClassifier
5 |
--------------------------------------------------------------------------------
/stagesepx/classifier/base.py:
--------------------------------------------------------------------------------
1 | import pathlib
2 | import typing
3 | from collections import OrderedDict
4 | import os
5 | import json
6 | import cv2
7 | import time
8 | import numpy as np
9 | import difflib
10 | from loguru import logger
11 |
12 | from stagesepx.cutter import VideoCutRange
13 | from stagesepx import toolbox
14 | from stagesepx import constants
15 | from stagesepx.hook import BaseHook, GreyHook, CompressHook
16 | from stagesepx.video import VideoObject, VideoFrame
17 |
18 |
19 | class SingleClassifierResult(object):
20 | def __init__(
21 | self,
22 | video_path: str,
23 | frame_id: int,
24 | timestamp: float,
25 | stage: str,
26 | data: np.ndarray = None,
27 | ):
28 | self.video_path: str = video_path
29 | self.frame_id: int = frame_id
30 | self.timestamp: float = timestamp
31 | self.stage: str = stage
32 |
33 | # optional
34 | self.data: np.ndarray = data
35 |
36 | def to_video_frame(self, *args, **kwargs) -> VideoFrame:
37 | # VideoFrame has `data`
38 | # SingleClassifierResult has `stage` (data is optional)
39 |
40 | # already have data
41 | if self.data is not None:
42 | return VideoFrame(self.frame_id, self.timestamp, self.data)
43 | # no data
44 | with toolbox.video_capture(self.video_path) as cap:
45 | frame = toolbox.get_frame(cap, self.frame_id)
46 | compressed = toolbox.compress_frame(frame, *args, **kwargs)
47 | return VideoFrame(self.frame_id, self.timestamp, compressed)
48 |
49 | def get_data(self) -> np.ndarray:
50 | return self.to_video_frame().data
51 |
52 | def is_stable(self) -> bool:
53 | return self.stage not in (
54 | constants.UNSTABLE_FLAG,
55 | constants.IGNORE_FLAG,
56 | constants.UNKNOWN_STAGE_FLAG,
57 | )
58 |
59 | def contain_image(
60 | self, *, image_path: str = None, image_object: np.ndarray = None, **kwargs
61 | ) -> typing.Dict[str, typing.Any]:
62 | return self.to_video_frame().contain_image(
63 | image_path=image_path, image_object=image_object, **kwargs
64 | )
65 |
66 | def to_dict(self) -> typing.Dict:
67 | return self.__dict__
68 |
69 | def __str__(self):
70 | return f""
71 |
72 | __repr__ = __str__
73 |
74 |
75 | class DiffResult(object):
76 | def __init__(
77 | self, origin_data: "ClassifierResult", another_data: "ClassifierResult"
78 | ):
79 | self.origin_data = origin_data
80 | self.another_data = another_data
81 |
82 | @property
83 | def origin_stage_list(self):
84 | return self.origin_data.get_ordered_stage_set()
85 |
86 | @property
87 | def another_stage_list(self):
88 | return self.another_data.get_ordered_stage_set()
89 |
90 | def ok(self) -> bool:
91 | return self.origin_stage_list == self.another_stage_list
92 |
93 | def get_diff_str(self):
94 | return difflib.Differ().compare(self.origin_stage_list, self.another_stage_list)
95 |
96 |
97 | class ClassifierResult(object):
98 | LABEL_DATA: str = "data"
99 | LABEL_VIDEO_PATH: str = "video_path"
100 |
101 | def __init__(self, data: typing.List[SingleClassifierResult]):
102 | self.video_path: str = data[0].video_path
103 | self.data: typing.List[SingleClassifierResult] = data
104 |
105 | def get_timestamp_list(self) -> typing.List[float]:
106 | return [each.timestamp for each in self.data]
107 |
108 | def get_stage_list(self) -> typing.List[str]:
109 | return [each.stage for each in self.data]
110 |
111 | def get_length(self) -> int:
112 | return len(self.data)
113 |
114 | def get_offset(self) -> float:
115 | # timestamp offset between frames
116 | return self.data[1].timestamp - self.data[0].timestamp
117 |
118 | def get_ordered_stage_set(self) -> typing.List[str]:
119 | ret = list()
120 | for each in self.get_stage_list():
121 | # first element
122 | if not ret:
123 | ret.append(each)
124 | continue
125 | if each == ret[-1]:
126 | continue
127 | ret.append(each)
128 | return ret
129 |
130 | def get_stage_set(self) -> typing.Set[str]:
131 | return set(self.get_stage_list())
132 |
133 | def to_dict(
134 | self,
135 | ) -> typing.Dict[str, typing.List[typing.List[SingleClassifierResult]]]:
136 | stage_list = list(self.get_stage_set())
137 | try:
138 | int(stage_list[0])
139 | except ValueError:
140 | stage_list.sort()
141 | else:
142 | stage_list.sort(key=lambda o: int(o))
143 |
144 | d = OrderedDict()
145 | for each_stage in stage_list:
146 | d[each_stage] = self.get_specific_stage_range(each_stage)
147 | return d
148 |
149 | def contain(self, stage_name: str) -> bool:
150 | return stage_name in self.get_stage_set()
151 |
152 | def first(self, stage_name: str) -> SingleClassifierResult:
153 | for each in self.data:
154 | if each.stage == stage_name:
155 | logger.debug(f"first frame of {stage_name}: {each}")
156 | return each
157 | logger.warning(f"no stage named {stage_name} found")
158 |
159 | def last(self, stage_name: str) -> SingleClassifierResult:
160 | for each in self.data[::-1]:
161 | if each.stage == stage_name:
162 | logger.debug(f"last frame of {stage_name}: {each}")
163 | return each
164 | logger.warning(f"no stage named {stage_name} found")
165 |
166 | def get_stage_range(self) -> typing.List[typing.List[SingleClassifierResult]]:
167 | """
168 | return a range list.
169 | if your video has 30 frames, with 3 stages, this list can be:
170 | [(0, 1, ... 11), (12, 13 ... 20), (21, 22 ... 30)]
171 |
172 | :return:
173 | """
174 | result: typing.List[typing.List[SingleClassifierResult]] = []
175 |
176 | # real data
177 | cur = self.data[0]
178 | # frame id = index + 1
179 | cur_index = cur.frame_id - 1
180 | # init pointer
181 | ptr = cur_index
182 | length = self.get_length()
183 | while ptr < length:
184 | # next frame
185 | next_one = self.data[ptr]
186 | # is continuous?
187 | if cur.stage == next_one.stage:
188 | ptr += 1
189 | continue
190 | # +1 because:
191 | # [1,2,3,4,5][1:3] == [2,3]
192 | # -1 because:
193 | # current ptr is the next frame
194 | result.append(self.data[cur_index : ptr + 1 - 1] or [self.data[cur_index]])
195 | cur = next_one
196 | cur_index = next_one.frame_id - 1
197 |
198 | # issue #90
199 | assert len(result) > 0, "video seems to only contain one stage"
200 |
201 | last_data = self.data[-1]
202 | last_result = result[-1][-1]
203 | if last_result != last_data:
204 | result.append(
205 | self.data[last_result.frame_id - 1 + 1 : last_data.frame_id - 1 + 1]
206 | or [self.data[last_result.frame_id - 1]]
207 | )
208 | logger.debug(f"get stage range: {result}")
209 | return result
210 |
211 | def get_specific_stage_range(
212 | self, stage_name: str
213 | ) -> typing.List[typing.List[SingleClassifierResult]]:
214 | """get specific stage range by stage name (maybe contains some partition"""
215 | ret = list()
216 | for each_range in self.get_stage_range():
217 | cur = each_range[0]
218 | if cur.stage == stage_name:
219 | ret.append(each_range)
220 | return ret
221 |
222 | def get_not_stable_stage_range(
223 | self,
224 | ) -> typing.List[typing.List[SingleClassifierResult]]:
225 | unstable = self.get_specific_stage_range(constants.UNSTABLE_FLAG)
226 | ignore = self.get_specific_stage_range(constants.IGNORE_FLAG)
227 | return sorted(unstable + ignore, key=lambda x: x[0].stage)
228 |
229 | def mark_range(self, start: int, end: int, target_stage: str):
230 | for each in self.data[start:end]:
231 | each.stage = target_stage
232 | logger.debug(f"range {start} to {end} has been marked as {target_stage}")
233 |
234 | def mark_range_unstable(self, start: int, end: int):
235 | self.mark_range(start, end, constants.UNSTABLE_FLAG)
236 |
237 | def mark_range_ignore(self, start: int, end: int):
238 | self.mark_range(start, end, constants.IGNORE_FLAG)
239 |
240 | def time_cost_between(self, start_stage: str, end_stage: str) -> float:
241 | return self.first(end_stage).timestamp - self.last(start_stage).timestamp
242 |
243 | def get_important_frame_list(self) -> typing.List[SingleClassifierResult]:
244 | # save the first frame
245 | result = [self.data[0]]
246 |
247 | prev = self.data[0]
248 | for cur in self.data[1:]:
249 | if cur.stage != prev.stage:
250 | result.append(prev)
251 | result.append(cur)
252 | prev = cur
253 |
254 | # save the latest frame
255 | if result[-1] != self.data[-1]:
256 | result.append(self.data[-1])
257 | return result
258 |
259 | def calc_changing_cost(
260 | self,
261 | ) -> typing.Dict[str, typing.Tuple[SingleClassifierResult, SingleClassifierResult]]:
262 | """calc time cost between stages"""
263 | # add changing cost
264 | cost_dict: typing.Dict[
265 | str, typing.Tuple[SingleClassifierResult, SingleClassifierResult]
266 | ] = {}
267 | i = 0
268 | while i < len(self.data) - 1:
269 | cur = self.data[i]
270 | next_one = self.data[i + 1]
271 |
272 | # next one is changing
273 | if not next_one.is_stable():
274 | for j in range(i + 1, len(self.data)):
275 | i = j
276 | next_one = self.data[j]
277 | if next_one.is_stable():
278 | break
279 |
280 | changing_name = f"from {cur.stage} to {next_one.stage}"
281 | cost_dict[changing_name] = (cur, next_one)
282 | else:
283 | i += 1
284 | return cost_dict
285 |
286 | def dumps(self) -> str:
287 | # for np.ndarray
288 | def _handler(obj: object):
289 | if isinstance(obj, np.ndarray):
290 | # ignore
291 | return ""
292 | return obj.__dict__
293 |
294 | return json.dumps(self, sort_keys=True, default=_handler)
295 |
296 | def dump(self, json_path: str, **kwargs):
297 | logger.debug(f"dump result to {json_path}")
298 |         assert not os.path.isfile(json_path), f"{json_path} already exists"
299 | with open(json_path, "w+", **kwargs) as f:
300 | f.write(self.dumps())
301 |
302 | @classmethod
303 | def load(cls, from_file: str) -> "ClassifierResult":
304 |         assert os.path.isfile(from_file), f"file {from_file} does not exist"
305 | with open(from_file, encoding=constants.CHARSET) as f:
306 | content = json.load(f)
307 |
308 | data = content[cls.LABEL_DATA]
309 | return ClassifierResult([SingleClassifierResult(**each) for each in data])
310 |
311 | def diff(self, another: "ClassifierResult") -> DiffResult:
312 | return DiffResult(self, another)
313 |
314 | def is_order_correct(self, should_be: typing.List[str]) -> bool:
315 | cur = self.get_ordered_stage_set()
316 | len_cur, len_should_be = len(cur), len(should_be)
317 | if len_cur == len_should_be:
318 | return cur == should_be
319 | if len_cur < len_should_be:
320 | # some classes lost
321 | return False
322 | # compare
323 | ptr_should, ptr_cur = 0, 0
324 | while ptr_cur < len_cur:
325 | if cur[ptr_cur] == should_be[ptr_should]:
326 | ptr_should += 1
327 | ptr_cur += 1
328 | if ptr_should == len_should_be:
329 | return True
330 | return False
331 |
332 | # alias
333 | get_frame_length = get_offset
334 |
335 |
336 | class BaseClassifier(object):
337 | def __init__(
338 | self,
339 | compress_rate: float = None,
340 | target_size: typing.Tuple[int, int] = None,
341 | *args,
342 | **kwargs,
343 | ):
344 | # default compress rate is 0.2
345 | if (not compress_rate) and (not target_size):
346 | logger.debug(
347 | f"no compress rate or target size received. set compress rate to 0.2"
348 | )
349 | compress_rate = 0.2
350 |
351 | self.compress_rate = compress_rate
352 | self.target_size = target_size
353 | logger.debug(f"compress rate: {self.compress_rate}")
354 | logger.debug(f"target size: {self.target_size}")
355 |
356 | self._data: typing.Dict[str, typing.Union[typing.List[pathlib.Path]]] = dict()
357 |
358 | # init inner hooks
359 | self._hook_list: typing.List[BaseHook] = list()
360 | compress_hook = CompressHook(
361 | overwrite=True, compress_rate=compress_rate, target_size=target_size
362 | )
363 | grey_hook = GreyHook(overwrite=True)
364 | self.add_hook(compress_hook)
365 | self.add_hook(grey_hook)
366 |
367 | def add_hook(self, new_hook: BaseHook):
368 | """
369 | add a hook
370 |
371 | :param new_hook:
372 | :return:
373 | """
374 | self._hook_list.append(new_hook)
375 | logger.debug(f"add hook: {new_hook.__class__.__name__}")
376 |
377 | def load(
378 | self, data: typing.Union[str, typing.List[VideoCutRange], None], *args, **kwargs
379 | ):
380 | """
381 |         most of the time, you MUST load data (from the cutter) before classification;
382 |         otherwise you need a trained model
383 |
384 | :param data: path to your cutter's result (mainly from pick_and_save)
385 | :param args:
386 | :param kwargs:
387 | :return:
388 | """
389 | if isinstance(data, str):
390 | return self.load_from_dir(data, *args, **kwargs)
391 | if isinstance(data, list):
392 | return self.load_from_list(data, *args, **kwargs)
393 | raise TypeError(f"data type error, should be str or typing.List[VideoCutRange]")
394 |
395 | def load_from_list(
396 | self, data: typing.List[VideoCutRange], frame_count: int = None, *_, **__
397 | ):
398 | for stage_name, stage_data in enumerate(data):
399 | target_frame_list = stage_data.pick(frame_count)
400 | self._data[str(stage_name)] = target_frame_list
401 |
402 | def load_from_dir(self, dir_path: str, *_, **__):
403 | p = pathlib.Path(dir_path)
404 | stage_dir_list = p.iterdir()
405 | for each in stage_dir_list:
406 | # load dir only
407 | if each.is_file():
408 | continue
409 | stage_name = each.name
410 | stage_pic_list = [i.absolute() for i in each.iterdir()]
411 | self._data[stage_name] = stage_pic_list
412 | logger.debug(
413 | f"stage [{stage_name}] found, and got {len(stage_pic_list)} pics"
414 | )
415 |
416 | def read(self, *args, **kwargs):
417 | for stage_name, stage_data in self._data.items():
418 | if isinstance(stage_data[0], pathlib.Path):
419 | yield stage_name, self.read_from_path(stage_data, *args, **kwargs)
420 | else:
421 | raise TypeError(
422 | f"data type error, should be str or typing.List[VideoCutRange]"
423 | )
424 |
425 | @staticmethod
426 | def read_from_path(data: typing.List[pathlib.Path], *_, **__):
427 | return (toolbox.imread(each.as_posix()) for each in data)
428 |
429 | def read_from_list(
430 | self, data: typing.List[int], video_cap: cv2.VideoCapture = None, *_, **__
431 | ):
432 |         raise DeprecationWarning("this function is already deprecated")
433 |
434 | def _classify_frame(self, frame: VideoFrame, *args, **kwargs) -> str:
435 | """must be implemented by sub class"""
436 | raise NotImplementedError
437 |
438 | def _apply_hook(self, frame: VideoFrame, *args, **kwargs) -> VideoFrame:
439 | for each_hook in self._hook_list:
440 | frame = each_hook.do(frame, *args, **kwargs)
441 | return frame
442 |
443 | def classify(
444 | self,
445 | video: typing.Union[str, VideoObject],
446 | valid_range: typing.List[VideoCutRange] = None,
447 | step: int = None,
448 | keep_data: bool = None,
449 | boost_mode: bool = None,
450 | *args,
451 | **kwargs,
452 | ) -> ClassifierResult:
453 | """
454 | start classification
455 |
456 | :param video: path to target video or VideoObject
457 |         :param valid_range: frames outside these ranges will be ignored
458 |         :param step: step between frames, defaults to 1
459 |         :param keep_data: defaults to False. if enabled, every frame will keep its numpy data.
460 | :param boost_mode:
461 | :param args:
462 | :param kwargs:
463 | :return:
464 | """
465 | logger.debug(f"classify with {self.__class__.__name__}")
466 | start_time = time.time()
467 |
468 | # default
469 | if not step:
470 | step = 1
471 | if boost_mode is None:
472 | boost_mode = True
473 | # check
474 | assert (boost_mode and valid_range) or (
475 | not (boost_mode or valid_range)
476 | ), "boost_mode required valid_range"
477 |
478 | final_result: typing.List[SingleClassifierResult] = list()
479 | if isinstance(video, str):
480 | video = VideoObject(video)
481 |
482 | operator = video.get_operator()
483 | frame = operator.get_frame_by_id(1)
484 | # for boost
485 | prev_result: typing.Optional[str] = None
486 | while frame is not None:
487 | # hook
488 | frame = self._apply_hook(frame, *args, **kwargs)
489 | # ignore some ranges
490 | if valid_range and not any(
491 | [each.contain(frame.frame_id) for each in valid_range]
492 | ):
493 | logger.debug(
494 | f"frame {frame.frame_id} ({frame.timestamp}) not in target range, skip"
495 | )
496 | result = constants.IGNORE_FLAG
497 | # is changing
498 | prev_result = None
499 | else:
500 | # is continuous?
501 | if boost_mode and (prev_result is not None):
502 | # do not classify again
503 | result = prev_result
504 | # else, do the real job
505 | else:
506 | prev_result = result = self._classify_frame(frame, *args, **kwargs)
507 | logger.debug(
508 | f"frame {frame.frame_id} ({frame.timestamp}) belongs to {result}"
509 | )
510 |
511 | final_result.append(
512 | SingleClassifierResult(
513 | video.path,
514 | frame.frame_id,
515 | frame.timestamp,
516 | result,
517 | frame.data if keep_data else None,
518 | )
519 | )
520 | frame = operator.get_frame_by_id(frame.frame_id + step)
521 | end_time = time.time()
522 | logger.debug(f"classifier cost: {end_time - start_time}")
523 | return ClassifierResult(final_result)
524 |
525 |
526 | class BaseModelClassifier(BaseClassifier):
527 | # model
528 | def save_model(self, model_path: str, overwrite: bool = None):
529 | raise NotImplementedError
530 |
531 | def load_model(self, model_path: str, overwrite: bool = None):
532 | raise NotImplementedError
533 |
534 | def clean_model(self):
535 | raise NotImplementedError
536 |
537 | # actions
538 | def train(self, data_path: str = None, *_, **__):
539 | raise NotImplementedError
540 |
541 | def predict(self, pic_path: str, *_, **__) -> str:
542 | raise NotImplementedError
543 |
544 | def predict_with_object(self, frame: np.ndarray) -> str:
545 | raise NotImplementedError
546 |
547 | def read_from_list(
548 | self, data: typing.List[int], video_cap: cv2.VideoCapture = None, *_, **__
549 | ):
550 |         raise ValueError("model-like classifiers only support loading data from files")
551 |
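552 | # A minimal subclassing sketch (illustrative only, not part of the library):
553 | # `_classify_frame` is the single method a concrete classifier must implement.
554 | #
555 | # class AlwaysZeroClassifier(BaseClassifier):
556 | #     def _classify_frame(self, frame: VideoFrame, *_, **__) -> str:
557 | #         # label every frame as stage "0"
558 | #         return "0"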
--------------------------------------------------------------------------------
/stagesepx/classifier/keras.py:
--------------------------------------------------------------------------------
1 | from loguru import logger
2 | import os
3 | import cv2
4 | import typing
5 | import numpy as np
6 | import pathlib
7 |
8 | try:
9 | import tensorflow
10 | except ImportError:
11 | raise ImportError("KerasClassifier requires tensorflow. install it first.")
12 |
13 | try:
14 | from keras.preprocessing.image import ImageDataGenerator
15 | except ImportError:
16 | # https://stackoverflow.com/questions/78145837/importerror-cannot-import-name-imagedatagenerator-from-keras-preprocessing-i
17 | from keras.src.legacy.preprocessing.image import ImageDataGenerator
18 |
19 | # https://github.com/tensorflow/models/issues/6177
20 | from tensorflow.keras.models import Sequential
21 | from tensorflow.keras.layers import Conv2D, MaxPooling2D
22 | from tensorflow.keras.layers import Activation, Dropout, Flatten, Dense
23 | from keras import backend as K
24 |
25 | from stagesepx.classifier.base import BaseModelClassifier
26 | from stagesepx import toolbox
27 | from stagesepx.video import VideoFrame
28 | from stagesepx import constants
29 |
30 |
31 | class KerasClassifier(BaseModelClassifier):
32 | UNKNOWN_STAGE_NAME = constants.UNKNOWN_STAGE_FLAG
33 | # https://github.com/williamfzc/stagesepx/issues/112
34 | MODEL_DENSE = 6
35 |
36 | def __init__(
37 | self,
38 | score_threshold: float = None,
39 | data_size: typing.Sequence[int] = None,
40 | nb_train_samples: int = None,
41 | nb_validation_samples: int = None,
42 | epochs: int = None,
43 | batch_size: int = None,
44 | *_,
45 | **__,
46 | ):
47 | super(KerasClassifier, self).__init__(*_, **__)
48 |
49 | # model
50 | self._model: typing.Optional[Sequential] = None
51 | # settings
52 | self.score_threshold: float = score_threshold or 0.0
53 | self.data_size: typing.Sequence[int] = data_size or (200, 200)
54 | self.nb_train_samples: int = nb_train_samples or 64
55 | self.nb_validation_samples: int = nb_validation_samples or 64
56 | self.epochs: int = epochs or 20
57 | self.batch_size: int = batch_size or 4
58 |
59 | logger.debug(f"score threshold: {self.score_threshold}")
60 | logger.debug(f"data size: {self.data_size}")
61 | logger.debug(f"nb train samples: {self.nb_train_samples}")
62 | logger.debug(f"nb validation samples: {self.nb_validation_samples}")
63 | logger.debug(f"epochs: {self.epochs}")
64 | logger.debug(f"batch size: {self.batch_size}")
65 |
66 | def clean_model(self):
67 | self._model = None
68 |
69 | def save_model(self, model_path: str, overwrite: bool = None):
70 | """
71 | save trained weights
72 |
73 | :param model_path:
74 | :param overwrite:
75 | :return:
76 | """
77 | logger.debug(f"save model to {model_path}")
78 | # assert model file
79 | if os.path.isfile(model_path) and not overwrite:
80 | raise FileExistsError(
81 | f"model file {model_path} already existed, you can set `overwrite` True to cover it"
82 | )
83 | # assert model data is not empty
84 | assert self._model, "model is empty"
85 | self._model.save_weights(model_path)
86 |
87 | def load_model(self, model_path: str, overwrite: bool = None):
88 | """
89 | load trained model
90 |
91 | :param model_path:
92 | :param overwrite:
93 | :return:
94 | """
95 | logger.debug(f"load model from {model_path}")
96 | # assert model file
97 |         assert os.path.isfile(model_path), f"model file {model_path} does not exist"
98 | # assert model data is empty
99 | if self._model and not overwrite:
100 | raise RuntimeError(
101 | f"model is not empty, you can set `overwrite` True to cover it"
102 | )
103 | self._model = self.create_model()
104 | self._model.load_weights(model_path)
105 |
106 | def create_model(self) -> Sequential:
107 | """model structure. you can overwrite this method to build your own model"""
108 | logger.info(f"creating keras sequential model")
109 | if K.image_data_format() == "channels_first":
110 | input_shape = (1, *self.data_size)
111 | else:
112 | input_shape = (*self.data_size, 1)
113 |
114 | model = Sequential()
115 | model.add(Conv2D(32, (3, 3), input_shape=input_shape))
116 | model.add(Activation("relu"))
117 | model.add(MaxPooling2D(pool_size=(2, 2)))
118 |
119 | model.add(Conv2D(32, (3, 3)))
120 | model.add(Activation("relu"))
121 | model.add(MaxPooling2D(pool_size=(2, 2)))
122 |
123 | model.add(Conv2D(64, (3, 3)))
124 | model.add(Activation("relu"))
125 | model.add(MaxPooling2D(pool_size=(2, 2)))
126 | model.add(Flatten())
127 | model.add(Dense(64))
128 | model.add(Activation("relu"))
129 | model.add(Dropout(0.5))
130 | model.add(Dense(self.MODEL_DENSE))
131 |
132 | model.add(Activation("softmax"))
133 |
134 | model.compile(
135 | loss="sparse_categorical_crossentropy",
136 | optimizer="rmsprop",
137 | metrics=["accuracy"],
138 | )
139 | logger.info("model created")
140 | return model
141 |
142 | def train(self, data_path: str = None, *_, **__):
143 | """
144 | train your classifier with data. must be called before prediction
145 |
146 | :return:
147 | """
148 |
149 | def _data_verify(p: str):
150 | p = pathlib.Path(p)
151 | assert p.is_dir(), f"{p} is not a valid directory"
152 | # validate: at least two classes
153 | number_of_dir = len([each for each in os.listdir(p) if (p / each).is_dir()])
154 | assert (
155 | number_of_dir > 1
156 | ), f"dataset only contains one class. maybe some path errors happened: {p}?"
157 |
158 | # more than 6 classes?
159 | assert number_of_dir <= self.MODEL_DENSE, (
160 | f"dataset has {number_of_dir} classes (more than " + str(self.MODEL_DENSE) + "), please see "
161 | f"https://github.com/williamfzc/stagesepx/issues/112 "
162 | )
163 |
164 | _data_verify(data_path)
165 |
166 | if not self._model:
167 | logger.debug("no model can be used. build a new one.")
168 | self._model = self.create_model()
169 | else:
170 | logger.debug("model found")
171 |
172 | datagen = ImageDataGenerator(
173 | rescale=1.0 / 16, shear_range=0.2, zoom_range=0.2, validation_split=0.33
174 | )
175 |
176 | train_generator = datagen.flow_from_directory(
177 | data_path,
178 | target_size=self.data_size,
179 | batch_size=self.batch_size,
180 | color_mode="grayscale",
181 | class_mode="sparse",
182 | subset="training",
183 | )
184 |
185 | validation_generator = datagen.flow_from_directory(
186 | data_path,
187 | target_size=self.data_size,
188 | batch_size=self.batch_size,
189 | color_mode="grayscale",
190 | class_mode="sparse",
191 | subset="validation",
192 | )
193 |
194 | self._model.fit(
195 | train_generator,
196 | epochs=self.epochs,
197 | validation_data=validation_generator,
198 | )
199 |
200 | logger.debug("train finished")
201 |
202 | def predict(self, pic_path: str, *args, **kwargs) -> str:
203 | """
204 | predict a single picture
205 |
206 | :param pic_path:
207 | :return:
208 | """
209 | pic_object = toolbox.imread(pic_path)
210 | # fake VideoFrame for apply_hook
211 | fake_frame = VideoFrame(0, 0.0, pic_object)
212 | fake_frame = self._apply_hook(fake_frame, *args, **kwargs)
213 | return self.predict_with_object(fake_frame.data)
214 |
215 | def predict_with_object(self, frame: np.ndarray) -> str:
216 | """
217 | predict a single object
218 |
219 | :param frame:
220 | :return:
221 | """
222 | # resize for model
223 | frame = cv2.resize(frame, dsize=self.data_size)
224 | frame = np.expand_dims(frame, axis=[0, -1])
225 |
226 | result = self._model.predict(frame)
227 | tag = str(np.argmax(result, axis=1)[0])
228 | confidence = result.max()
229 | logger.debug(f"confidence: {confidence}")
230 | if confidence < self.score_threshold:
231 | logger.warning(
232 | f"max score is lower than {self.score_threshold}, unknown class"
233 | )
234 | return self.UNKNOWN_STAGE_NAME
235 | return tag
236 |
237 | def _classify_frame(self, frame: VideoFrame, *_, **__) -> str:
238 | return self.predict_with_object(frame.data)
239 |
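240 | # A minimal usage sketch (assumes `./dataset` holds between 2 and MODEL_DENSE
241 | # stage sub-folders, as `_data_verify` above requires; the paths are illustrative):
242 | #
243 | # cl = KerasClassifier(epochs=10, data_size=(200, 200))
244 | # cl.train("./dataset")
245 | # cl.save_model("./keras_model.weights.h5")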
--------------------------------------------------------------------------------
/stagesepx/classifier/ssim.py:
--------------------------------------------------------------------------------
1 | from loguru import logger
2 |
3 | from stagesepx.classifier.base import BaseClassifier
4 | from stagesepx import toolbox
5 | from stagesepx.video import VideoFrame
6 |
7 |
8 | class SSIMClassifier(BaseClassifier):
9 | def _classify_frame(
10 | self, frame: VideoFrame, threshold: float = None, *_, **__
11 | ) -> str:
12 | if not threshold:
13 | threshold = 0.85
14 |
15 | result = list()
16 | for each_stage_name, each_stage_pic_list in self.read():
17 | each_result = list()
18 | for target_pic in each_stage_pic_list:
19 | # apply hooks
20 | target_pic = self._apply_hook(VideoFrame(-1, -1.0, target_pic))
21 | target_pic = target_pic.data
22 |
23 | each_pic_ssim = toolbox.compare_ssim(frame.data, target_pic)
24 | each_result.append(each_pic_ssim)
25 | ssim = max(each_result)
26 | result.append((each_stage_name, ssim))
27 | logger.debug(f"stage [{each_stage_name}]: {ssim}")
28 |
29 | result = max(result, key=lambda x: x[1])
30 | if result[1] < threshold:
31 | logger.debug("not a known stage, set it -1")
32 | result = ("-1", result[1])
33 | return result[0]
34 |
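35 | # A minimal usage sketch (no training step: the classifier compares frames
36 | # against the loaded sample images; `./dataset` and `stable` are assumptions,
37 | # e.g. from pick_and_save and get_range):
38 | #
39 | # cl = SSIMClassifier()
40 | # cl.load("./dataset")
41 | # classify_result = cl.classify("../demo.mp4", stable)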
--------------------------------------------------------------------------------
/stagesepx/classifier/svm.py:
--------------------------------------------------------------------------------
1 | from loguru import logger
2 | import cv2
3 | import os
4 | import pickle
5 | import typing
6 | import numpy as np
7 | from sklearn.svm import LinearSVC
8 |
9 | from stagesepx.classifier.base import BaseModelClassifier
10 | from stagesepx import toolbox
11 | from stagesepx.video import VideoFrame
12 | from stagesepx import constants
13 |
14 |
15 | class SVMClassifier(BaseModelClassifier):
16 | FEATURE_DICT = {
17 | "hog": toolbox.turn_hog_desc,
18 | "lbp": toolbox.turn_lbp_desc,
19 | # do not use feature transform
20 | "raw": lambda x: x,
21 | }
22 | UNKNOWN_STAGE_NAME = constants.UNKNOWN_STAGE_FLAG
23 |
24 | def __init__(
25 | self, feature_type: str = None, score_threshold: float = None, *args, **kwargs
26 | ):
27 | """
28 | init classifier
29 |
30 | :param feature_type:
31 | before training, classifier will convert pictures into feature, for better classification.
32 | eg: 'hog', 'lbp' or 'raw'
33 | :param score_threshold:
34 | float, 0 - 1.0, under this value, label -> UNKNOWN_STAGE_NAME
35 | default value is 0 (None)
36 | """
37 | super().__init__(*args, **kwargs)
38 |
39 | # feature settings
40 | if not feature_type:
41 | feature_type = "hog"
42 | if feature_type not in self.FEATURE_DICT:
43 | raise AttributeError(f"no feature func named {feature_type}")
44 | self.feature_func: typing.Callable = self.FEATURE_DICT[feature_type]
45 | logger.debug(f"feature function: {feature_type}")
46 |
47 | # model settings
48 | self._model: typing.Optional[LinearSVC] = None
49 | self.score_threshold: float = score_threshold or 0.0
50 | logger.debug(f"score threshold: {self.score_threshold}")
51 |
52 | def clean_model(self):
53 | self._model = None
54 |
55 | def save_model(self, model_path: str, overwrite: bool = None):
56 | """
57 | save trained model
58 |
59 | :param model_path:
60 | :param overwrite:
61 | :return:
62 | """
63 | logger.debug(f"save model to {model_path}")
64 | # assert model file
65 | if os.path.isfile(model_path) and not overwrite:
66 | raise FileExistsError(
67 | f"model file {model_path} already existed, you can set `overwrite` True to cover it"
68 | )
69 | # assert model data is not empty
70 | assert self._model, "model is empty"
71 | with open(model_path, "wb") as f:
72 | pickle.dump(self._model, f)
73 |
74 | def load_model(self, model_path: str, overwrite: bool = None):
75 | """
76 | load trained model
77 |
78 | :param model_path:
79 | :param overwrite:
80 | :return:
81 | """
82 | logger.debug(f"load model from {model_path}")
83 | # assert model file
84 | assert os.path.isfile(model_path), f"model file {model_path} not existed"
85 | # assert model data is empty
86 | if self._model and not overwrite:
87 | raise RuntimeError(
88 | f"model is not empty, you can set `overwrite` True to cover it"
89 | )
90 |
91 |         # joblib raises an error here (no idea how to fix it), so use pickle instead
92 | with open(model_path, "rb") as f:
93 | self._model = pickle.load(f)
94 |
95 | def train(self, data_path: str = None, *_, **__):
96 | """
97 | train your classifier with data. must be called before prediction
98 |
99 | :return:
100 | """
101 | if not self._model:
102 | logger.debug("no model can be used. build a new one.")
103 | self._model = LinearSVC()
104 | else:
105 | logger.debug("already have a trained model. train on this model.")
106 |
107 | if data_path:
108 | self.load(data_path)
109 |
110 | train_data = list()
111 | train_label = list()
112 | for each_label, each_label_pic_list in self.read():
113 | for each_pic_object in each_label_pic_list:
114 | logger.debug(f"training label: {each_label}")
115 | # apply hook
116 | each_pic_object = self._apply_hook(
117 | VideoFrame(-1, -1.0, each_pic_object)
118 | )
119 | each_pic_object = each_pic_object.data
120 |
121 | each_pic_object = self.feature_func(each_pic_object).flatten()
122 | train_data.append(each_pic_object)
123 | train_label.append(each_label)
124 | logger.debug("data ready")
125 |
126 | assert (
127 | len(train_label) > 1
128 | ), f"seems only one class in the training dataset, at least two classes are required: {train_label}"
129 | self._model.fit(train_data, train_label)
130 | logger.debug("train finished")
131 |
132 | def predict_with_object(self, frame: np.ndarray) -> str:
133 | """
134 | predict a single object
135 |
136 | :param frame:
137 | :return:
138 | """
139 | pic_object = self.feature_func(frame)
140 | pic_object = pic_object.reshape(1, -1)
141 |
142 | # scores for each stages
143 | # IMPORTANT:
144 | # these scores are not always precise
145 |         # most of the time a tiny training set is used,
146 |         # which may cause 'liblinear failed to converge'
147 |         # the model still knows which one is the target class,
148 |         # but the calculated values may become weird
149 | scores = self._model.decision_function(pic_object)[0]
150 | logger.debug(f"scores: {scores}")
151 |
152 |         # in the binary case, the return type is different (wtf ...)
153 |         # probably for efficiency
154 | if len(self._model.classes_) == 2:
155 | # scores is a float
156 | # confidence score for self.classes_[1] where >0 means this
157 | # class would be predicted
158 | return self._model.classes_[1 if scores > 0 else 0]
159 |
160 | cur_confidence = max(scores)
161 | logger.debug(f"confidence: {cur_confidence}")
162 | # unknown
163 | if cur_confidence < self.score_threshold:
164 | logger.warning(
165 | f"max score is lower than {self.score_threshold}, unknown class"
166 | )
167 | return self.UNKNOWN_STAGE_NAME
168 |
169 | return self._model.classes_[np.argmax(scores)]
170 |
171 | def _classify_frame(self, frame: VideoFrame, *_, **__) -> str:
172 | return self.predict_with_object(frame.data)
173 |
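174 | # A minimal usage sketch (assumes `./dataset` holds one sub-folder per stage,
175 | # e.g. as produced by `pick_and_save`; the paths are illustrative):
176 | #
177 | # cl = SVMClassifier(feature_type="hog", score_threshold=0.5)
178 | # cl.train("./dataset")
179 | # cl.save_model("./model.pkl", overwrite=True)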
--------------------------------------------------------------------------------
/stagesepx/cli.py:
--------------------------------------------------------------------------------
1 | import fire
2 |
3 | from stagesepx import __PROJECT_NAME__, __VERSION__, __URL__
4 | from stagesepx import api
5 |
6 |
7 | class TerminalCli(object):
8 | __doc__ = f"""
9 | {__PROJECT_NAME__} version {__VERSION__}
10 |
11 |     this is a command-line client for stagesepx, for easier usage.
12 |     for much more flexible functionality, you'd better use the script way.
13 | more detail: {__URL__}
14 | """
15 |
16 |     # this layer was built for (pre-)processing args and kwargs
17 |     # e.g. translations, default values, and so on
18 | train = staticmethod(api.keras_train)
19 | analyse = staticmethod(api.analyse)
20 | run = staticmethod(api.run)
21 |
22 |
23 | def main():
24 | fire.Fire(TerminalCli)
25 |
26 |
27 | if __name__ == "__main__":
28 | main()
29 |
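30 | # Example invocations (subcommands are generated by fire from the methods above;
31 | # the paths are illustrative):
32 | #
33 | #   stagesepx analyse ./demo.mp4 ./report.html
34 | #   stagesepx train ./dataset ./model.h5 --epochs 10
35 | #   stagesepx run ./run_config.json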
--------------------------------------------------------------------------------
/stagesepx/constants.py:
--------------------------------------------------------------------------------
1 | # encoding
2 | CHARSET = r"utf-8"
3 |
4 | # path and name
5 | CUT_RESULT_FILE_NAME = r"cut_result.json"
6 | REPORT_FILE_NAME = r"report.html"
7 |
8 | # report
9 | BACKGROUND_COLOR = r"#fffaf4"
10 | UNSTABLE_FLAG = r"-1"
11 | UNKNOWN_STAGE_FLAG = r"-2"
12 | IGNORE_FLAG = r"-3"
13 |
14 | # config
15 | DEFAULT_THRESHOLD = 0.98
16 |
--------------------------------------------------------------------------------
/stagesepx/cutter/__init__.py:
--------------------------------------------------------------------------------
1 | from .cutter import VideoCutter
2 | from .cut_range import VideoCutRange
3 | from .cut_result import VideoCutResult
4 |
--------------------------------------------------------------------------------
/stagesepx/cutter/cut_range.py:
--------------------------------------------------------------------------------
1 | import typing
2 | import random
3 | import numpy as np
4 | from loguru import logger
5 |
6 | from stagesepx import toolbox
7 | from stagesepx import constants
8 | from stagesepx.hook import BaseHook
9 | from stagesepx.video import VideoObject, VideoFrame
10 |
11 |
12 | class VideoCutRange(object):
13 | def __init__(
14 | self,
15 | # TODO why can it be a dict?
16 | video: typing.Union[VideoObject, typing.Dict],
17 | start: int,
18 | end: int,
19 |         # TODO needs refactoring?
20 | ssim: typing.List[float],
21 | mse: typing.List[float],
22 | psnr: typing.List[float],
23 | start_time: float,
24 | end_time: float,
25 | ):
26 | if isinstance(video, dict):
27 | self.video = VideoObject(**video)
28 | else:
29 | self.video = video
30 |
31 | self.start = start
32 | self.end = end
33 | self.ssim = ssim
34 | self.mse = mse
35 | self.psnr = psnr
36 | self.start_time = start_time
37 | self.end_time = end_time
38 |
39 | # if length is 1
40 | # https://github.com/williamfzc/stagesepx/issues/9
41 | if start > end:
42 | self.start, self.end = self.end, self.start
43 | self.start_time, self.end_time = self.end_time, self.start_time
44 |
45 | logger.debug(
46 | f"new a range: {self.start}({self.start_time}) - {self.end}({self.end_time})"
47 | )
48 |
49 | def can_merge(self, another: "VideoCutRange", offset: int = None, **_):
50 | if not offset:
51 | is_continuous = self.end == another.start
52 | else:
53 | is_continuous = self.end + offset >= another.start
54 | return is_continuous and self.video.path == another.video.path
55 |
56 | def merge(self, another: "VideoCutRange", **kwargs) -> "VideoCutRange":
57 | assert self.can_merge(another, **kwargs)
58 | return __class__(
59 | self.video,
60 | self.start,
61 | another.end,
62 | self.ssim + another.ssim,
63 | self.mse + another.mse,
64 | self.psnr + another.psnr,
65 | self.start_time,
66 | another.end_time,
67 | )
68 |
69 | def contain(self, frame_id: int) -> bool:
70 | # in python:
71 | # range(0, 10) => [0, 10)
72 | # range(0, 10 + 1) => [0, 10]
73 | return frame_id in range(self.start, self.end + 1)
74 |
75 | # alias
76 | contain_frame_id = contain
77 |
78 | def contain_image(
79 | self, image_path: str = None, image_object: np.ndarray = None, *args, **kwargs
80 | ) -> typing.Dict[str, typing.Any]:
81 | # todo pick only one picture?
82 | target_id = self.pick(*args, **kwargs)[0]
83 | operator = self.video.get_operator()
84 | frame = operator.get_frame_by_id(target_id)
85 | return frame.contain_image(
86 | image_path=image_path, image_object=image_object, **kwargs
87 | )
88 |
89 | def pick(
90 | self, frame_count: int = None, is_random: bool = None, *_, **__
91 | ) -> typing.List[int]:
92 | if not frame_count:
93 | frame_count = 3
94 | logger.debug(
95 | f"pick {frame_count} frames "
96 | f"from {self.start}({self.start_time}) "
97 | f"to {self.end}({self.end_time}) "
98 | f"on video {self.video.path}"
99 | )
100 |
101 | result = list()
102 | if is_random:
103 | return random.sample(range(self.start, self.end), frame_count)
104 | length = self.get_length()
105 |
106 | # https://github.com/williamfzc/stagesepx/issues/37
107 | frame_count += 1
108 | for _ in range(1, frame_count):
109 | cur = int(self.start + length / frame_count * _)
110 | result.append(cur)
111 | return result
112 |
113 | def get_frames(
114 | self, frame_id_list: typing.List[int], *_, **__
115 | ) -> typing.List[VideoFrame]:
116 | """return a list of VideoFrame, usually works with pick"""
117 | out = list()
118 | operator = self.video.get_operator()
119 | for each_id in frame_id_list:
120 | frame = operator.get_frame_by_id(each_id)
121 | out.append(frame)
122 | return out
123 |
124 | def pick_and_get(self, *args, **kwargs) -> typing.List[VideoFrame]:
125 | picked = self.pick(*args, **kwargs)
126 | return self.get_frames(picked, *args, **kwargs)
127 |
128 | def get_length(self):
129 | return self.end - self.start + 1
130 |
131 | def is_stable(
132 | self, threshold: float = None, psnr_threshold: float = None, **_
133 | ) -> bool:
134 | # IMPORTANT function!
135 | # it decides whether a range is stable => everything else is based on it!
136 | if not threshold:
137 | threshold = constants.DEFAULT_THRESHOLD
138 |
139 | # ssim
140 | res = np.mean(self.ssim) > threshold
141 | # psnr (double check if stable)
142 | if res and psnr_threshold:
143 | res = np.mean(self.psnr) > psnr_threshold
144 |
145 | return res
146 |
147 | def is_loop(self, threshold: float = None, **_) -> bool:
148 | if not threshold:
149 | threshold = constants.DEFAULT_THRESHOLD
150 | operator = self.video.get_operator()
151 | start_frame = operator.get_frame_by_id(self.start)
152 | end_frame = operator.get_frame_by_id(self.end)
153 | return toolbox.compare_ssim(start_frame.data, end_frame.data) > threshold
154 |
155 | def diff(
156 | self,
157 | another: "VideoCutRange",
158 | pre_hooks: typing.List[BaseHook],
159 | *args,
160 | **kwargs,
161 | ) -> typing.List[float]:
162 | self_picked = self.pick_and_get(*args, **kwargs)
163 | another_picked = another.pick_and_get(*args, **kwargs)
164 | return toolbox.multi_compare_ssim(self_picked, another_picked, pre_hooks)
165 |
166 | def __str__(self):
167 | return f""
168 |
169 | __repr__ = __str__
170 |
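
A quick sketch of the spacing logic in `pick` above (cf. issue #37): for a `frame_count` of n, it returns n evenly spaced frame ids strictly inside the range. The snippet below mirrors that arithmetic standalone; the numbers are illustrative:

```python
# mirrors VideoCutRange.pick's spacing logic (illustrative values)
start, end, frame_count = 10, 30, 3
length = end - start + 1  # same as get_length()
picked = [int(start + length / (frame_count + 1) * i) for i in range(1, frame_count + 1)]
print(picked)  # [15, 20, 25] -- evenly spaced, endpoints excluded
```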
--------------------------------------------------------------------------------
/stagesepx/cutter/cutter.py:
--------------------------------------------------------------------------------
1 | import typing
2 | import time
3 | import numpy as np
4 | from loguru import logger
5 |
6 | from stagesepx import toolbox
7 | from stagesepx.cutter.cut_range import VideoCutRange
8 | from stagesepx.cutter.cut_result import VideoCutResult
9 | from stagesepx.video import VideoObject, VideoFrame
10 | from stagesepx.hook import BaseHook, GreyHook, CompressHook
11 |
12 |
13 | class VideoCutter(object):
14 | def __init__(
15 | self,
16 | step: int = None,
17 | compress_rate: float = None,
18 | target_size: typing.Tuple[int, int] = None,
19 | ):
20 | """
21 | init video cutter
22 |
23 | :param step: step between frames, default to 1
24 | :param compress_rate: compress rate of each frame before comparison, default to 0.2 (1/5 of the origin size)
25 | :param target_size: (width, height). if set, frames are resized to this size and compress_rate is ignored
26 | """
27 | self.step = step or 1
28 |
29 | # default compress rate is 0.2
30 | if (not compress_rate) and (not target_size):
31 | logger.debug(
32 | f"no compress rate or target size received. set compress rate to 0.2"
33 | )
34 | compress_rate = 0.2
35 |
36 | # init inner hook
37 | self._hook_list: typing.List[BaseHook] = list()
38 | compress_hook = CompressHook(
39 | overwrite=True, compress_rate=compress_rate, target_size=target_size
40 | )
41 | grey_hook = GreyHook(overwrite=True)
42 | self.add_hook(compress_hook)
43 | self.add_hook(grey_hook)
44 |
45 | def add_hook(self, new_hook: BaseHook):
46 | """
47 | add a hook
48 |
49 | :param new_hook:
50 | :return:
51 | """
52 | self._hook_list.append(new_hook)
53 | logger.debug(f"add hook: {new_hook.__class__.__name__}")
54 |
55 | @staticmethod
56 | def pic_split(origin: np.ndarray, block: int) -> typing.List[np.ndarray]:
57 | """actually, when block == 3, blocks' count would be 3 * 3 = 9"""
58 | result: typing.List[np.ndarray] = list()
59 | for each_block in np.array_split(origin, block, axis=0):
60 | sub_block = np.array_split(each_block, block, axis=1)
61 | result += sub_block
62 | return result
63 |
64 | def _apply_hook(self, frame: VideoFrame, *args, **kwargs) -> VideoFrame:
65 | for each_hook in self._hook_list:
66 | frame = each_hook.do(frame, *args, **kwargs)
67 | return frame
68 |
69 | def compare_frame_list(
70 | self, src: typing.List[np.ndarray], target: typing.List[np.ndarray]
71 | ) -> typing.List[float]:
72 | """
73 | core method about how to compare two lists of ndarray and get their ssim/mse/psnr
74 | you can overwrite this method to implement your own algo
75 | see https://github.com/williamfzc/stagesepx/issues/136
76 |
77 | :param src:
78 | :param target:
79 | :return:
80 | """
81 | # find the min ssim and the max mse / psnr
82 | ssim = 1.0
83 | mse = 0.0
84 | psnr = 0.0
85 |
86 | for part_index, (each_start, each_end) in enumerate(zip(src, target)):
87 | part_ssim = toolbox.compare_ssim(each_start, each_end)
88 | if part_ssim < ssim:
89 | ssim = part_ssim
90 |
91 | # mse is very sensitive
92 | part_mse = toolbox.calc_mse(each_start, each_end)
93 | if part_mse > mse:
94 | mse = part_mse
95 |
96 | part_psnr = toolbox.calc_psnr(each_start, each_end)
97 | if part_psnr > psnr:
98 | psnr = part_psnr
99 | logger.debug(
100 | f"part {part_index}: ssim={part_ssim}; mse={part_mse}; psnr={part_psnr}"
101 | )
102 | return [ssim, mse, psnr]
103 |
104 | def _convert_video_into_range_list(
105 | self, video: VideoObject, block: int, window_size: int, window_coefficient: int
106 | ) -> typing.List[VideoCutRange]:
107 |
108 | step = self.step
109 | video_length = video.frame_count
110 |
111 | class _Window(object):
112 | def __init__(self):
113 | self.start = 1
114 | self.size = window_size
115 | self.end = self.start + window_size * step
116 |
117 | def load_data(self) -> typing.List[VideoFrame]:
118 | cur = self.start
119 | result = []
120 | video_operator = video.get_operator()
121 | while cur <= self.end:
122 | frame = video_operator.get_frame_by_id(cur)
123 | result.append(frame)
124 | cur += step
125 | # at least 2
126 | if len(result) < 2:
127 | last = video_operator.get_frame_by_id(self.end)
128 | result.append(last)
129 | return result
130 |
131 | def shift(self) -> bool:
132 | logger.debug(f"window before: {self.start}, {self.end}")
133 | self.start += step
134 | self.end += step
135 | if self.start >= video_length:
136 | # out of range
137 | return False
138 | # window end
139 | if self.end >= video_length:
140 | self.end = video_length
141 | logger.debug(f"window after: {self.start}, {self.end}")
142 | return True
143 |
144 | def _float_merge(float_list: typing.List[float]) -> float:
145 | # weighted average: the earlier the item, the larger its weight
146 | length = len(float_list)
147 | result = 0.0
148 | denominator = 0.0
149 | for i, each in enumerate(float_list):
150 | weight = pow(length - i, window_coefficient)
151 | denominator += weight
152 | result += each * weight
153 | logger.debug(f"calc: {each} x {weight}")
154 | final = result / denominator
155 | logger.debug(f"calc final: {final} from {result} / {denominator}")
156 | return final
157 |
158 | range_list: typing.List[VideoCutRange] = list()
159 | logger.info(f"total frame count: {video_length}, size: {video.frame_size}")
160 |
161 | window = _Window()
162 | while True:
163 | frame_list = window.load_data()
164 | frame_list = [self._apply_hook(each) for each in frame_list]
165 |
166 | # window loop
167 | ssim_list = []
168 | mse_list = []
169 | psnr_list = []
170 |
171 | cur_frame = frame_list[0]
172 | first_target_frame = frame_list[1]
173 | cur_frame_list = self.pic_split(cur_frame.data, block)
174 | for each in frame_list[1:]:
175 | each_frame_list = self.pic_split(each.data, block)
176 | ssim, mse, psnr = self.compare_frame_list(
177 | cur_frame_list, each_frame_list
178 | )
179 | ssim_list.append(ssim)
180 | mse_list.append(mse)
181 | psnr_list.append(psnr)
182 | logger.debug(
183 | f"between {cur_frame.frame_id} & {each.frame_id}: ssim={ssim}; mse={mse}; psnr={psnr}"
184 | )
185 | ssim = _float_merge(ssim_list)
186 | mse = _float_merge(mse_list)
187 | psnr = _float_merge(psnr_list)
188 |
189 | range_list.append(
190 | VideoCutRange(
191 | video,
192 | start=cur_frame.frame_id,
193 | end=first_target_frame.frame_id,
194 | ssim=[ssim],
195 | mse=[mse],
196 | psnr=[psnr],
197 | start_time=cur_frame.timestamp,
198 | end_time=first_target_frame.timestamp,
199 | )
200 | )
201 | continue_flag = window.shift()
202 | if not continue_flag:
203 | break
204 |
205 | return range_list
206 |
207 | def cut(
208 | self,
209 | video: typing.Union[str, VideoObject],
210 | block: int = None,
211 | window_size: int = None,
212 | window_coefficient: int = None,
213 | *_,
214 | **kwargs,
215 | ) -> VideoCutResult:
216 | """
217 | convert video file, into a VideoCutResult
218 |
219 | :param video: video file path or VideoObject
220 | :param block: default to 3. when block == 3, frame will be split into 3 * 3 = 9 parts
221 | :param window_size: default to 1. size of the sliding window: how many following frames each frame is compared with
222 | :param window_coefficient: default to 2. exponent used to weight comparisons inside the window; nearer frames weigh more
223 | :return:
224 | """
225 | # args
226 | if not block:
227 | block = 3
228 | if not window_size:
229 | window_size = 1
230 | if not window_coefficient:
231 | window_coefficient = 2
232 |
233 | start_time = time.time()
234 | if isinstance(video, str):
235 | video = VideoObject(video)
236 |
237 | logger.info(f"start cutting: {video.path}")
238 |
239 | # if a video contains 100 frames,
240 | # ids start from 1 and the resulting list has 99 items, not 100
241 | # [Range(1-2), Range(2-3), Range(3-4) ... Range(99-100)]
242 | range_list = self._convert_video_into_range_list(
243 | video, block, window_size, window_coefficient
244 | )
245 | logger.info(f"cut finished: {video}")
246 | end_time = time.time()
247 | logger.debug(f"cutter cost: {end_time - start_time}")
248 |
249 | # TODO other analysis results can be added to VideoCutResult, such as AI cutter?
250 | return VideoCutResult(video, range_list, cut_kwargs=kwargs)
251 |
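
A minimal end-to-end sketch of the cutter API, assuming a local `demo.mp4` (the same file the tests use). The keyword values match the defaults documented in `cut()`:

```python
from stagesepx.cutter import VideoCutter

cutter = VideoCutter()
# cut() accepts a file path or a VideoObject and returns a VideoCutResult
res = cutter.cut("demo.mp4", block=3, window_size=1, window_coefficient=2)
# split the ranges into stable and unstable parts (see get_range in cut_result.py)
stable, unstable = res.get_range()
print(f"{len(stable)} stable ranges, {len(unstable)} unstable ranges")
```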
--------------------------------------------------------------------------------
/stagesepx/hook.py:
--------------------------------------------------------------------------------
1 | import os
2 | from loguru import logger
3 | import cv2
4 | import typing
5 | from findit import FindIt
6 |
7 | from stagesepx import toolbox
8 | from stagesepx.video import VideoFrame
9 |
10 |
11 | class BaseHook(object):
12 | def __init__(self, *_, **__):
13 | logger.debug(f"start initializing: {self.__class__.__name__} ...")
14 |
15 | # default: dict
16 | self.result = dict()
17 |
18 | def do(self, frame: VideoFrame, *_, **__) -> typing.Optional[VideoFrame]:
19 | info = f"execute hook: {self.__class__.__name__}"
20 |
21 | frame_id = frame.frame_id
22 | # when frame id == -1, it means handling some pictures outside the video
23 | if frame_id != -1:
24 | logger.debug(f"{info}, frame id: {frame_id}")
25 | return frame
26 |
27 |
28 | class ExampleHook(BaseHook):
29 | """this hook will help you write your own hook class"""
30 |
31 | def __init__(self, *_, **__):
32 | """
33 | hook has two ways to affect the result of analysis
34 |
35 | 1. add your result to self.result (or somewhere else), and get it by yourself after cut or classify
36 | 2. use label 'overwrite'. by enabling this, the hook will change the origin frame
37 | """
38 | super().__init__(*_, **__)
39 |
40 | # add your code here
41 | # ...
42 |
43 | def do(self, frame: VideoFrame, *_, **__) -> typing.Optional[VideoFrame]:
44 | super().do(frame, *_, **__)
45 |
46 | # you can get frame_id and frame data here
47 | # and use them to custom your own function
48 |
49 | # add your code here
50 | # ...
51 |
52 | # for example: turn the frame grey, and save the size of each frame
53 | frame.data = toolbox.turn_grey(frame.data)
54 | self.result[frame.frame_id] = frame.data.shape
55 |
56 | # if you are going to change the origin frame
57 | # just return the changed frame
58 | # and set 'overwrite' to 'True' when you are calling __init__
59 | return frame
60 |
61 | # for safety, if you do not want to modify the origin frame
62 | # you can return None instead of the frame
63 | # and nothing will happen even if 'overwrite' is set to 'True'
64 |
65 |
66 | # --- inner hook start ---
67 |
68 |
69 | class CompressHook(BaseHook):
70 | def __init__(
71 | self,
72 | compress_rate: float = None,
73 | target_size: typing.Tuple[int, int] = None,
74 | *_,
75 | **__,
76 | ):
77 | super().__init__(*_, **__)
78 | self.compress_rate = compress_rate
79 | self.target_size = target_size
80 | logger.debug(f"compress rate: {compress_rate}")
81 | logger.debug(f"target size: {target_size}")
82 |
83 | def do(self, frame: VideoFrame, *_, **__) -> typing.Optional[VideoFrame]:
84 | super().do(frame, *_, **__)
85 | frame.data = toolbox.compress_frame(
86 | frame.data, compress_rate=self.compress_rate, target_size=self.target_size
87 | )
88 | return frame
89 |
90 |
91 | class GreyHook(BaseHook):
92 | def do(self, frame: VideoFrame, *_, **__) -> typing.Optional[VideoFrame]:
93 | super().do(frame, *_, **__)
94 | frame.data = toolbox.turn_grey(frame.data)
95 | return frame
96 |
97 |
98 | class RefineHook(BaseHook):
99 | """this hook was built for refining the edges of images"""
100 |
101 | def do(self, frame: VideoFrame, *_, **__) -> typing.Optional[VideoFrame]:
102 | super().do(frame, *_, **__)
103 | frame.data = toolbox.sharpen_frame(frame.data)
104 | return frame
105 |
106 |
107 | class _AreaBaseHook(BaseHook):
108 | def __init__(
109 | self,
110 | size: typing.Tuple[typing.Union[int, float], typing.Union[int, float]],
111 | offset: typing.Tuple[typing.Union[int, float], typing.Union[int, float]] = None,
112 | *_,
113 | **__,
114 | ):
115 | """
116 | init crop hook, (height, width)
117 |
118 | :param size:
119 | :param offset:
120 | :param _:
121 | :param __:
122 | """
123 | super().__init__(*_, **__)
124 |
125 | self.size = size
126 | self.offset = offset or (0, 0)
127 | logger.debug(f"size: {self.size}")
128 | logger.debug(f"offset: {self.offset}")
129 |
130 | @staticmethod
131 | def is_proportion(
132 | target: typing.Tuple[typing.Union[int, float], typing.Union[int, float]]
133 | ) -> bool:
134 | return len([i for i in target if 0.0 <= i <= 1.0]) == 2
135 |
136 | @staticmethod
137 | def convert(
138 | origin_h: int,
139 | origin_w: int,
140 | input_h: typing.Union[float, int],
141 | input_w: typing.Union[float, int],
142 | ) -> typing.Tuple[typing.Union[int, float], typing.Union[int, float]]:
143 | if _AreaBaseHook.is_proportion((input_h, input_w)):
144 | return origin_h * input_h, origin_w * input_w
145 | return input_h, input_w
146 |
147 | def convert_size_and_offset(
148 | self, *origin_size
149 | ) -> typing.Tuple[typing.Tuple, typing.Tuple]:
150 | # convert to real size
151 | logger.debug(f"origin size: ({origin_size})")
152 | size_h, size_w = self.convert(*origin_size, *self.size)
153 | logger.debug(f"size: ({size_h}, {size_w})")
154 | offset_h, offset_w = self.convert(*origin_size, *self.offset)
155 | logger.debug(f"offset: {offset_h}, {offset_w}")
156 | height_range, width_range = (
157 | (int(offset_h), int(offset_h + size_h)),
158 | (int(offset_w), int(offset_w + size_w)),
159 | )
160 | logger.debug(f"final range h: {height_range}, w: {width_range}")
161 | return height_range, width_range
162 |
163 |
164 | class CropHook(_AreaBaseHook):
165 | """this hook was built for cropping frames, eg: keep only a half of origin frame"""
166 |
167 | def do(self, frame: VideoFrame, *_, **__) -> typing.Optional[VideoFrame]:
168 | super().do(frame, *_, **__)
169 |
170 | height_range, width_range = self.convert_size_and_offset(*frame.data.shape)
171 | # ignore the rest of this frame
172 | # same as IgnoreHook
173 | frame.data[: height_range[0], :] = 0
174 | frame.data[height_range[1] :, :] = 0
175 | frame.data[:, : width_range[0]] = 0
176 | frame.data[:, width_range[1] :] = 0
177 | return frame
178 |
179 |
180 | class IgnoreHook(_AreaBaseHook):
181 | """ignore some area of frames"""
182 |
183 | def do(self, frame: VideoFrame, *_, **__) -> typing.Optional[VideoFrame]:
184 | super().do(frame, *_, **__)
185 |
186 | height_range, width_range = self.convert_size_and_offset(*frame.data.shape)
187 | # ignore this area
188 | frame.data[
189 | height_range[0] : height_range[1], width_range[0] : width_range[1]
190 | ] = 0
191 | return frame
192 |
193 |
194 | # --- inner hook end ---
195 |
196 |
197 | class FrameSaveHook(BaseHook):
198 | """add this hook, and save all the frames you want to specific dir"""
199 |
200 | def __init__(self, target_dir: str, *_, **__):
201 | super().__init__(*_, **__)
202 |
203 | # init target dir
204 | self.target_dir = target_dir
205 | os.makedirs(target_dir, exist_ok=True)
206 |
207 | logger.debug(f"target dir: {target_dir}")
208 |
209 | def do(self, frame: VideoFrame, *_, **__) -> typing.Optional[VideoFrame]:
210 | super().do(frame, *_, **__)
211 | safe_timestamp = str(frame.timestamp).replace(".", "_")
212 | frame_name = f"{frame.frame_id}({safe_timestamp}).png"
213 | target_path = os.path.join(self.target_dir, frame_name)
214 | cv2.imwrite(target_path, frame.data)
215 | logger.debug(f"frame saved to {target_path}")
216 | return frame
217 |
218 |
219 | class InterestPointHook(BaseHook):
220 | """use ORB detector to get the number of interest points"""
221 |
222 | def __init__(self, *_, **__):
223 | super().__init__(*_, **__)
224 | self._orb = cv2.ORB_create()
225 |
226 | def do(self, frame: VideoFrame, *_, **__) -> typing.Optional[VideoFrame]:
227 | super().do(frame, *_, **__)
228 | kp = self._orb.detect(frame.data, None)
229 | self.result[frame.frame_id] = len(kp)
230 | return frame
231 |
232 |
233 | class InvalidFrameDetectHook(BaseHook):
234 | def __init__(self, *_, **__):
235 | super(InvalidFrameDetectHook, self).__init__(*_, **__)
236 | raise DeprecationWarning("you'd better use EmptyFrameDetectHook instead")
237 |
238 |
239 | class TemplateCompareHook(BaseHook):
240 | def __init__(self, template_dict: typing.Dict[str, str], *args, **kwargs):
241 | """
242 | args and kwargs will be sent to findit.__init__
243 |
244 | :param template_dict:
245 | # k: template name
246 | # v: template picture path
247 | :param args:
248 | :param kwargs:
249 | """
250 | super().__init__(*args, **kwargs)
251 | self.fi = FindIt(*args, **kwargs)
252 | self.template_dict = template_dict
253 |
254 | def do(self, frame: VideoFrame, *_, **__) -> typing.Optional[VideoFrame]:
255 | super().do(frame, *_, **__)
256 | for each_template_name, each_template_path in self.template_dict.items():
257 | self.fi.load_template(each_template_name, each_template_path)
258 | res = self.fi.find(str(frame.frame_id), target_pic_object=frame.data)
259 | logger.debug(f"compare with template {self.template_dict}: {res}")
260 | self.result[frame.frame_id] = res
261 | return frame
262 |
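
Following the `ExampleHook` pattern above, here is a minimal custom hook; `BrightnessHook` is an illustrative name, not part of the library. It only records data, so it returns the frame untouched:

```python
import typing

import numpy as np

from stagesepx.cutter import VideoCutter
from stagesepx.hook import BaseHook
from stagesepx.video import VideoFrame


class BrightnessHook(BaseHook):
    """record the mean brightness of every frame (illustrative)"""

    def do(self, frame: VideoFrame, *_, **__) -> typing.Optional[VideoFrame]:
        super().do(frame, *_, **__)
        self.result[frame.frame_id] = float(np.mean(frame.data))
        return frame  # unchanged; no need for 'overwrite' here


hook = BrightnessHook()
cutter = VideoCutter()
cutter.add_hook(hook)
cutter.cut("demo.mp4")  # assumes a local demo.mp4
print(hook.result)  # {frame_id: brightness, ...}
```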
--------------------------------------------------------------------------------
/stagesepx/reporter.py:
--------------------------------------------------------------------------------
1 | import os
2 | import typing
3 | import json
4 | import numpy as np
5 | from markupsafe import Markup
6 | from jinja2 import Template
7 | from pyecharts.charts import Line, Bar, Page
8 | from pyecharts import options as opts
9 | from loguru import logger
10 |
11 | from stagesepx.classifier import ClassifierResult, SingleClassifierResult
12 | from stagesepx import toolbox
13 | from stagesepx import constants
14 | from stagesepx.cutter import VideoCutResult, VideoCutRange
15 | from stagesepx.video import VideoFrame
16 | from stagesepx import __VERSION__
17 |
18 | # load template
19 | template_dir_path = os.path.join(os.path.dirname(__file__), "template")
20 | template_path = os.path.join(template_dir_path, "report.html")
21 |
22 |
23 | def get_template() -> str:
24 | with open(template_path, encoding=constants.CHARSET) as t:
25 | template = t.read()
26 | return template
27 |
28 |
29 | class Reporter(object):
30 | # 3 status:
31 | # - `stable` means nothing happened (nearly) during this period
32 | # - `unstable` means something happened
33 | # - `unspecific` means your model has no idea about `which class this frame should be` (lower than threshold)
34 | LABEL_STABLE: str = "stable"
35 | LABEL_UNSTABLE: str = "unstable"
36 | # unknown stage actually
37 | LABEL_UNSPECIFIC: str = "unspecific"
38 |
39 | def __init__(self):
40 | self.thumbnail_list: typing.List[typing.Tuple[str, str]] = list()
41 | self.extra_dict: typing.Dict[str, str] = dict()
42 |
43 | def add_thumbnail(self, name: str, pic_object: np.ndarray):
44 | """
45 | add picture object (cv2) to your report
46 |
47 | :param name:
48 | :param pic_object:
49 | :return:
50 | """
51 | b64_str = toolbox.np2b64str(pic_object)
52 | self.thumbnail_list.append((name, b64_str))
53 |
54 | def add_extra(self, name: str, value: str):
55 | """
56 | add some extra info (key-value pairs) to your report
57 |
58 | :param name:
59 | :param value:
60 | :return:
61 | """
62 | self.extra_dict[name] = value
63 |
64 | @staticmethod
65 | def _draw_line(result: ClassifierResult) -> Line:
66 | # draw line chart
67 | x_axis = [str(i) for i in result.get_timestamp_list()]
68 | y_axis = result.get_stage_list()
69 |
70 | line = Line(init_opts=opts.InitOpts(bg_color=constants.BACKGROUND_COLOR))
71 | line.add_xaxis(x_axis)
72 | line.add_yaxis("stage", y_axis, is_step=False, is_symbol_show=True)
73 | line.set_global_opts(
74 | title_opts=opts.TitleOpts(
75 | title="Trend", subtitle="describe how these stages switching"
76 | ),
77 | toolbox_opts=opts.ToolboxOpts(is_show=True),
78 | tooltip_opts=opts.TooltipOpts(
79 | is_show=True, trigger="axis", axis_pointer_type="cross"
80 | ),
81 | brush_opts=opts.BrushOpts(x_axis_index="all", tool_box=["lineX"]),
82 | )
83 | return line
84 |
85 | @staticmethod
86 | def _draw_sim(data: VideoCutResult) -> Line:
87 | x_axis = [str(i.start) for i in data.range_list]
88 | ssim_axis = [i.ssim for i in data.range_list]
89 | mse_axis = [i.mse for i in data.range_list]
90 | psnr_axis = [i.psnr for i in data.range_list]
91 |
92 | line = Line(init_opts=opts.InitOpts(bg_color=constants.BACKGROUND_COLOR))
93 | line.add_xaxis(x_axis)
94 | line.add_yaxis("ssim", ssim_axis)
95 | line.add_yaxis("mse", mse_axis)
96 | line.add_yaxis("psnr", psnr_axis)
97 | line.set_global_opts(
98 | title_opts=opts.TitleOpts(title="SIM"),
99 | toolbox_opts=opts.ToolboxOpts(is_show=True),
100 | tooltip_opts=opts.TooltipOpts(
101 | is_show=True, trigger="axis", axis_pointer_type="cross"
102 | ),
103 | )
104 | line.set_series_opts(label_opts=opts.LabelOpts(is_show=False))
105 | return line
106 |
107 | @staticmethod
108 | def _draw_bar(result: ClassifierResult) -> Bar:
109 | # draw bar chart
110 | bar = Bar(init_opts=opts.InitOpts(bg_color=constants.BACKGROUND_COLOR))
111 | x_axis = sorted(list(result.get_stage_set()))
112 | y_axis = list()
113 | offset = result.get_offset()
114 | for each_stage_name in x_axis:
115 | ranges = result.get_specific_stage_range(each_stage_name)
116 | time_cost: float = 0.0
117 | for each in ranges:
118 | # last frame - first frame
119 | time_cost += each[-1].timestamp - each[0].timestamp + offset
120 | y_axis.append(time_cost)
121 |
122 | bar.add_xaxis(x_axis)
123 | bar.add_yaxis("time cost", y_axis)
124 | bar.set_global_opts(
125 | title_opts=opts.TitleOpts(title="Time Cost", subtitle="... of each stage"),
126 | toolbox_opts=opts.ToolboxOpts(is_show=True),
127 | )
128 | logger.debug(f"time cost: {dict(zip(x_axis, y_axis))}")
129 | return bar
130 |
131 | @staticmethod
132 | def get_stable_stage_sample_frame_list(
133 | result: ClassifierResult, *args, **kwargs
134 | ) -> typing.List[VideoFrame]:
135 | # VideoFrame: with data
136 | # SingleClassifierResult: without data
137 | last = result.data[0]
138 | picked: typing.List[SingleClassifierResult] = [last]
139 | for each in result.data:
140 | # ignore unstable stage
141 | if not each.is_stable():
142 | continue
143 | if last.stage != each.stage:
144 | last = each
145 | picked.append(each)
146 |
147 | return [each.to_video_frame(*args, **kwargs) for each in picked]
148 |
149 | @classmethod
150 | def get_stable_stage_sample(
151 | cls, result: ClassifierResult, *args, **kwargs
152 | ) -> np.ndarray:
153 | def get_split_line(f):
154 | return np.zeros((f.shape[0], 5))
155 |
156 | frame_list: typing.List[np.ndarray] = list()
157 | for each in cls.get_stable_stage_sample_frame_list(result, *args, **kwargs):
158 | frame_list.append(each.data)
159 | frame_list.append(get_split_line(each.data))
160 | return np.hstack(frame_list)
161 |
162 | @classmethod
163 | def save(cls, to_file: str, result: ClassifierResult):
164 | assert not os.path.isfile(to_file), f"file {to_file} already exists"
165 | data = [i.to_dict() for i in result.data]
166 | with open(to_file, "w", encoding=constants.CHARSET) as f:
167 | json.dump(data, f)
168 |
169 | @classmethod
170 | def load(cls, from_file: str) -> ClassifierResult:
171 | assert os.path.isfile(from_file), f"file {from_file} does not exist"
172 | with open(from_file, encoding=constants.CHARSET) as f:
173 | content = json.load(f)
174 | return ClassifierResult([SingleClassifierResult(**each) for each in content])
175 |
176 | def draw(
177 | self,
178 | classifier_result: ClassifierResult,
179 | report_path: str = None,
180 | unstable_ranges: typing.List[VideoCutRange] = None,
181 | cut_result: VideoCutResult = None,
182 | compress_rate: float = None,
183 | target_size: typing.Tuple[int, int] = None,
184 | *_,
185 | **__,
186 | ):
187 | """
188 | draw report file
189 |
190 | :param classifier_result: ClassifierResult, output of classifier
191 | :param report_path: your report will be there
192 | :param unstable_ranges: for marking unstable ranges
193 | :param cut_result: more charts would be built
194 | :param compress_rate: compress rate of thumbnails, default to 0.2
195 | :param target_size: (width, height) of thumbnails; if set, compress_rate is ignored
196 | :return:
197 | """
198 | # default: compress_rate
199 | if not compress_rate:
200 | compress_rate = 0.2
201 | if not unstable_ranges:
202 | unstable_ranges = []
203 |
204 | # draw
205 | line = self._draw_line(classifier_result)
206 | bar = self._draw_bar(classifier_result)
207 |
208 | # merge charts
209 | page = Page()
210 | page.add(line)
211 | page.add(bar)
212 |
213 | # insert pictures
214 | if cut_result:
215 | # sim chart
216 | sim_line = self._draw_sim(cut_result)
217 | page.add(sim_line)
218 |
219 | # mark range
220 | for each_range in unstable_ranges:
221 | classifier_result.mark_range_unstable(each_range.start, each_range.end)
222 |
223 | offset = classifier_result.get_offset()
224 | stage_range = classifier_result.get_stage_range()
225 | for cur_index in range(len(stage_range)):
226 | each_range = stage_range[cur_index]
227 | middle = each_range[len(each_range) // 2]
228 | # which means this range is stable
229 | if middle.is_stable():
230 | label = self.LABEL_STABLE
231 | frame = toolbox.compress_frame(
232 | middle.get_data(),
233 | compress_rate=compress_rate,
234 | target_size=target_size,
235 | )
236 | # not stable
237 | else:
238 | # todo: looks not good enough. `unspecific` looks a little weird but I have no idea now
239 | if middle.stage == constants.UNKNOWN_STAGE_FLAG:
240 | label = self.LABEL_UNSPECIFIC
241 | else:
242 | label = self.LABEL_UNSTABLE
243 | # add one extra frame to make the range human readable
244 | if cur_index + 1 < len(stage_range):
245 | range_for_display = [*each_range, stage_range[cur_index + 1][0]]
246 | else:
247 | range_for_display = each_range
248 | # merge these frames into one
249 | # note: these frames should have the same size
250 | frame = np.hstack(
251 | [
252 | toolbox.compress_frame(
253 | i.get_data(),
254 | compress_rate=compress_rate,
255 | target_size=target_size,
256 | )
257 | for i in range_for_display
258 | ]
259 | )
260 |
261 | first, last = each_range[0], each_range[-1]
262 | self.add_thumbnail(
263 | f"{label} range {first.frame_id}({first.timestamp}) - {last.frame_id}({last.timestamp + offset}), "
264 | f"duration: {last.timestamp - first.timestamp + offset}, "
265 | f"stage: {first.stage}",
266 | frame,
267 | )
268 | # calc time cost
269 | cost_dict = classifier_result.calc_changing_cost()
270 |
271 | # time stamp
272 | timestamp = toolbox.get_timestamp_str()
273 |
274 | # video
275 | self.add_extra("video path", classifier_result.video_path)
276 | self.add_extra("frame count", str(classifier_result.get_length()))
277 | self.add_extra("offset between frames", str(classifier_result.get_offset()))
278 |
279 | # insert extras
280 | template = Template(get_template())
281 | template_content = template.render(
282 | chart=Markup(page.render_embed()),
283 | thumbnail_list=self.thumbnail_list,
284 | extras=self.extra_dict,
285 | background_color=constants.BACKGROUND_COLOR,
286 | cost_dict=cost_dict,
287 | timestamp=timestamp,
288 | version_code=__VERSION__,
289 | )
290 |
291 | # default: write to current dir
292 | default_name = f"{timestamp}.html"
293 | if not report_path:
294 | report_path = default_name
295 | # somewhere specific
296 | # existed dir?
297 | elif os.path.isdir(report_path):
298 | report_path = os.path.join(report_path, default_name)
299 | logger.debug(f"trying to save report to {report_path}")
300 |
301 | # write file
302 | with open(report_path, "w", encoding=constants.CHARSET) as fh:
303 | fh.write(template_content)
304 | logger.info(f"save report to {report_path}")
305 |
--------------------------------------------------------------------------------
/stagesepx/template/report.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 | stagesep-x report
15 |
16 |
17 |
18 |
40 |
41 |
42 |
43 | stagesep x report
44 |
45 |
46 |
47 |
57 |
58 |
59 |
60 |
61 | {% if thumbnail_list %}
62 |
63 |
64 |
65 |
66 | {% for name, each_thumbnail in thumbnail_list %}
67 |
68 | {{ name }}
69 |
70 |
71 | {% endfor %}
72 |
73 |
74 |
75 |
76 | {% endif %}
77 |
78 |
79 |
80 |
81 |
82 | Charts
83 |
84 | {{ chart }}
85 |
86 |
87 |
88 |
89 |
90 |
91 |
92 | WARNING: Result in this page may contain some incorrect parts caused by a deprecated API.
93 | Please use the `Stages` tab instead or handle `ClassifierResult` by yourself.
94 | View issue #115 for more details.
95 |
96 |
97 | {% if cost_dict %}
98 |
99 |
100 |
101 | Time Cost between stages
102 | Calculate the time spent on stage changes.
103 | {% for name, result in cost_dict.items() %}
104 | stage {{ name }}
105 |
106 | range: {{ result[0].frame_id }} - {{ result[1].frame_id }} ({{ result[0].timestamp }} - {{
107 | result[1].timestamp }})
108 |
109 | time cost: {{ result[1].timestamp - result[0].timestamp }}
110 |
111 | {% endfor %}
112 |
113 |
114 |
115 | {% endif %}
116 |
117 | {% if extras %}
118 |
119 |
120 |
121 | Extras
122 | {% for name, value in extras.items() %}
123 | {{ name }}
124 | {{ value }}
125 | {% endfor %}
126 |
127 |
128 |
129 | {% endif %}
130 |
131 |
132 |
133 |
134 | Stages
135 | Page `stages` will show what happened in your video, with precise timestamps and thumbnails.
136 |
137 |
138 | - `stable` means nothing happened (nearly) during this period
139 | - `unstable` means something happened
140 | - `unspecific` means your model has no idea about `which class this frame should be` (lower than threshold)
141 |
142 |
143 |
144 | You can view #75 for more details.
145 |
146 |
147 |
148 | Need more help?
149 | View our main page for more support, or contact us by opening issues.
150 |
151 |
152 |
153 |
154 |
155 |
156 |
165 |
166 |
167 |
168 |
169 |
170 |
171 |
--------------------------------------------------------------------------------
/stagesepx/toolbox.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import contextlib
3 | import time
4 | import random
5 | import typing
6 | import math
7 | import os
8 | import numpy as np
9 | import subprocess
10 | from base64 import b64encode
11 | from skimage.metrics import structural_similarity as origin_compare_ssim
12 | from skimage.metrics import normalized_root_mse as compare_nrmse
13 | from skimage.metrics import peak_signal_noise_ratio as compare_psnr
14 | from skimage.feature import hog, local_binary_pattern
15 | from loguru import logger
16 | from findit import FindIt
17 |
18 |
19 | # DO NOT IMPORT ANYTHING FROM STAGESEPX HERE
20 | # MAKE TOOLBOX STATIC
21 |
22 |
23 | @contextlib.contextmanager
24 | def video_capture(video_path: str):
25 | video_cap = cv2.VideoCapture(video_path)
26 | try:
27 | yield video_cap
28 | finally:
29 | video_cap.release()
30 |
31 |
32 | def video_jump(video_cap: cv2.VideoCapture, frame_id: int):
33 | # IMPORTANT:
34 | # - frame is a range actually
35 | # - frame 1 's timestamp is the beginning of this frame
36 | #
37 | # video_jump(cap, 2) means: moving the pointer to the start point of frame 2 => the end point of frame 1
38 |
39 | # another -1 for re-read
40 | video_cap.set(cv2.CAP_PROP_POS_FRAMES, frame_id - 1 - 1)
41 | video_cap.read()
42 |
43 | # notice: this timestamp may not be correct because of resync by moviepy
44 | # logger.debug(
45 | # f"previous pointer: {get_current_frame_id(video_cap)}({get_current_frame_time(video_cap)})"
46 | # )
47 |
48 |
49 | def compare_ssim(pic1: np.ndarray, pic2: np.ndarray) -> float:
50 | pic1, pic2 = [turn_grey(i) for i in [pic1, pic2]]
51 | return origin_compare_ssim(pic1, pic2)
52 |
53 |
54 | def multi_compare_ssim(
55 | pic1_list: typing.List, pic2_list: typing.List, hooks: typing.List = None
56 | ) -> typing.List[float]:
57 | # avoid import loop
58 | from stagesepx.video import VideoFrame
59 |
60 | if isinstance(pic1_list[0], VideoFrame):
61 | if hooks:
62 | for each in hooks:
63 | pic1_list = [each.do(each_frame) for each_frame in pic1_list]
64 | pic1_list = [i.data for i in pic1_list]
65 |
66 | if isinstance(pic2_list[0], VideoFrame):
67 | if hooks:
68 | for each in hooks:
69 | pic2_list = [each.do(each_frame) for each_frame in pic2_list]
70 | pic2_list = [i.data for i in pic2_list]
71 |
72 | return [compare_ssim(a, b) for a, b in zip(pic1_list, pic2_list)]
73 |
74 |
75 | def get_current_frame_id(video_cap: cv2.VideoCapture) -> int:
76 | # IMPORTANT:
77 | # this id is the frame which has already been grabbed
78 | # we jump to 5, which means the next frame will be 5
79 | # so the current frame id is: 5 - 1 = 4
80 | return int(video_cap.get(cv2.CAP_PROP_POS_FRAMES))
81 |
82 |
83 | def get_current_frame_time(video_cap: cv2.VideoCapture) -> float:
84 | # same as get_current_frame_id, take good care of them
85 | return video_cap.get(cv2.CAP_PROP_POS_MSEC) / 1000
86 |
87 |
88 | def imread(img_path: str, *_, **__) -> np.ndarray:
89 | """wrapper of cv2.imread"""
90 | assert os.path.isfile(img_path), f"file {img_path} does not exist"
91 | return cv2.imread(img_path, *_, **__)
92 |
93 |
94 | def get_frame_time(
95 | video_cap: cv2.VideoCapture, frame_id: int, recover: bool = None
96 | ) -> float:
97 | cur = get_current_frame_id(video_cap)
98 | video_jump(video_cap, frame_id)
99 | result = get_current_frame_time(video_cap)
100 | logger.debug(f"frame {frame_id} -> {result}")
101 |
102 | if recover:
103 | video_jump(video_cap, cur + 1)
104 | return result
105 |
106 |
107 | def get_frame_count(video_cap: cv2.VideoCapture) -> int:
108 | # NOT always accurate, see:
109 | # https://stackoverflow.com/questions/31472155/python-opencv-cv2-cv-cv-cap-prop-frame-count-get-wrong-numbers
110 | return int(video_cap.get(cv2.CAP_PROP_FRAME_COUNT))
111 |
112 |
113 | def get_frame_size(video_cap: cv2.VideoCapture) -> typing.Tuple[int, int]:
114 | """return size of frame: (width, height)"""
115 | h = video_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
116 | w = video_cap.get(cv2.CAP_PROP_FRAME_WIDTH)
117 | return int(w), int(h)
118 |
119 |
120 | def get_frame(
121 | video_cap: cv2.VideoCapture, frame_id: int, recover: bool = None
122 | ) -> np.ndarray:
123 | cur = get_current_frame_id(video_cap)
124 | video_jump(video_cap, frame_id)
125 | ret, frame = video_cap.read()
126 | assert ret, f"read frame failed, frame id: {frame_id}"
127 |
128 | if recover:
129 | video_jump(video_cap, cur + 1)
130 | return frame
131 |
132 |
133 | def turn_grey(old: np.ndarray) -> np.ndarray:
134 | try:
135 | return cv2.cvtColor(old, cv2.COLOR_RGB2GRAY)
136 | except cv2.error:
137 | return old
138 |
139 |
140 | def turn_binary(old: np.ndarray) -> np.ndarray:
141 | grey = turn_grey(old).astype("uint8")
142 | return cv2.adaptiveThreshold(
143 | grey, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2
144 | )
145 |
146 |
147 | def turn_hog_desc(old: np.ndarray) -> np.ndarray:
148 | fd, _ = hog(
149 | old,
150 | orientations=8,
151 | pixels_per_cell=(16, 16),
152 | cells_per_block=(1, 1),
153 | block_norm="L2-Hys",
154 | visualize=True,
155 | )
156 |
157 | # also available with opencv-python
158 | # hog = cv2.HOGDescriptor()
159 | # return hog.compute(old)
160 | return fd
161 |
162 |
163 | def turn_lbp_desc(old: np.ndarray, radius: int = None) -> np.ndarray:
164 | if not radius:
165 | radius = 3
166 | n_points = 8 * radius
167 |
168 | grey = turn_grey(old)
169 | lbp = local_binary_pattern(grey, n_points, radius, method="default")
170 | return lbp
171 |
172 |
173 | def turn_blur(old: np.ndarray) -> np.ndarray:
174 | # TODO these args are locked and can not be changed
175 | return cv2.GaussianBlur(old, (7, 7), 0)
176 |
177 |
178 | def sharpen_frame(old: np.ndarray) -> np.ndarray:
179 | """
180 | refine the edges of an image
181 |
182 | - https://answers.opencv.org/question/121205/how-to-refine-the-edges-of-an-image/
183 | - https://stackoverflow.com/questions/4993082/how-to-sharpen-an-image-in-opencv
184 |
185 | :param old:
186 | :return:
187 | """
188 |
189 | # TODO these args are locked and can not be changed
190 | blur = turn_blur(old)
191 | smooth = cv2.addWeighted(blur, 1.5, old, -0.5, 0)
192 | canny = cv2.Canny(smooth, 50, 150)
193 | return canny
194 |
195 |
196 | def calc_mse(pic1: np.ndarray, pic2: np.ndarray) -> float:
197 | # MSE: https://en.wikipedia.org/wiki/Mean_squared_error
198 | # return np.sum((pic1.astype('float') - pic2.astype('float')) ** 2) / float(pic1.shape[0] * pic2.shape[1])
199 | return compare_nrmse(pic1, pic2)
200 |
201 |
202 | def calc_psnr(pic1: np.ndarray, pic2: np.ndarray) -> float:
203 | # PSNR: https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio
204 | psnr = compare_psnr(pic1, pic2)
205 | # when err == 0, psnr will be 'inf'
206 | if math.isinf(psnr):
207 | psnr = 100.0
208 | # normalize
209 | return psnr / 100
210 |
211 |
212 | def compress_frame(
213 | old: np.ndarray,
214 | compress_rate: float = None,
215 | target_size: typing.Tuple[int, int] = None,
216 | not_grey: bool = None,
217 | interpolation: int = None,
218 | *_,
219 | **__,
220 | ) -> np.ndarray:
221 | """
222 | Compress frame
223 |
224 | :param old:
225 | origin frame
226 |
227 | :param compress_rate:
228 | before_pic * compress_rate = after_pic. default to 1 (no compression)
229 | eg: 0.2 means 1/5 size of before_pic
230 |
231 | :param target_size:
232 | tuple. (100, 200) means compressing before_pic to 100x200
233 |
234 | :param not_grey:
235 | convert into grey if True
236 |
237 | :param interpolation:
238 | :return:
239 | """
240 |
241 | target = turn_grey(old) if not not_grey else old
242 |
243 | if not interpolation:
244 | interpolation = cv2.INTER_AREA
245 | # target size first
246 | if target_size:
247 | return cv2.resize(target, target_size, interpolation=interpolation)
248 | # else, use compress rate
249 | # default rate is 1 (no compression)
250 | if not compress_rate:
251 | return target
252 | return cv2.resize(
253 | target, (0, 0), fx=compress_rate, fy=compress_rate, interpolation=interpolation
254 | )
255 |
256 |
257 | def get_timestamp_str() -> str:
258 | time_str = time.strftime("%Y%m%d%H%M%S", time.localtime())
259 | salt = random.randint(10, 99)
260 | return f"{time_str}{salt}"
261 |
262 |
263 | def np2b64str(frame: np.ndarray) -> str:
264 | buffer = cv2.imencode(".png", frame)[1].tobytes()  # tostring() is a deprecated alias of tobytes()
265 | return b64encode(buffer).decode()
266 |
267 |
268 | def fps_convert(
269 | target_fps: int, source_path: str, target_path: str, ffmpeg_exe: str = None
270 | ) -> int:
271 | # for portable ffmpeg
272 | if not ffmpeg_exe:
273 | ffmpeg_exe = r"ffmpeg"
274 | command: typing.List[str] = [
275 | ffmpeg_exe,
276 | "-i",
277 | source_path,
278 | "-r",
279 | str(target_fps),
280 | target_path,
281 | ]
282 | logger.debug(f"convert video: {command}")
283 | return subprocess.check_call(command)
284 |
285 |
286 | def match_template_with_object(
287 | template: np.ndarray,
288 | target: np.ndarray,
289 | engine_template_cv_method_name: str = None,
290 | **kwargs,
291 | ) -> typing.Dict[str, typing.Any]:
292 | # change the default method
293 | if not engine_template_cv_method_name:
294 | engine_template_cv_method_name = "cv2.TM_CCOEFF_NORMED"
295 |
296 | fi = FindIt(
297 | engine=["template"],
298 | engine_template_cv_method_name=engine_template_cv_method_name,
299 | **kwargs,
300 | )
301 | # load template
302 | fi_template_name = "default"
303 | fi.load_template(fi_template_name, pic_object=template)
304 |
305 | result = fi.find(target_pic_name="", target_pic_object=target, **kwargs)
306 | logger.debug(f"findit result: {result}")
307 | return result["data"][fi_template_name]["TemplateEngine"]
308 |
309 |
310 | def match_template_with_path(
311 | template: str, target: np.ndarray, **kwargs
312 | ) -> typing.Dict[str, typing.Any]:
313 | assert os.path.isfile(template), f"image {template} does not exist"
314 | template_object = turn_grey(imread(template))
315 | return match_template_with_object(template_object, target, **kwargs)
316 |
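
A small sketch of the toolbox primitives on two frames read straight from a capture (assuming a local `demo.mp4` with at least 11 frames). Note that `compare_ssim` greys both inputs internally, and `compress_frame` greys by default too:

```python
from stagesepx import toolbox

with toolbox.video_capture("demo.mp4") as cap:
    frame_a = toolbox.get_frame(cap, 10)
    frame_b = toolbox.get_frame(cap, 11)

# downscale before comparing, just like the inner CompressHook does
frame_a = toolbox.compress_frame(frame_a, compress_rate=0.2)
frame_b = toolbox.compress_frame(frame_b, compress_rate=0.2)
print(toolbox.compare_ssim(frame_a, frame_b))  # 1.0 means identical
```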
--------------------------------------------------------------------------------
/stagesepx/video.py:
--------------------------------------------------------------------------------
1 | import os
2 | import tempfile
3 | import typing
4 |
5 | import cv2
6 | import imageio_ffmpeg
7 | import numpy as np
8 | from loguru import logger
9 | import moviepy.editor as mpy
10 |
11 | from stagesepx import toolbox
12 |
13 | if typing.TYPE_CHECKING:
14 | from stagesepx.hook import BaseHook
15 |
16 |
17 | class VideoFrame(object):
18 | def __init__(self, frame_id: int, timestamp: float, data: np.ndarray):
19 | self.frame_id: int = frame_id
20 | self.timestamp: float = timestamp
21 | self.data: np.ndarray = data
22 |
23 | def __str__(self):
24 | return f""
25 |
26 | @classmethod
27 | def init(cls, cap: cv2.VideoCapture, frame: np.ndarray) -> "VideoFrame":
28 | frame_id = toolbox.get_current_frame_id(cap)
29 | timestamp = toolbox.get_current_frame_time(cap)
30 | grey = toolbox.turn_grey(frame)
31 | logger.debug(f"new a frame: {frame_id}({timestamp})")
32 | return VideoFrame(frame_id, timestamp, grey)
33 |
34 | def copy(self):
35 | return VideoFrame(self.frame_id, self.timestamp, self.data[:])
36 |
37 | def contain_image(
38 | self, *, image_path: str = None, image_object: np.ndarray = None, **kwargs
39 | ) -> typing.Dict[str, typing.Any]:
40 | assert image_path or (
41 | image_object is not None
42 | ), "should fill image_path or image_object"
43 |
44 | if image_path:
45 | logger.debug(f"found image path, use it first: {image_path}")
46 | return toolbox.match_template_with_path(image_path, self.data, **kwargs)
47 | image_object = toolbox.turn_grey(image_object)
48 | return toolbox.match_template_with_object(image_object, self.data, **kwargs)
49 |
50 |
51 | class _BaseFrameOperator(object):
52 | def __init__(self, video: "VideoObject"):
53 | # pointer
54 | self.cur_ptr: int = 0
55 | self.video: VideoObject = video
56 |
57 | def get_frame_by_id(self, frame_id: int) -> typing.Optional[VideoFrame]:
58 | raise NotImplementedError
59 |
60 | def get_length(self) -> int:
61 | return self.video.frame_count
62 |
63 |
64 | class MemFrameOperator(_BaseFrameOperator):
65 | def get_frame_by_id(self, frame_id: int) -> typing.Optional[VideoFrame]:
66 | if frame_id > self.get_length():
67 | return None
68 | # list starts from zero, but frame starts from one
69 | frame_id = frame_id - 1
70 | return self.video.data[frame_id].copy()
71 |
72 |
73 | class FileFrameOperator(_BaseFrameOperator):
74 | def get_frame_by_id(self, frame_id: int) -> typing.Optional[VideoFrame]:
75 | if frame_id > self.get_length():
76 | return None
77 | with toolbox.video_capture(self.video.path) as cap:
78 | toolbox.video_jump(cap, frame_id)
79 | success, frame = cap.read()
80 | video_frame = VideoFrame.init(cap, frame) if success else None
81 | return video_frame
82 |
83 |
84 | class VideoObject(object):
85 | def __init__(
86 | self,
87 | path: typing.Union[str, os.PathLike],
88 | pre_load: bool = None,
89 | fps: int = None,
90 | *_,
91 | **__,
92 | ):
93 | assert os.path.isfile(path), f"video {path} does not exist"
94 | self.path: str = str(path)
95 | self.data: typing.Optional[typing.Tuple[VideoFrame, ...]] = tuple()
96 | self._hook_list: typing.List["BaseHook"] = []
97 |
98 | self.fps: int = fps
99 | if fps:
100 | video_path = os.path.join(tempfile.mkdtemp(), f"tmp_{fps}.mp4")
101 | logger.debug(f"convert video, and bind path to {video_path}")
102 | toolbox.fps_convert(
103 | fps, self.path, video_path, imageio_ffmpeg.get_ffmpeg_exe()
104 | )
105 | self.path = video_path
106 |
107 | with toolbox.video_capture(self.path) as cap:
108 | self.frame_count = toolbox.get_frame_count(cap)
109 | self.frame_size = toolbox.get_frame_size(cap)
110 |
111 | if pre_load is not None:
112 | logger.warning(
113 | f"`pre_load` has been deprecated. use `video.load_frames()` instead"
114 | )
115 | logger.info(
116 | f"video object generated, length: {self.frame_count}, size: {self.frame_size}"
117 | )
118 |
119 | def __str__(self):
120 | return f""
121 |
122 | __repr__ = __str__
123 |
124 | def sync_timestamp(self):
125 | vid = mpy.VideoFileClip(self.path)
126 |
127 | # moviepy starts from frame 0, timestamp 0.0
128 | # but stagesepx starts from frame 1, timestamp 0.0
129 | assert self.data, "load_frames() first"
130 | for frame_id, (timestamp, _) in enumerate(vid.iter_frames(with_times=True)):
131 | if frame_id >= len(self.data):
132 | # ignore the rest
133 | break
134 | frame_id_real = frame_id + 1
135 | if not self.data[frame_id].timestamp:
136 | logger.debug(f"fix frame {frame_id_real}'s timestamp: {timestamp}")
137 | self.data[frame_id].timestamp = timestamp
138 | logger.info("sync timestamp with moviepy finished")
139 |
140 | def add_preload_hook(self, new_hook: "BaseHook"):
141 | """this hook only will be executed when preload"""
142 | self._hook_list.append(new_hook)
143 |
144 | def clean_frames(self):
145 | self.data = tuple()
146 |
147 | def load_frames(self, *args, **kwargs):
148 | logger.info(f"start loading {self.path} to memory ...")
149 |
150 | data: typing.List[VideoFrame] = []
151 | with toolbox.video_capture(self.path) as cap:
152 | # the first
153 | success, frame = cap.read()
154 | while success:
155 | frame_object = VideoFrame.init(cap, frame)
156 | # apply hooks
157 | for each_hook in self._hook_list:
158 | frame_object = each_hook.do(frame_object, *args, **kwargs)
159 | data.append(frame_object)
160 | # read the next one
161 | success, frame = cap.read()
162 |
163 | # calculate memory cost
164 | each_cost = data[0].data.nbytes
165 | logger.debug(f"single frame cost: {each_cost} bytes")
166 | total_cost = each_cost * self.frame_count
167 | logger.debug(f"total frame cost: {total_cost} bytes")
168 |
169 | # lock the order
170 | self.data = tuple(data)
171 | # fix the length (the last frame may sometimes be broken)
172 | self.frame_count = len(data)
173 | # and size (reversed, see: https://github.com/williamfzc/stagesepx/issues/132)
174 | self.frame_size = data[0].data.shape[::-1]
175 | logger.info(
176 | f"frames loaded. frame count: {self.frame_count}, size: {self.frame_size}, memory cost: {total_cost} bytes"
177 | )
178 |
179 | # sync timestamp for some newer versions of opencv
180 | # see: #178, #181
181 | self.sync_timestamp()
182 |
183 | def _read_from_file(self) -> typing.Generator[VideoFrame, None, None]:
184 | with toolbox.video_capture(self.path) as cap:
185 | success, frame = cap.read()
186 | while success:
187 | yield VideoFrame.init(cap, frame)
188 | success, frame = cap.read()
189 |
190 | def _read_from_mem(self) -> typing.Generator[VideoFrame, None, None]:
191 | for each_frame in self.data:
192 | yield each_frame
193 |
194 | def _read(self) -> typing.Generator[VideoFrame, None, None]:
195 | if self.data:
196 | yield from self._read_from_mem()
197 | else:
198 | yield from self._read_from_file()
199 |
200 | def get_iterator(self) -> typing.Generator[VideoFrame, None, None]:
201 | return self._read()
202 |
203 | def get_operator(self) -> _BaseFrameOperator:
204 | if self.data:
205 | return MemFrameOperator(self)
206 | return FileFrameOperator(self)
207 |
208 | def __iter__(self):
209 | return self.get_iterator()
210 |
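
A minimal sketch of `VideoObject` in both modes (assuming a local `demo.mp4`): iterate lazily straight from the file, or load all frames into memory and use an operator for random access:

```python
from stagesepx.video import VideoObject

video = VideoObject("demo.mp4")
print(video.frame_count, video.frame_size)

# lazy mode: frames are decoded on the fly (FileFrameOperator under the hood)
for frame in video:
    print(frame.frame_id, frame.timestamp)
    break

# eager mode: load everything, then fetch frames by id (MemFrameOperator)
video.load_frames()
operator = video.get_operator()
frame_5 = operator.get_frame_by_id(5)
```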
--------------------------------------------------------------------------------
/test/README.md:
--------------------------------------------------------------------------------
1 | # unittests
2 |
3 | Built with [pytest](https://docs.pytest.org/en/latest/contents.html)
4 |
5 | ## usage
6 |
7 | run `pytest`
8 |
9 | ## parts
10 |
11 | - test_api
12 | - test_cutter
13 | - test_classifier
14 | - test_hook
15 | - test_cli
16 | - test_diff
17 | - test_toolbox
18 | - test_video
19 |
--------------------------------------------------------------------------------
/test/min_run_config.json:
--------------------------------------------------------------------------------
1 | {
2 | "output": ".",
3 | "video": {
4 | "path": "demo.mp4"
5 | }
6 | }
7 |
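
This minimal config is what `test_cli.py` drives through `stagesepx run`; the same run can be launched from Python via the `run` API (as `test_api.py` does), passing the config as a dict:

```python
from stagesepx.api import run

# equivalent to: stagesepx run test/min_run_config.json
run({"output": ".", "video": {"path": "demo.mp4"}})
```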
--------------------------------------------------------------------------------
/test/run_config.json:
--------------------------------------------------------------------------------
1 | {
2 | "output": ".",
3 | "video": {
4 | "path": "demo.mp4",
5 | "pre_load": true,
6 | "fps": null
7 | },
8 | "cutter": {
9 | "threshold": 0.95,
10 | "frame_count": 3,
11 | "offset": 3,
12 | "limit": null,
13 | "block": 3,
14 | "compress_rate": 0.2,
15 | "target_size": [
16 | 600,
17 | 800
18 | ]
19 | },
20 | "classifier": {
21 | "classifier_type": "svm",
22 | "model": null,
23 | "boost_mode": true,
24 | "compress_rate": 0.2,
25 | "target_size": [
26 | 600,
27 | 800
28 | ]
29 | },
30 | "extras": {
31 | "save_train_set": "./trainset_run_config"
32 | },
33 | "calc": {
34 | "output": "some2.json",
35 | "ignore_error": false,
36 | "operators": [
37 | {
38 | "name": "calc_between_0_1",
39 | "calc_type": "between",
40 | "args": {
41 | "from_stage": "0",
42 | "to_stage": "1"
43 | }
44 | },
45 | {
46 | "name": "display everything",
47 | "calc_type": "display"
48 | }
49 | ]
50 | }
51 | }
52 |
--------------------------------------------------------------------------------
/test/run_config_with_model.json:
--------------------------------------------------------------------------------
1 | {
2 | "video": {
3 | "path": "demo.mp4"
4 | },
5 | "classifier": {
6 | "classifier_type": "keras",
7 | "model": "output.weights.h5"
8 | },
9 | "output": "."
10 | }
11 |
--------------------------------------------------------------------------------
/test/test_api.py:
--------------------------------------------------------------------------------
1 | import os
2 | import tempfile
3 | import uuid
4 | from pydantic import ValidationError
5 | import pytest
6 |
7 | from stagesepx.api import analyse, run, keras_train
8 |
9 | PROJECT_PATH = os.path.dirname(os.path.dirname(__file__))
10 | VIDEO_PATH = os.path.join(PROJECT_PATH, "demo.mp4")
11 |
12 |
13 | def _get_random_str():
14 | return str(uuid.uuid4())
15 |
16 |
17 | def test_analyse():
18 | with tempfile.NamedTemporaryFile(suffix=".html", mode="w") as f:
19 | analyse(VIDEO_PATH, f.name)
20 |
21 |
22 | @pytest.mark.skip(reason="temp removed in ci")
23 | def test_train():
24 | trainset = os.path.join(PROJECT_PATH, _get_random_str())
25 | mod = os.path.join(PROJECT_PATH, "a.weights.h5")
26 | config = {
27 | # fmt: off
28 | "video": {
29 | "path": VIDEO_PATH,
30 | "fps": 30,
31 | },
32 | "output": ".",
33 | "extras": {
34 | "save_train_set": trainset,
35 | }
36 | }
37 | run(config)
38 |
39 | # train
40 | keras_train(trainset, model_path=mod, epochs=1)
41 |
42 | # todo: weird. it did not work in github actions
43 | # predict with existed mod
44 | # config = {
45 | # # fmt: off
46 | # "video": {
47 | # "path": VIDEO_PATH,
48 | # },
49 | # "classifier": {
50 | # "classifier_type": "keras",
51 | # "model": mod,
52 | # },
53 | # "output": ".",
54 | # }
55 | # run(config)
56 |
57 |
58 | def test_run_validation():
59 | # enum
60 | config = {
61 | # fmt: off
62 | "video": {
63 | "path": VIDEO_PATH,
64 | },
65 | "classifier": {
66 | "classifier_type": "unknwonwonn",
67 | },
68 | "output": ".",
69 | }
70 | try:
71 | run(config)
72 | except ValidationError:
73 | pass
74 | else:
75 | raise TypeError("should raise an error if classifier_type is unexpected")
76 |
77 | config = {
78 | # fmt: off
79 | "video": {
80 | "path": VIDEO_PATH,
81 | },
82 | "output": ".",
83 | "calc": {
84 | "output": f"{_get_random_str()}.json",
85 | "operators": [
86 | {
87 | "name": "error_test",
88 | "calc_type": "unknwonww",
89 | },
90 | ]
91 | }
92 | }
93 | try:
94 | run(config)
95 | except ValidationError:
96 | pass
97 | else:
98 | raise TypeError("should raise an error if calc_type is unexpected")
99 |
100 | config = {
101 | # fmt: off
102 | "video": {
103 | "path": VIDEO_PATH,
104 | },
105 | "output": ".",
106 | "calc": {
107 | "output": f"{_get_random_str()}.json",
108 | "ignore_error": True,
109 | "operators": [
110 | {
111 | "name": "error_test",
112 | "calc_type": "between",
113 | "args": {
114 | "from_stage": "0",
115 | # unexpected stage
116 | "to_stage": "999",
117 | }
118 | },
119 | ]
120 | }
121 | }
122 | run(config)
123 |
124 |
125 | def test_run_calc():
126 | config = {
127 | # fmt: off
128 | "video": {
129 | "path": VIDEO_PATH,
130 | },
131 | "output": ".",
132 | "calc": {
133 | "output": f"{_get_random_str()}.json",
134 | "operators": [
135 | {
136 | "name": "calc_between_0_1",
137 | "calc_type": "between",
138 | "args": {
139 | "from_stage": "0",
140 | "to_stage": "1",
141 | },
142 | },
143 | {
144 | "name": "display everything",
145 | "calc_type": "display",
146 | }
147 | ]
148 | }
149 | }
150 | run(config)
151 |
152 |
153 | def test_diff():
154 | from stagesepx.api import _diff
155 |
156 | diff_object = _diff(VIDEO_PATH, VIDEO_PATH)
157 | assert diff_object
158 | assert not diff_object.any_stage_lost()
159 | assert diff_object.stage_diff()
160 |
--------------------------------------------------------------------------------
/test/test_classifier.py:
--------------------------------------------------------------------------------
1 | from stagesepx.classifier import SSIMClassifier, SVMClassifier
2 | from stagesepx.classifier.keras import KerasClassifier
3 | from stagesepx.classifier.base import ClassifierResult
4 | from stagesepx.reporter import Reporter
5 | from stagesepx.cutter import VideoCutResult
6 | from stagesepx import toolbox
7 | import numpy as np
8 |
9 | from test_cutter import test_default as cutter_default
10 | from test_cutter import RESULT_DIR as CUTTER_RESULT_DIR
11 |
12 | import os
13 |
14 | PROJECT_PATH = os.path.dirname(os.path.dirname(__file__))
15 | VIDEO_PATH = os.path.join(PROJECT_PATH, "demo.mp4")
16 | MODEL_PATH = os.path.join(PROJECT_PATH, "model.pkl")
17 | IMAGE_NAME = "demo.jpg"
18 | IMAGE_PATH = os.path.join(PROJECT_PATH, IMAGE_NAME)
19 |
20 | # cut, and get result dir
21 | cutter_res: VideoCutResult = cutter_default()
22 |
23 |
24 | def _draw_report(res):
25 | r = Reporter()
26 | report_path = os.path.join(CUTTER_RESULT_DIR, "report.html")
27 | r.draw(res, report_path=report_path)
28 | assert os.path.isfile(report_path)
29 |
30 |
31 | def test_default():
32 | # --- classify ---
33 | cl = SVMClassifier()
34 | cl.load(CUTTER_RESULT_DIR)
35 | cl.train()
36 | cl.save_model(MODEL_PATH, overwrite=True)
37 | cl.classify(VIDEO_PATH, boost_mode=False)
38 |
39 |
40 | def test_ssim_classifier():
41 | cl = SSIMClassifier()
42 | cl.load(CUTTER_RESULT_DIR)
43 | cl.classify(VIDEO_PATH, boost_mode=False)
44 |
45 |
46 | def test_work_with_cutter():
47 | cl = SVMClassifier()
48 | cl.load_model(MODEL_PATH)
49 | stable, _ = cutter_res.get_range()
50 | classify_result = cl.classify(VIDEO_PATH, stable)
51 |
52 | # --- draw ---
53 | _draw_report(classify_result)
54 |
55 |
56 | def test_save_and_load():
57 | cl = SVMClassifier()
58 | cl.load_model(MODEL_PATH)
59 | classify_result = cl.classify(VIDEO_PATH, boost_mode=False)
60 |
61 | result_file = "save.json"
62 | reporter = Reporter()
63 | reporter.add_extra("some_name", "some_value")
64 | reporter.save(result_file, classify_result)
65 | assert os.path.isfile(result_file)
66 | classify_result_after = Reporter.load(result_file)
67 |
68 | assert classify_result.get_length() == classify_result_after.get_length()
69 | for i, j in zip(classify_result.data, classify_result_after.data):
70 | assert i.to_dict() == j.to_dict()
71 |
72 | assert isinstance(reporter.get_stable_stage_sample(classify_result), np.ndarray)
73 |
74 |
75 | def test_keep_data():
76 | cl = SVMClassifier()
77 | cl.load_model(MODEL_PATH)
78 | stable, _ = cutter_res.get_range()
79 | classify_result = cl.classify(VIDEO_PATH, stable, keep_data=True)
80 |
81 | # todo: there is a known bug in the findit library here
82 | image_object = toolbox.imread(IMAGE_PATH)[0:20, 0:20]
83 | assert classify_result.data[0].contain_image(image_object=image_object)
84 |
85 |
86 | def test_result():
87 | cl = SVMClassifier()
88 | cl.load_model(MODEL_PATH)
89 | stable, _ = cutter_res.get_range()
90 | classify_result = cl.classify(VIDEO_PATH, stable, keep_data=True)
91 |
92 | assert classify_result.to_dict()
93 | classify_result.mark_range(1, 3, "0")
94 | classify_result.mark_range_unstable(1, 3)
95 | classify_result.get_important_frame_list()
96 | classify_result.get_stage_range()
97 | classify_result.get_specific_stage_range("0")
98 | classify_result.get_not_stable_stage_range()
99 | classify_result.mark_range_ignore(23, 24)
100 | classify_result.time_cost_between("0", "1")
101 | assert classify_result.contain("1")
102 | assert classify_result.first("1").frame_id == 20
103 | assert classify_result.last("1").frame_id == 21
104 | assert classify_result.is_order_correct(["0", "0", "1", "2"])
105 | assert classify_result.is_order_correct(["0", "0", "2"])
106 | assert classify_result.is_order_correct(["0", "1"])
107 | assert classify_result.is_order_correct(["0", "2"])
108 | assert classify_result.is_order_correct(["1", "2"])
109 |
110 |
111 | def test_dump_and_load():
112 | cl = SVMClassifier()
113 | cl.load_model(MODEL_PATH)
114 | classify_result = cl.classify(VIDEO_PATH, boost_mode=False)
115 |
116 | json_path = "classify_result.json"
117 | classify_result.dump(json_path)
118 |
119 | res_from_file = ClassifierResult.load(json_path)
120 | assert classify_result.dumps() == res_from_file.dumps()
121 |
122 | # test diff
123 | assert classify_result.diff(res_from_file).ok()
124 |
125 | diffobj = classify_result.diff(res_from_file)
126 | diffobj.get_diff_str()
127 |
128 |
129 | def test_keras():
130 | # set epochs to 1 for quick training (test only)
131 | cl = KerasClassifier(epochs=1)
132 | cl.train(CUTTER_RESULT_DIR)
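133 | # recent Keras versions expect a ".weights.h5" suffix for weight-only checkpoints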
133 | cl.save_model("haha.weights.h5")
134 | # recreate
135 | cl = KerasClassifier()
136 | cl.load_model("haha.weights.h5")
137 | stable, _ = cutter_res.get_range()
138 | classify_result = cl.classify(VIDEO_PATH, stable, keep_data=True)
139 | assert classify_result.to_dict()
140 | # prediction is not fully deterministic, so accept any known stage
141 | assert cl.predict(IMAGE_PATH) in ("0", "1", "2")
142 |
--------------------------------------------------------------------------------
/test/test_cli.py:
--------------------------------------------------------------------------------
1 | from loguru import logger
2 | import subprocess
3 | import os
4 |
5 | PROJECT_PATH = os.path.dirname(os.path.dirname(__file__))
6 | VIDEO_PATH = os.path.join(PROJECT_PATH, "demo.mp4")
7 |
8 | from test_cutter import test_default as cutter_default
9 | from test_cutter import RESULT_DIR as CUTTER_RESULT_DIR
10 |
11 | # prepare: build the cutter result dir consumed by the tests below
12 | cutter_default()
13 |
14 |
15 | def test_cli():
16 | logger.info("checking main")
17 | subprocess.check_call(["python3", "-m", "stagesepx.cli"])
18 |
19 |
20 | def test_analyse():
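21 | # the "stagesepx" console command is only available once the package is installed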
21 | output = "output.html"
22 | subprocess.check_call(["stagesepx", "analyse", VIDEO_PATH, output])
23 | os.remove(output)
24 |
25 |
26 | def test_train():
27 | mod = "output.weights.h5"
28 | subprocess.check_call(
29 | ["stagesepx", "train", CUTTER_RESULT_DIR, mod, "--epochs", "1"]
30 | )
31 | # predict
32 | subprocess.check_call(["stagesepx", "run", "test/run_config_with_model.json"])
33 |
34 |
35 | def test_with_min_config():
36 | subprocess.check_call(["stagesepx", "run", "test/min_run_config.json"])
37 |
38 |
39 | def test_with_fat_config():
40 | subprocess.check_call(["stagesepx", "run", "test/run_config.json"])
41 |
--------------------------------------------------------------------------------
/test/test_cutter.py:
--------------------------------------------------------------------------------
1 | from stagesepx.cutter import VideoCutter, VideoCutResult
2 | from stagesepx.cutter.cut_result import VideoCutResultDiff
3 | from stagesepx.video import VideoObject
4 | import os
5 | import shutil
6 | import numpy as np
7 |
8 | PROJECT_PATH = os.path.dirname(os.path.dirname(__file__))
9 | VIDEO_PATH = os.path.join(PROJECT_PATH, "demo.mp4")
10 | RESULT_DIR = os.path.join(PROJECT_PATH, "cut_result")
11 | IMAGE_NAME = "demo.jpg"
12 | IMAGE_PATH = os.path.join(PROJECT_PATH, IMAGE_NAME)
13 | assert os.path.isfile(IMAGE_PATH), f"{IMAGE_NAME} does not exist!"
14 |
15 |
16 | def test_default():
17 | cutter = VideoCutter()
18 | res = cutter.cut(VIDEO_PATH)
19 | stable, unstable = res.get_range()
20 | assert len(stable) == 3, "count of stable ranges is not correct"
21 |
22 | if os.path.exists(RESULT_DIR):
23 | shutil.rmtree(RESULT_DIR)
24 |
25 | data_home = res.pick_and_save(stable, 5, to_dir=RESULT_DIR)
26 | assert data_home == RESULT_DIR
27 | assert os.path.isdir(data_home), "result dir does not exist"
28 | 
29 | # run again to verify that an existing result dir is overwritten cleanly
30 | data_home = res.pick_and_save(stable, 5, to_dir=RESULT_DIR)
31 | assert data_home == RESULT_DIR
32 | assert os.path.isdir(data_home), "result dir does not exist"
33 |
34 | sub_dir_0 = os.path.join(data_home, "0")
35 | assert len(os.listdir(sub_dir_0)) == 10
36 | return res
37 |
38 |
39 | def test_limit():
40 | cutter = VideoCutter()
41 | res = cutter.cut(VIDEO_PATH)
42 | stable, unstable = res.get_range(limit=3)
43 | # limit=3 filters out stable ranges spanning fewer than 3 frames, leaving only one
44 | assert len(stable) == 1, "count of stable ranges is not correct"
45 |
46 |
47 | def test_step():
48 | cutter = VideoCutter(step=2)
49 | res = cutter.cut(VIDEO_PATH)
50 | stable, unstable = res.get_range()
51 | # step=2 makes the cutter sample every second frame, so fewer stable ranges are found
52 | assert len(stable) == 2, "count of stable ranges is not correct"
53 |
54 |
55 | def test_dump_and_load():
56 | cutter = VideoCutter()
57 | res = cutter.cut(VIDEO_PATH)
58 | json_path = "cutter_result.json"
59 | res.dump(json_path)
60 |
61 | res_from_file = VideoCutResult.load(json_path)
62 | assert res.dumps() == res_from_file.dumps()
63 |
64 |
65 | def test_prune():
66 | cutter = VideoCutter()
67 | res = cutter.cut(VIDEO_PATH)
68 | stable, unstable = res.get_range()
69 | assert len(stable) == 3, "count of stable ranges is not correct"
70 |
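71 | # prune is assumed to drop stages whose picked frames are nearly identical (similarity >= 0.99)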
71 | data_home = res.pick_and_save(stable, 5, prune=0.99)
72 | assert os.path.isdir(data_home), "result dir does not exist"
73 |
74 | # meaningful_name makes the picked file names self-describing
75 | data_home = res.pick_and_save(stable, 3, meaningful_name=True)
76 | assert data_home
77 |
78 |
79 | def test_cut_range():
80 | cutter = VideoCutter()
81 | res = cutter.cut(VIDEO_PATH)
82 | stable, _ = res.get_range()
83 | stable[0].contain_image(IMAGE_PATH)
84 | stable[0].is_loop(0.95)
85 |
86 |
87 | def test_cut_result():
88 | cutter = VideoCutter()
89 | v = VideoObject(VIDEO_PATH)
90 | res = cutter.cut(v)
91 | stable, _ = res.get_range()
92 | assert len(stable) == len(res.get_stable_range())
93 | assert isinstance(res.diff(res, auto_merge=True), VideoCutResultDiff)
94 | assert isinstance(res.thumbnail(stable[0]), np.ndarray)
95 | assert isinstance(res.thumbnail(stable[0], is_vertical=True), np.ndarray)
96 | assert isinstance(res.thumbnail(stable[0], to_dir="somewhere"), np.ndarray)
97 |
98 | res.get_range_dynamic([4, 5], threshold=0.95)
99 | res.get_range_dynamic([1, 2], threshold=0.85)
100 |
101 |
102 | def test_window():
103 | cutter = VideoCutter()
104 | v = VideoObject(VIDEO_PATH)
105 | res = cutter.cut(v, window_size=2, window_coefficient=2)
106 | assert res
107 |
--------------------------------------------------------------------------------
/test/test_diff.py:
--------------------------------------------------------------------------------
1 | from stagesepx.cutter import VideoCutter
2 | import os
3 | import pprint
4 |
5 | from stagesepx.cutter.cut_result import VideoCutResultDiff
6 | from stagesepx.hook import CropHook
7 |
8 | PROJECT_PATH = os.path.dirname(os.path.dirname(__file__))
9 |
10 | # diff the same video against itself: a clean baseline where no stage should be lost
11 | VIDEO_PATH = os.path.join(PROJECT_PATH, "demo.mp4")
12 | ANOTHER_VIDEO_PATH = os.path.join(PROJECT_PATH, "demo.mp4")
13 |
14 |
15 | def test_diff():
16 | cutter = VideoCutter()
17 | res = cutter.cut(VIDEO_PATH)
18 | res1 = cutter.cut(ANOTHER_VIDEO_PATH)
19 |
20 | for each in (res, res1):
21 | stable, _ = each.get_range()
22 | res.pick_and_save(stable, 3)
23 |
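24 | # frame_count is assumed to control how many frames per stage are sampled for the comparison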
24 | diff: VideoCutResultDiff = res.diff(res1, frame_count=5)
25 | pprint.pprint(diff.data)
26 | assert diff.data
27 | assert not diff.any_stage_lost()
28 |
29 |
30 | def test_diff_with_hook():
31 | cutter = VideoCutter()
32 | res = cutter.cut(VIDEO_PATH)
33 | res1 = cutter.cut(ANOTHER_VIDEO_PATH)
34 |
35 | for each in (res, res1):
36 | stable, _ = each.get_range()
37 | res.pick_and_save(stable, 3)
38 |
39 | hooks = [CropHook(size=(0.5, 0.5))]
40 | 
41 | diff = res.diff(res1, pre_hooks=hooks, frame_count=5)
42 | pprint.pprint(diff.data)
43 | assert diff.data
44 | assert not diff.any_stage_lost()
45 |
--------------------------------------------------------------------------------
/test/test_hook.py:
--------------------------------------------------------------------------------
1 | from stagesepx.cutter import VideoCutter
2 | from stagesepx.classifier import SVMClassifier
3 | from stagesepx.reporter import Reporter
4 | from stagesepx.hook import (
5 | ExampleHook,
6 | IgnoreHook,
7 | CropHook,
8 | FrameSaveHook,
9 | RefineHook,
10 | InterestPointHook,
11 | TemplateCompareHook,
12 | InvalidFrameDetectHook,
13 | _AreaBaseHook,
14 | )
15 |
16 | import os
17 |
18 | PROJECT_PATH = os.path.dirname(os.path.dirname(__file__))
19 | VIDEO_PATH = os.path.join(PROJECT_PATH, "demo.mp4")
20 | IMAGE_NAME = "demo.jpg"
21 | IMAGE_PATH = os.path.join(PROJECT_PATH, IMAGE_NAME)
22 | assert os.path.isfile(IMAGE_PATH)
23 |
24 |
25 | def test_others():
26 | assert _AreaBaseHook.convert(200, 200, 100, 100) == (100, 100)
27 | # InvalidFrameDetectHook is deprecated; constructing it may raise DeprecationWarning
27 | try:
28 | InvalidFrameDetectHook()
29 | except DeprecationWarning:
30 | pass
31 |
32 |
33 | def test_hook():
34 | # init hook
35 | hook = ExampleHook()
36 | hook1 = ExampleHook()
37 | hook2 = IgnoreHook(size=(0.5, 0.5))
38 | frame_home = os.path.join(PROJECT_PATH, "frame_save_dir")
39 | hook3 = FrameSaveHook(frame_home)
40 | hook4 = CropHook(size=(0.5, 0.5), offset=(0.0, 0.5))
41 | hook5 = RefineHook()
42 | hook6 = InterestPointHook()
43 | hook7 = TemplateCompareHook({"amazon": IMAGE_PATH})
44 |
45 | # --- cutter ---
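46 | # compress_rate=0.9 is assumed to scale frames to 90% of their size before analysis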
46 | cutter = VideoCutter(compress_rate=0.9)
47 | # add hook
48 | cutter.add_hook(hook)
49 | cutter.add_hook(hook1)
50 | cutter.add_hook(hook2)
51 | cutter.add_hook(hook3)
52 | cutter.add_hook(hook4)
53 | cutter.add_hook(hook5)
54 | cutter.add_hook(hook6)
55 | cutter.add_hook(hook7)
56 |
57 | res = cutter.cut(VIDEO_PATH)
58 | stable, unstable = res.get_range()
59 | assert len(stable) == 2, "count of stable ranges is not correct"
60 |
61 | data_home = res.pick_and_save(stable, 5)
62 | assert os.path.isdir(data_home), "result dir does not exist"
63 |
64 | # --- classify ---
65 | cl = SVMClassifier()
66 | cl.load(data_home)
67 | cl.train()
68 | classify_result = cl.classify(VIDEO_PATH, stable)
69 |
70 | # --- draw ---
71 | r = Reporter()
72 | report_path = os.path.join(data_home, "report.html")
73 | r.draw(classify_result, report_path=report_path, cut_result=res)
74 | assert os.path.isfile(report_path)
75 |
76 | # hook check
77 | assert os.path.isdir(frame_home)
78 | assert hook6.result
79 | assert hook7.result
80 |
--------------------------------------------------------------------------------
/test/test_toolbox.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from stagesepx import toolbox
4 |
5 | PROJECT_PATH = os.path.dirname(os.path.dirname(__file__))
6 | IMAGE_NAME = "demo.jpg"
7 | IMAGE_PATH = os.path.join(PROJECT_PATH, IMAGE_NAME)
8 | VIDEO_NAME = "demo.mp4"
9 | VIDEO_PATH = os.path.join(PROJECT_PATH, VIDEO_NAME)
10 | assert os.path.isfile(IMAGE_PATH), f"{IMAGE_NAME} does not exist!"
11 | assert os.path.isfile(VIDEO_PATH), f"{VIDEO_NAME} does not exist!"
12 |
13 |
14 | def test_turn_blur():
15 | image = toolbox.imread(IMAGE_PATH)
16 | grey = toolbox.turn_grey(image)
17 | toolbox.turn_blur(grey)
18 |
19 |
20 | def test_turn_grey():
21 | image = toolbox.imread(IMAGE_PATH)
22 | toolbox.turn_grey(image)
23 |
24 |
25 | def test_turn_binary():
26 | image = toolbox.imread(IMAGE_PATH)
27 | toolbox.turn_binary(image)
28 |
29 |
30 | def test_turn_lbp_desc():
31 | image = toolbox.imread(IMAGE_PATH)
32 | toolbox.turn_lbp_desc(image)
33 |
34 |
35 | def test_get_frame():
36 | with toolbox.video_capture(VIDEO_PATH) as cap:
37 | first = 5
38 | second = 8
39 |
40 | toolbox.video_jump(cap, first)
41 | actual = toolbox.get_frame_time(cap, first)
42 | should = toolbox.get_current_frame_time(cap)
43 |
44 | # both calls should report frame 5's timestamp, about 0.16 s (4 elapsed frames at 25 fps)
45 | assert actual == should
46 | assert abs(actual - 0.16) < 0.01
47 |
48 | # 5 -> 8 -> 5
49 | frame = toolbox.get_frame(cap, second, True)
50 | assert frame is not None
51 | # recover=True restores the capture position afterwards:
52 | # the next frame to be read is 5, so the current frame id is 4
54 | assert toolbox.get_current_frame_id(cap) == first - 1
55 |
56 | # 5 -> 8
57 | frame = toolbox.get_frame(cap, second)
58 | assert frame is not None
59 | assert toolbox.get_current_frame_id(cap) == second
60 |
61 | # get_frame_time with recover=True must leave the playhead where it was
62 | cur_time = toolbox.get_current_frame_time(cap)
63 | toolbox.get_frame_time(cap, second, True)
64 | assert toolbox.get_current_frame_time(cap) == cur_time
65 |
66 |
67 | def test_compress():
68 | image = toolbox.imread(IMAGE_PATH)
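69 | # compress_frame yields a single-channel image, which is why the expected shape below is 2-D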
69 | frame = toolbox.compress_frame(image, target_size=(100, 100))
70 | assert frame.shape == (100, 100)
71 |
72 |
73 | def test_convert_video():
74 | target_fps: int = 30
75 | ret = toolbox.fps_convert(
76 | target_fps, VIDEO_PATH, os.path.join(PROJECT_PATH, f"{target_fps}.mp4")
77 | )
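78 | # a falsy return value signals that the conversion succeeded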
78 | assert not ret
79 |
80 |
81 | def test_match_template():
82 | image1 = toolbox.imread(IMAGE_PATH)
83 | image2 = toolbox.imread(IMAGE_PATH)
84 | ret = toolbox.match_template_with_object(image1, image2)
85 | assert ret["ok"]
86 |
87 | ret = toolbox.match_template_with_path(IMAGE_PATH, image2)
88 | assert ret["ok"]
89 |
--------------------------------------------------------------------------------
/test/test_video.py:
--------------------------------------------------------------------------------
1 | from stagesepx.video import VideoObject, VideoFrame
2 | from stagesepx.hook import ExampleHook
3 | import os
4 | import pathlib
5 |
6 |
7 | PROJECT_PATH = os.path.dirname(os.path.dirname(__file__))
8 | VIDEO_PATH = os.path.join(PROJECT_PATH, "demo.mp4")
9 | VIDEO_PATHLIB_PATH = pathlib.Path(VIDEO_PATH)
10 | IMAGE_NAME = "demo.jpg"
11 | IMAGE_PATH = os.path.join(PROJECT_PATH, IMAGE_NAME)
12 |
13 |
14 | def test_read_from_file():
15 | v = VideoObject(VIDEO_PATH)
16 | count = 0
17 | for f in v:
18 | assert isinstance(f, VideoFrame)
19 | count += 1
20 | assert count == 30
21 |
22 |
23 | def test_pathlib_path():
24 | v = VideoObject(VIDEO_PATHLIB_PATH)
25 | count = 0
26 | for f in v:
27 | assert isinstance(f, VideoFrame)
28 | count += 1
29 | assert count == 30
30 |
31 |
32 | def test_read_from_mem():
33 | v = VideoObject(VIDEO_PATH)
34 | print(str(v))
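35 | # load_frames caches all frames in memory; clean_frames below releases them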
35 | v.load_frames()
36 | count = 0
37 | for f in v:
38 | assert isinstance(f, VideoFrame)
39 | print(str(f))
40 | count += 1
41 | assert count == 30
42 |
43 | v.clean_frames()
44 | assert not v.data
45 |
46 |
47 | def test_convert_first():
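48 | # assuming the 30-frame demo runs at 25 fps (1.2 s), re-encoding at fps=30 yields the 36 frames asserted below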
48 | v = VideoObject(VIDEO_PATH, fps=30)
49 | v.load_frames()
50 | assert len(v.data) == 36
51 |
52 |
53 | def test_contain_image():
54 | v = VideoObject(VIDEO_PATH)
55 | v.load_frames()
56 | ret = v.data[0].contain_image(image_path=IMAGE_PATH)
57 | assert ret["ok"]
58 |
59 |
60 | def test_preload_with_hook():
61 | v = VideoObject(VIDEO_PATH)
62 | hook = ExampleHook()
63 | v.add_preload_hook(hook)
64 | v.load_frames()
65 |
66 |
67 | def test_sync_timestamp():
68 | v = VideoObject(VIDEO_PATH)
69 | v.load_frames()
70 | v.sync_timestamp()
71 |
--------------------------------------------------------------------------------