├── .gitattributes
├── .github
└── workflows
│ ├── push-latest.yml
│ └── release.yml
├── .gitignore
├── CHANGELOG.md
├── LICENSE.txt
├── README.md
├── app.py
├── docker
├── Dockerfile
└── s6
│ └── s6-rc.d
│ ├── ikaros
│ ├── dependencies.d
│ │ └── init-adduser
│ ├── finish
│ ├── run
│ └── type
│ ├── init-adduser
│ ├── run
│ ├── type
│ └── up
│ └── user
│ └── contents.d
│ └── ikaros
├── docs
├── imgs
│ ├── emby1.jpg
│ ├── emby2.jpg
│ ├── javmodify.png
│ ├── javview.png
│ ├── path.png
│ ├── transfermodify.png
│ └── transferview.png
└── intro.md
├── migrations
├── README
├── alembic.ini
├── env.py
├── script.py.mako
└── versions
│ ├── 01f93f4b9988_update.py
│ ├── 13c0a3a534a0_update_transferconfig.py
│ ├── 16525dfee3f9_update_scraping_config.py
│ ├── 17b29e2630f4_add_deadtime.py
│ ├── 1eac15402cf5_add_task_number.py
│ ├── 206e24daedc4_add_link_type.py
│ ├── 25cc4c9d1c33_add_fix_series_tag.py
│ ├── 39aa630232f1_update_task_cid.py
│ ├── 43e878537ab5_add_extrafanart.py
│ ├── 484e6db26cb9_update_records.py
│ ├── 55d303fd558e_add_loglvl.py
│ ├── 5c6e23a62577_add_specifiedurl.py
│ ├── 6749dee88757_add_srcfolder.py
│ ├── 67cbdd6413c3_update_series_record.py
│ ├── 76a849adb003_update_records.py
│ ├── 825048469450_update_scraping_tags.py
│ ├── 98894b006faa_add_transmission.py
│ ├── 98e01c6ecbdc_add_auto_watch.py
│ ├── 9b27f48ac200_rename_table.py
│ ├── a76b476b9525_update_morestoryline.py
│ ├── a9c9e7063598_update_refresh_url.py
│ ├── ab98111cb095_rename_config_table.py
│ ├── ae24eb9602af_update_multithread.py
│ ├── afd66a240ba0_add_is_sym_relative_path.py
│ ├── b7f1f83c525e_movie_forced_name.py
│ ├── c6d3cfb805e7_update_config.py
│ ├── d029a987b48c_add_minisize.py
│ ├── de30b0f0cf2e_add_cookies.py
│ ├── e76363a21cbe_update_site_sources.py
│ ├── ef9c9dbb8a8e_add_refresh_url_for_transfer.py
│ ├── f43a0835f0b5_update_proxy.py
│ └── f6ca967bad99_add_scraping_cd_num.py
├── package-lock.json
├── package.json
├── requirements.txt
├── scripts
├── qbcomplete.sh
└── trcomplete.sh
├── src
├── __init__.py
├── bizlogic
│ ├── automation.py
│ ├── manager.py
│ ├── mediaserver.py
│ ├── rename.py
│ ├── schedulertask.py
│ ├── scraper.py
│ └── transfer.py
├── config.py
├── controller
│ ├── __init__.py
│ ├── automationctrl.py
│ ├── filescan_ctrl.py
│ ├── main_ctrl.py
│ ├── optionctrl.py
│ ├── scrapingctrl.py
│ ├── transferctrl.py
│ └── viewsctrl.py
├── downloader
│ ├── __init__.py
│ ├── qbittorrent.py
│ └── transmission.py
├── images
│ ├── CNSUB.png
│ ├── HACK.png
│ ├── LEAK.png
│ └── UNCENSORED.png
├── mappingtable
│ ├── mapping_actor.xml
│ └── mapping_info.xml
├── model
│ ├── __init__.py
│ ├── config.py
│ ├── record.py
│ └── task.py
├── notifications
│ ├── __init__.py
│ ├── telegram.py
│ └── wechat.py
├── service
│ ├── configservice.py
│ ├── recordservice.py
│ ├── schedulerservice.py
│ └── taskservice.py
├── utils
│ ├── filehelper.py
│ ├── number_parser.py
│ └── regex.py
└── wscontroller
│ ├── __init__.py
│ └── wsloger.py
└── web
├── static
└── README.md
└── templates
└── README.md
/.gitattributes:
--------------------------------------------------------------------------------
1 | *.py text=auto eol=lf
2 |
--------------------------------------------------------------------------------
/.github/workflows/push-latest.yml:
--------------------------------------------------------------------------------
1 | name: Push Latest
2 | on:
3 | workflow_dispatch:
4 | inputs:
5 | publish_tag:
6 | description: 'Publish Tag'
7 | required: false
8 | default: 'latest'
9 | jobs:
10 | buildDocker:
11 | runs-on: ubuntu-latest
12 | steps:
13 | - name: Checkout
14 | uses: actions/checkout@v3
15 |
16 | - name: Set up QEMU
17 | uses: docker/setup-qemu-action@v2
18 |
19 | - name: Set up Docker Buildx
20 | uses: docker/setup-buildx-action@v2
21 |
22 | - name: Login to Docker Hub
23 | uses: docker/login-action@v2
24 | with:
25 | username: ${{ secrets.DOCKERHUB_USERNAME }}
26 | password: ${{ secrets.DOCKERHUB_TOKEN }}
27 |
28 | - name: Build and push
29 | uses: docker/build-push-action@v3
30 | with:
31 | context: .
32 | file: ./docker/Dockerfile
33 | platforms: linux/amd64,linux/arm64
34 | push: true
35 | tags: |
36 | suwmlee/ikaros:${{ github.event.inputs.publish_tag }}
37 |
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
1 | name: Release
2 | on:
3 | push:
4 | branches:
5 | - master
6 | jobs:
7 | buildDocker:
8 | runs-on: ubuntu-latest
9 | steps:
10 | - name: Checkout
11 | uses: actions/checkout@v3
12 |
13 | - name: Setup Node.js
14 | uses: actions/setup-node@v2
15 | with:
16 | node-version: '16'
17 | - name: Install dependencies
18 | run: npm install --only=production
19 |
20 | - name: Semantic Release
21 | id: semantic
22 | env:
23 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
24 | run: npx semantic-release
25 |
26 | - name: Set up QEMU
27 | if: ${{ env.VERSION_TAG != '' }}
28 | uses: docker/setup-qemu-action@v2
29 |
30 | - name: Set up Docker Buildx
31 | if: ${{ env.VERSION_TAG != '' }}
32 | uses: docker/setup-buildx-action@v2
33 |
34 | - name: Login to DockerHub
35 | if: ${{ env.VERSION_TAG != '' }}
36 | uses: docker/login-action@v2
37 | with:
38 | username: ${{ secrets.DOCKERHUB_USERNAME }}
39 | password: ${{ secrets.DOCKERHUB_TOKEN }}
40 |
41 | - name: Build and push
42 | if: ${{ env.VERSION_TAG != '' }}
43 | uses: docker/build-push-action@v3
44 | with:
45 | context: .
46 | file: ./docker/Dockerfile
47 | platforms: linux/amd64,linux/arm64
48 | push: true
49 | tags: suwmlee/ikaros:latest,suwmlee/ikaros:${{ env.VERSION_TAG }}
50 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | ### NodeJS
7 | node_modules
8 |
9 | # C extensions
10 | *.so
11 |
12 | # Distribution / packaging
13 | .Python
14 | build/
15 | develop-eggs/
16 | dist/
17 | downloads/
18 | eggs/
19 | .eggs/
20 | lib/
21 | lib64/
22 | parts/
23 | sdist/
24 | var/
25 | wheels/
26 | pip-wheel-metadata/
27 | share/python-wheels/
28 | *.egg-info/
29 | .installed.cfg
30 | *.egg
31 | MANIFEST
32 |
33 | # PyInstaller
34 | # Usually these files are written by a python script from a template
35 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
36 | *.manifest
37 | *.spec
38 |
39 | # Installer logs
40 | pip-log.txt
41 | pip-delete-this-directory.txt
42 |
43 | # Unit test / coverage reports
44 | htmlcov/
45 | .tox/
46 | .nox/
47 | .coverage
48 | .coverage.*
49 | .cache
50 | nosetests.xml
51 | coverage.xml
52 | *.cover
53 | .hypothesis/
54 | .pytest_cache/
55 |
56 | # Translations
57 | *.mo
58 | *.pot
59 |
60 | # Django stuff:
61 | *.log
62 | local_settings.py
63 | db.sqlite3
64 | *.db
65 |
66 | # Flask stuff:
67 | instance/
68 | .webassets-cache
69 |
70 | # Scrapy stuff:
71 | .scrapy
72 |
73 | # Sphinx documentation
74 | docs/_build/
75 |
76 | # PyBuilder
77 | target/
78 |
79 | # Jupyter Notebook
80 | .ipynb_checkpoints
81 |
82 | # IPython
83 | profile_default/
84 | ipython_config.py
85 |
86 | # vscode
87 | .vscode
88 | # pyenv
89 | .python-version
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 |
97 | # celery beat schedule file
98 | celerybeat-schedule
99 |
100 | # SageMath parsed files
101 | *.sage.py
102 |
103 | # Environments
104 | .env
105 | .venv
106 | env/
107 | venv/
108 | ENV/
109 | env.bak/
110 | venv.bak/
111 |
112 | # Spyder project settings
113 | .spyderproject
114 | .spyproject
115 |
116 | # Rope project settings
117 | .ropeproject
118 |
119 | # mkdocs documentation
120 | /site
121 |
122 | # mypy
123 | .mypy_cache/
124 | .dmypy.json
125 | dmypy.json
126 |
127 | # Pyre type checker
128 | .pyre/
129 |
130 | # Custom
131 | web/
132 | web.log.*
133 |
134 | # MacOS venv
135 | bin/
136 | include/
137 | pyvenv.cfg
138 |
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2022 suwmlee
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 | # ikaros
3 |
4 | [](https://github.com/suwmlee/ikaros/actions) [](https://github.com/suwmlee/ikaros/releases) [](https://hub.docker.com/r/suwmlee/ikaros)
5 |
6 | 解决下载软件与媒体服务内诸多问题,安心享受影片
7 |
8 | 特性:
9 | - 批量软/硬链接
10 | - 批量修改文件名,优化剧集名及自定义
11 | - JAV刮削及自定义
12 | - 自动清理关联的软/硬链接及种子文件
13 | - 托管(忘记这款软件,安心看片)
14 |
15 | 关联`transmission`/`qBittorrent`与`emby`。
16 | 下载完成后,自动筛选文件创建软/硬链接,刮削JAV目录,推送emby库刷新,清理失效文件/种子。
17 | 只需要在网页内操作,不需要打开文件夹,不需要使用命令行
18 |
19 |
20 | __!!! ikaros不再进行大更新,新的特性和功能将在[Bonita](https://github.com/Suwmlee/bonita)中实现__
21 | __!!! ikaros不再进行大更新,新的特性和功能将在[Bonita](https://github.com/Suwmlee/bonita)中实现__
22 | __!!! ikaros不再进行大更新,新的特性和功能将在[Bonita](https://github.com/Suwmlee/bonita)中实现__
23 |
24 |
25 | ### 安装
26 |
27 | 本项目仅后端,需要搭配[ikaros-web](https://github.com/Suwmlee/ikaros-web)
28 | 可自行编译或使用编译好的文件
29 |
30 | - 使用编译好的[web release](https://github.com/Suwmlee/ikaros-web/tree/release)
31 | (机器已安装`python`与`pip`)
32 | 1. 将`index.html`放到`web/templates`
33 | 2. 将其他文件放到`web/static`
34 | 3. `pip install -r requirements.txt`
35 | 4. `python app.py`
36 |
37 | - 使用[docker](https://registry.hub.docker.com/r/suwmlee/ikaros)(推荐)
38 | ```sh
39 | docker run -d \
40 | --name=ikaros \
41 | -e PUID=0 \
42 | -e PGID=0 \
43 | -e TZ=Asia/Shanghai \
44 | -p 12346:12346 \
45 | -v /path/to/media:/media \
46 | -v /path/to/data:/app/data \
47 | --restart unless-stopped \
48 | suwmlee/ikaros:latest
49 | ```
50 | 默认 `PUID=0 PGID=0`,即使用root权限。可以用 __id__ 命令查找具体用户值:
51 | ```
52 | $ id abc
53 | uid=1000(abc) gid=1000(users) groups=1000(users)
54 | ```
55 |
56 | - 群晖docker
57 | 1. 设置存储空间映射
58 |
59 |
60 |
61 | __注:__
62 | - 默认Web访问端口: __12346__
63 | - 可以使用[watchtower](https://hub.docker.com/r/containrrr/watchtower)自动化更新Docker
64 |
65 | ### 默认WEB界面预览
66 |
67 |
68 | | 刮削 | 转移文件 |
69 | | :-----------------------------------: | :---------------------------------------------: |
70 | |  |  |
71 | |  |  |
72 |
73 | ### 文档
74 |
75 | [使用说明](docs/intro.md)
76 |
77 | ### TODO
78 |
79 | 1. 更新 webui
80 | 2. 保留删除记录
81 | 3. 自动检测文件夹,不需要关联下载器
82 |
--------------------------------------------------------------------------------
/app.py:
--------------------------------------------------------------------------------
1 |
2 | from src import create_app
3 |
4 | application = create_app()
5 |
6 | if __name__ == "__main__":
7 | application.run(host='127.0.0.1', port=12346, debug=True)
8 |
--------------------------------------------------------------------------------
/docker/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.11-slim
2 | LABEL git="https://github.com/Suwmlee/ikaros"
3 |
4 | ENV FLASK_APP=app.py
5 | ENV FLASK_RUN_HOST=0.0.0.0
6 | ENV TZ=Asia/Shanghai
7 | ENV PUID=0
8 | ENV PGID=0
9 |
10 | EXPOSE 12346
11 |
12 | RUN apt-get update && \
13 | apt-get install -y wget ca-certificates procps xz-utils
14 |
15 | # update TZ
16 | RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && \
17 | echo $TZ > /etc/timezone
18 |
19 | # set version for s6 overlay
20 | ENV S6_KEEP_ENV=1
21 | ARG S6_OVERLAY_VERSION="3.1.2.1"
22 | ARG S6_OVERLAY_ARCH="x86_64"
23 |
24 | # add s6 overlay
25 | RUN wget https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-noarch.tar.xz -P /tmp && \
26 | tar -C / -Jxpf /tmp/s6-overlay-noarch.tar.xz
27 | RUN wget https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-${S6_OVERLAY_ARCH}.tar.xz -P /tmp && \
28 | tar -C / -Jxpf /tmp/s6-overlay-${S6_OVERLAY_ARCH}.tar.xz
29 |
30 | # add s6 optional symlinks
31 | RUN wget https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-symlinks-noarch.tar.xz -P /tmp && \
32 | tar -C / -Jxpf /tmp/s6-overlay-symlinks-noarch.tar.xz
33 | RUN wget https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-symlinks-arch.tar.xz -P /tmp && \
34 | tar -C / -Jxpf /tmp/s6-overlay-symlinks-arch.tar.xz
35 |
36 | # fetch ikaros web
37 | RUN wget https://github.com/Suwmlee/ikaros-web/archive/release.tar.gz -P /tmp && \
38 | tar -C /tmp -xzf /tmp/release.tar.gz
39 |
40 | COPY requirements.txt /tmp/requirements.txt
41 | RUN pip install --no-cache-dir -r /tmp/requirements.txt
42 |
43 | WORKDIR /app
44 | COPY . .
45 |
46 | # setup ikaros web
47 | RUN mv /tmp/ikaros-web-release/index.html /app/web/templates/ && \
48 | mv /tmp/ikaros-web-release/* /app/web/static/
49 |
50 | # clean
51 | RUN rm -rf /tmp/*
52 |
53 | RUN echo "**** create tomoki user and make folders ****" && \
54 | groupmod -g 1000 users && \
55 | useradd -u 911 -U -d /config -s /bin/false tomoki && \
56 | usermod -G users tomoki && \
57 | mkdir /config
58 |
59 | VOLUME /media
60 | VOLUME /app/data
61 |
62 | #Copy s6-overlay 3.x services
63 | #Uses a system-d like definition that can't be use in 2.x
64 | COPY docker/s6/s6-rc.d/ /etc/s6-overlay/s6-rc.d/
65 |
66 | ENTRYPOINT ["/init"]
67 |
--------------------------------------------------------------------------------
/docker/s6/s6-rc.d/ikaros/dependencies.d/init-adduser:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Suwmlee/ikaros/e29997560bedf8ae70900ea4c1d4d66a09759883/docker/s6/s6-rc.d/ikaros/dependencies.d/init-adduser
--------------------------------------------------------------------------------
/docker/s6/s6-rc.d/ikaros/finish:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | sleep 5
3 |
--------------------------------------------------------------------------------
/docker/s6/s6-rc.d/ikaros/run:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | cd /app
3 | export HOME=/app
4 | exec s6-setuidgid tomoki flask run --port=12346
5 |
--------------------------------------------------------------------------------
/docker/s6/s6-rc.d/ikaros/type:
--------------------------------------------------------------------------------
1 | longrun
2 |
--------------------------------------------------------------------------------
/docker/s6/s6-rc.d/init-adduser/run:
--------------------------------------------------------------------------------
1 | #!/usr/bin/with-contenv bash
2 | # shellcheck shell=bash
3 |
4 | PUID=${PUID:-911}
5 | PGID=${PGID:-911}
6 |
7 | groupmod -o -g "$PGID" tomoki
8 | usermod -o -u "$PUID" tomoki
9 |
10 | echo "
11 | -------------------------------------
12 | .__ __
13 | |__| | _______ _______ ____ ______
14 | | | |/ /\__ \\_ __ \/ _ \/ ___/
15 | | | < / __ \| | \( <_> )___ \
16 | |__|__|_ \(____ /__| \____/____ >
17 | \/ \/ \/
18 | -------------------------------------
19 | Starting with
20 | User uid: $(id -u tomoki)
21 | User gid: $(id -g tomoki)
22 | -------------------------------------
23 | "
24 |
25 | chown tomoki:tomoki /config
26 | chown tomoki:tomoki /app
27 |
28 | chown -R tomoki /app/data
29 | chmod -R u+rwx /app/data
30 |
--------------------------------------------------------------------------------
/docker/s6/s6-rc.d/init-adduser/type:
--------------------------------------------------------------------------------
1 | oneshot
2 |
--------------------------------------------------------------------------------
/docker/s6/s6-rc.d/init-adduser/up:
--------------------------------------------------------------------------------
1 | sh /etc/s6-overlay/s6-rc.d/init-adduser/run
2 |
--------------------------------------------------------------------------------
/docker/s6/s6-rc.d/user/contents.d/ikaros:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Suwmlee/ikaros/e29997560bedf8ae70900ea4c1d4d66a09759883/docker/s6/s6-rc.d/user/contents.d/ikaros
--------------------------------------------------------------------------------
/docs/imgs/emby1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Suwmlee/ikaros/e29997560bedf8ae70900ea4c1d4d66a09759883/docs/imgs/emby1.jpg
--------------------------------------------------------------------------------
/docs/imgs/emby2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Suwmlee/ikaros/e29997560bedf8ae70900ea4c1d4d66a09759883/docs/imgs/emby2.jpg
--------------------------------------------------------------------------------
/docs/imgs/javmodify.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Suwmlee/ikaros/e29997560bedf8ae70900ea4c1d4d66a09759883/docs/imgs/javmodify.png
--------------------------------------------------------------------------------
/docs/imgs/javview.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Suwmlee/ikaros/e29997560bedf8ae70900ea4c1d4d66a09759883/docs/imgs/javview.png
--------------------------------------------------------------------------------
/docs/imgs/path.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Suwmlee/ikaros/e29997560bedf8ae70900ea4c1d4d66a09759883/docs/imgs/path.png
--------------------------------------------------------------------------------
/docs/imgs/transfermodify.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Suwmlee/ikaros/e29997560bedf8ae70900ea4c1d4d66a09759883/docs/imgs/transfermodify.png
--------------------------------------------------------------------------------
/docs/imgs/transferview.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Suwmlee/ikaros/e29997560bedf8ae70900ea4c1d4d66a09759883/docs/imgs/transferview.png
--------------------------------------------------------------------------------
/docs/intro.md:
--------------------------------------------------------------------------------
1 |
2 | ## 使用说明
3 |
4 | ### 配置
5 |
6 | ##### JAV刮削
7 |
8 | 刮削默认跳过带有忽略或已完成标记的影片。
9 | 需自定义刮削番号/中文标签时,需要将状态设置为`未完成`或直接点击刷新按钮
10 |
11 | ~~填写cookies: 复制浏览器内完整的cookies,直接拖动选择后复制,非右键`copy value`,不需要json格式~~
12 | (鉴于目前cookies并不能帮助获取额外的刮削内容,填写cookies的意义不大)
13 |
14 | ##### 转移
15 |
16 | 开启`修正剧集名`后,剧集文件夹最多两级目录结构。
17 | 第一级是`剧集名字`,第二级是`季`或者特典目录`Specials`。如果同一电视剧有多季的文件夹是分散开的,也不需要担心,emby会自动将多季归到一个剧集内展示。
18 | 遇到异常的视频,可参考tmdb内数据,在web页面内修改`季/集`编号或文件名,并再次进行转移操作。
19 |
20 | _请在web页面内进行自定义修改,这样ikaros会更新记录,再次转移会应用修改_
21 | _[媒体文件分类/命名参考](https://suwmlee.github.io/posts/2021/12/05/%E5%AA%92%E4%BD%93%E6%96%87%E4%BB%B6%E5%91%BD%E5%90%8D.html)_
22 | _特典SP剧集的季编号为 0_
23 | _在命名与网络正常的情况下,emby自带的刮削功能完全可以满足日常使用,即使出现个别问题,也可以手动修改_
24 | _仅转移视频文件,jpg/png/nfo等不会转移_
25 |
26 | ### 自动清理
27 |
28 | 自动清理任务会删除文件,如果设置了下载服务器,则会删除对应的种子文件。
29 |
30 | 可配合emby内删除影片功能,在emby内删除后,会自动检测删除的影片。查找关联的源文件,到期后自动删除。
31 | 默认7天的缓冲时间。
32 |
33 | __请慎重填写下载服务器__
34 |
35 | ### 获取刷新emby库地址
36 |
37 | __注:__ 如果Emby库内设置`实时监控`则不需要进行此设置,二者都有优点,选择适合自己的方案即可
38 |
39 | 在刮削和转移配置内填入`emby链接`即可在任务完成后刷新emby库
40 |
41 | 刷新emby需要查找刷新地址,[参考论坛回复](https://emby.media/community/index.php?/topic/50862-trigger-a-library-rescan-via-cmd-line/&do=findComment&comment=487929)
42 |
43 | 具体按图示操作:
44 |
45 | 图 1: 网页内刷新媒体库
46 |
47 |
48 | 图 2: 查找媒体刷新链接
49 |
50 |
51 | 找到刷新该媒体库的地址:
52 | ```
53 | http://192.168.1.233:8096/emby/Items/3227ce1e069754c594af25ea66d69fc7/Refresh?Recursive=true&ImageRefreshMode=Default&MetadataRefreshMode=Default&ReplaceAllImages=false&ReplaceAllMetadata=false&X-Emby-Client=Emby Web&X-Emby-Device-Name=Chrome Windows&X-Emby-Device-Id=123123123214123&X-Emby-Client-Version=4.7.0.19&X-Emby-Token=123123412312312
54 | ```
55 |
56 | 得到媒体库item的ID为:`3227ce1e069754c594af25ea66d69fc7`
57 |
58 | 在emby服务端`控制面板 - 高级 - API密钥` 获取 __api_key__ : `dd4b16934ab81cbxxxxxx`
59 |
60 | 替换找到的刷新链接`itemid`和`ReplaceAllMetadata=false`后半部分:
61 | ```
62 | http://192.168.1.233:8096/emby/Items/3227ce1e069754c594af25ea66d69fc7/Refresh?Recursive=true&ImageRefreshMode=Default&MetadataRefreshMode=Default&ReplaceAllImages=false&ReplaceAllMetadata=false&api_key=dd4b16934ab81cbxxxxxx
63 | ```
64 |
65 | 以上地址即填入的刷新emby链接
66 |
67 | ~~emby也提供了其他刷新方式,但实现会麻烦很多,有兴趣的可以提交PR~~
68 |
69 | ### 关联 transmission/qBittorrent
70 |
71 | - 配置 transmission/qBittorrent 下载完成脚本
72 | - 脚本在项目的`scripts`目录,或在web页面里查看并自行创建脚本
73 | - 在下载软件配置内指定脚本路径
74 | - 在`自动`选项卡里配置过滤目录
75 | - ~~后续可能修改策略,直接扫描下载目录,不需要配置这些~~
76 | __注:__
77 | - 默认请求 __127.0.0.1__ ,需根据实际情况更改
78 | - tr可参考[配置完成脚本](https://github.com/ronggang/transmission-web-control/wiki/About-script-torrent-done-filename)
79 | - 如果docker的挂载路径与qbit挂载路径不同名的话,需要用以下命令`a="%F"&& sh qbcomplete.sh ${a/qbit挂载路径/ikaros挂载路径}`,tr同理
80 |
81 | ### 其他
82 |
83 | - 软链接:相对映射路径,可以修改前缀,有些复杂。没有特殊需求,使用硬链接即可
84 | - 软链接只在有实际指向文件的环境内生效,适应范围单一
85 | - 软链接清理只需要删除源文件即可,硬链接需要删除源文件与硬链接后的文件
86 |
--------------------------------------------------------------------------------
/migrations/README:
--------------------------------------------------------------------------------
1 | Generic single-database configuration.
--------------------------------------------------------------------------------
/migrations/alembic.ini:
--------------------------------------------------------------------------------
1 | # A generic, single database configuration.
2 |
3 | [alembic]
4 | # template used to generate migration files
5 | # file_template = %%(rev)s_%%(slug)s
6 |
7 | # set to 'true' to run the environment during
8 | # the 'revision' command, regardless of autogenerate
9 | # revision_environment = false
10 |
11 |
12 | # Logging configuration
13 | [loggers]
14 | keys = root,sqlalchemy,alembic,flask_migrate
15 |
16 | [handlers]
17 | keys = console
18 |
19 | [formatters]
20 | keys = generic
21 |
22 | [logger_root]
23 | level = WARN
24 | handlers = console
25 | qualname =
26 |
27 | [logger_sqlalchemy]
28 | level = WARN
29 | handlers =
30 | qualname = sqlalchemy.engine
31 |
32 | [logger_alembic]
33 | level = INFO
34 | handlers =
35 | qualname = alembic
36 |
37 | [logger_flask_migrate]
38 | level = INFO
39 | handlers =
40 | qualname = flask_migrate
41 |
42 | [handler_console]
43 | class = StreamHandler
44 | args = (sys.stderr,)
45 | level = NOTSET
46 | formatter = generic
47 |
48 | [formatter_generic]
49 | format = %(levelname)-5.5s [%(name)s] %(message)s
50 | datefmt = %H:%M:%S
51 |
--------------------------------------------------------------------------------
/migrations/env.py:
--------------------------------------------------------------------------------
1 | from __future__ import with_statement
2 |
3 | import logging
4 | # from logging.config import fileConfig
5 |
6 | from flask import current_app
7 |
8 | from alembic import context
9 |
10 | # this is the Alembic Config object, which provides
11 | # access to the values within the .ini file in use.
12 | config = context.config
13 |
14 | # Interpret the config file for Python logging.
15 | # This line sets up loggers basically.
16 | # fileConfig(config.config_file_name)
17 | logger = logging.getLogger('alembic.env')
18 |
19 | # add your model's MetaData object here
20 | # for 'autogenerate' support
21 | # from myapp import mymodel
22 | # target_metadata = mymodel.Base.metadata
23 | config.set_main_option(
24 | 'sqlalchemy.url',
25 | str(current_app.extensions['migrate'].db.get_engine().url).replace(
26 | '%', '%%'))
27 | target_metadata = current_app.extensions['migrate'].db.metadata
28 |
29 | # other values from the config, defined by the needs of env.py,
30 | # can be acquired:
31 | # my_important_option = config.get_main_option("my_important_option")
32 | # ... etc.
33 |
34 |
35 | def run_migrations_offline():
36 | """Run migrations in 'offline' mode.
37 |
38 | This configures the context with just a URL
39 | and not an Engine, though an Engine is acceptable
40 | here as well. By skipping the Engine creation
41 | we don't even need a DBAPI to be available.
42 |
43 | Calls to context.execute() here emit the given string to the
44 | script output.
45 |
46 | """
47 | url = config.get_main_option("sqlalchemy.url")
48 | context.configure(
49 | url=url, target_metadata=target_metadata, literal_binds=True
50 | )
51 |
52 | with context.begin_transaction():
53 | context.run_migrations()
54 |
55 |
56 | def run_migrations_online():
57 | """Run migrations in 'online' mode.
58 |
59 | In this scenario we need to create an Engine
60 | and associate a connection with the context.
61 |
62 | """
63 |
64 | # this callback is used to prevent an auto-migration from being generated
65 | # when there are no changes to the schema
66 | # reference: http://alembic.zzzcomputing.com/en/latest/cookbook.html
67 | def process_revision_directives(context, revision, directives):
68 | if getattr(config.cmd_opts, 'autogenerate', False):
69 | script = directives[0]
70 | if script.upgrade_ops.is_empty():
71 | directives[:] = []
72 | logger.info('No changes in schema detected.')
73 |
74 | connectable = current_app.extensions['migrate'].db.get_engine()
75 |
76 | with connectable.connect() as connection:
77 | context.configure(
78 | connection=connection,
79 | target_metadata=target_metadata,
80 | process_revision_directives=process_revision_directives,
81 | **current_app.extensions['migrate'].configure_args
82 | )
83 |
84 | with context.begin_transaction():
85 | context.run_migrations()
86 |
87 |
88 | if context.is_offline_mode():
89 | run_migrations_offline()
90 | else:
91 | run_migrations_online()
92 |
--------------------------------------------------------------------------------
/migrations/script.py.mako:
--------------------------------------------------------------------------------
1 | """${message}
2 |
3 | Revision ID: ${up_revision}
4 | Revises: ${down_revision | comma,n}
5 | Create Date: ${create_date}
6 |
7 | """
8 | from alembic import op
9 | import sqlalchemy as sa
10 | ${imports if imports else ""}
11 |
12 | # revision identifiers, used by Alembic.
13 | revision = ${repr(up_revision)}
14 | down_revision = ${repr(down_revision)}
15 | branch_labels = ${repr(branch_labels)}
16 | depends_on = ${repr(depends_on)}
17 |
18 |
19 | def upgrade():
20 | ${upgrades if upgrades else "pass"}
21 |
22 |
23 | def downgrade():
24 | ${downgrades if downgrades else "pass"}
25 |
--------------------------------------------------------------------------------
/migrations/versions/01f93f4b9988_update.py:
--------------------------------------------------------------------------------
1 | """update
2 |
3 | Revision ID: 01f93f4b9988
4 | Revises: 16525dfee3f9
5 | Create Date: 2022-04-13 21:00:33.685775
6 |
7 | """
8 | from alembic import op
9 | import sqlalchemy as sa
10 |
11 |
12 | # revision identifiers, used by Alembic.
13 | revision = '01f93f4b9988'
14 | down_revision = '16525dfee3f9'
15 | branch_labels = None
16 | depends_on = None
17 |
18 |
19 | def upgrade():
20 | # ### commands auto generated by Alembic - please adjust! ###
21 | with op.batch_alter_table('autoconfigs', schema=None) as batch_op:
22 | batch_op.add_column(sa.Column('scrapingconfs', sa.String(), nullable=True, comment='以;间隔'))
23 | batch_op.add_column(sa.Column('transferconfs', sa.String(), nullable=True, comment='以;间隔'))
24 | batch_op.add_column(sa.Column('remark', sa.String(), nullable=True))
25 | batch_op.drop_column('mark')
26 |
27 | with op.batch_alter_table('transferconfigs', schema=None) as batch_op:
28 | batch_op.add_column(sa.Column('remark', sa.String(), nullable=True))
29 | batch_op.drop_column('mark')
30 |
31 | # ### end Alembic commands ###
32 |
33 |
34 | def downgrade():
35 | # ### commands auto generated by Alembic - please adjust! ###
36 | with op.batch_alter_table('transferconfigs', schema=None) as batch_op:
37 | batch_op.add_column(sa.Column('mark', sa.VARCHAR(), nullable=True))
38 | batch_op.drop_column('remark')
39 |
40 | with op.batch_alter_table('autoconfigs', schema=None) as batch_op:
41 | batch_op.add_column(sa.Column('mark', sa.VARCHAR(), nullable=True))
42 | batch_op.drop_column('remark')
43 | batch_op.drop_column('transferconfs')
44 | batch_op.drop_column('scrapingconfs')
45 |
46 | # ### end Alembic commands ###
47 |
--------------------------------------------------------------------------------
/migrations/versions/13c0a3a534a0_update_transferconfig.py:
--------------------------------------------------------------------------------
1 | """Update transferConfig
2 |
3 | Revision ID: 13c0a3a534a0
4 | Revises: a9c9e7063598
5 | Create Date: 2021-09-18 14:15:01.405004
6 |
7 | """
8 | from alembic import op
9 | import sqlalchemy as sa
10 |
11 |
12 | # revision identifiers, used by Alembic.
13 | revision = '13c0a3a534a0'
14 | down_revision = 'a9c9e7063598'
15 | branch_labels = None
16 | depends_on = None
17 |
18 |
19 | def upgrade():
20 | # ### commands auto generated by Alembic - please adjust! ###
21 | with op.batch_alter_table('transferconfigs', schema=None) as batch_op:
22 | batch_op.add_column(sa.Column('clean_others', sa.Boolean(), nullable=True))
23 | batch_op.add_column(sa.Column('replace_CJK', sa.Boolean(), nullable=True))
24 |
25 | # ### end Alembic commands ###
26 |
27 |
28 | def downgrade():
29 | # ### commands auto generated by Alembic - please adjust! ###
30 | with op.batch_alter_table('transferconfigs', schema=None) as batch_op:
31 | batch_op.drop_column('replace_CJK')
32 | batch_op.drop_column('clean_others')
33 |
34 | # ### end Alembic commands ###
35 |
--------------------------------------------------------------------------------
/migrations/versions/16525dfee3f9_update_scraping_config.py:
--------------------------------------------------------------------------------
1 | """update scraping config
2 |
3 | Revision ID: 16525dfee3f9
4 | Revises: 39aa630232f1
5 | Create Date: 2022-04-13 19:33:19.450917
6 |
7 | """
8 | from alembic import op
9 | import sqlalchemy as sa
10 |
11 |
12 | # revision identifiers, used by Alembic.
13 | revision = '16525dfee3f9'
14 | down_revision = '39aa630232f1'
15 | branch_labels = None
16 | depends_on = None
17 |
18 |
def upgrade():
    """Add an optional free-text remark column to the settings table."""
    with op.batch_alter_table('settings', schema=None) as batch:
        batch.add_column(sa.Column('remark', sa.String(), nullable=True))
25 |
26 |
def downgrade():
    """Drop the remark column again."""
    with op.batch_alter_table('settings', schema=None) as batch:
        batch.drop_column('remark')
33 |
--------------------------------------------------------------------------------
/migrations/versions/17b29e2630f4_add_deadtime.py:
--------------------------------------------------------------------------------
1 | """add deadtime
2 |
3 | Revision ID: 17b29e2630f4
4 | Revises: a76b476b9525
5 | Create Date: 2022-06-15 17:15:15.952589
6 |
7 | """
8 | from alembic import op
9 | import sqlalchemy as sa
10 |
11 |
12 | # revision identifiers, used by Alembic.
13 | revision = '17b29e2630f4'
14 | down_revision = 'a76b476b9525'
15 | branch_labels = None
16 | depends_on = None
17 |
18 |
def upgrade():
    """Add a nullable deadtime timestamp to both record tables."""
    for table in ('scrapingrecords', 'transrecords'):
        with op.batch_alter_table(table, schema=None) as batch:
            batch.add_column(sa.Column('deadtime', sa.DateTime(), nullable=True, comment='time to delete files'))
28 |
29 |
def downgrade():
    """Drop the deadtime column from both record tables (reverse order)."""
    for table in ('transrecords', 'scrapingrecords'):
        with op.batch_alter_table(table, schema=None) as batch:
            batch.drop_column('deadtime')
39 |
--------------------------------------------------------------------------------
/migrations/versions/1eac15402cf5_add_task_number.py:
--------------------------------------------------------------------------------
1 | """Add task number
2 |
3 | Revision ID: 1eac15402cf5
4 | Revises: de30b0f0cf2e
5 | Create Date: 2021-06-24 10:03:02.291131
6 |
7 | """
8 | from alembic import op
9 | import sqlalchemy as sa
10 |
11 |
12 | # revision identifiers, used by Alembic.
13 | revision = '1eac15402cf5'
14 | down_revision = 'de30b0f0cf2e'
15 | branch_labels = None
16 | depends_on = None
17 |
18 |
def upgrade():
    """Add progress counters (total / finished) to the task table."""
    for counter in ('total', 'finished'):
        op.add_column('task', sa.Column(counter, sa.Integer(), nullable=True))
24 |
25 |
def downgrade():
    """Remove the progress counters, reversing the order of creation."""
    for counter in ('finished', 'total'):
        op.drop_column('task', counter)
31 |
--------------------------------------------------------------------------------
/migrations/versions/206e24daedc4_add_link_type.py:
--------------------------------------------------------------------------------
1 | """Add link type
2 |
3 | Revision ID: 206e24daedc4
4 | Revises: 1eac15402cf5
5 | Create Date: 2021-07-15 10:07:21.342292
6 |
7 | """
8 | from alembic import op
9 | import sqlalchemy as sa
10 |
11 |
12 | # revision identifiers, used by Alembic.
13 | revision = '206e24daedc4'
14 | down_revision = '1eac15402cf5'
15 | branch_labels = None
16 | depends_on = None
17 |
18 |
def upgrade():
    """Add the nullable link_type integer column to the settings table."""
    op.add_column('settings', sa.Column('link_type', sa.Integer(), nullable=True))
23 |
24 |
def downgrade():
    """Drop the link_type column."""
    op.drop_column('settings', 'link_type')
29 |
--------------------------------------------------------------------------------
/migrations/versions/25cc4c9d1c33_add_fix_series_tag.py:
--------------------------------------------------------------------------------
1 | """add fix series tag
2 |
3 | Revision ID: 25cc4c9d1c33
4 | Revises: 13c0a3a534a0
5 | Create Date: 2021-10-18 13:40:21.622995
6 |
7 | """
8 | from alembic import op
9 | import sqlalchemy as sa
10 |
11 |
12 | # revision identifiers, used by Alembic.
13 | revision = '25cc4c9d1c33'
14 | down_revision = '13c0a3a534a0'
15 | branch_labels = None
16 | depends_on = None
17 |
18 |
def upgrade():
    """Add the fix_series boolean flag to transferconfigs."""
    with op.batch_alter_table('transferconfigs', schema=None) as batch:
        batch.add_column(sa.Column('fix_series', sa.Boolean(), nullable=True))
25 |
26 |
def downgrade():
    """Drop the fix_series flag again."""
    with op.batch_alter_table('transferconfigs', schema=None) as batch:
        batch.drop_column('fix_series')
33 |
--------------------------------------------------------------------------------
/migrations/versions/39aa630232f1_update_task_cid.py:
--------------------------------------------------------------------------------
1 | """update task cid
2 |
3 | Revision ID: 39aa630232f1
4 | Revises: d029a987b48c
5 | Create Date: 2022-04-13 19:06:19.032203
6 |
7 | """
8 | from alembic import op
9 | import sqlalchemy as sa
10 |
11 |
12 | # revision identifiers, used by Alembic.
13 | revision = '39aa630232f1'
14 | down_revision = 'd029a987b48c'
15 | branch_labels = None
16 | depends_on = None
17 |
18 |
def upgrade():
    """Add a nullable cid integer column to the task table."""
    with op.batch_alter_table('task', schema=None) as batch:
        batch.add_column(sa.Column('cid', sa.Integer(), nullable=True))
25 |
26 |
def downgrade():
    """Drop the cid column from the task table."""
    with op.batch_alter_table('task', schema=None) as batch:
        batch.drop_column('cid')
33 |
--------------------------------------------------------------------------------
/migrations/versions/43e878537ab5_add_extrafanart.py:
--------------------------------------------------------------------------------
1 | """Add extrafanart
2 |
3 | Revision ID: 43e878537ab5
4 | Revises: 206e24daedc4
5 | Create Date: 2021-07-28 12:33:52.154813
6 |
7 | """
8 | from alembic import op
9 | import sqlalchemy as sa
10 |
11 |
12 | # revision identifiers, used by Alembic.
13 | revision = '43e878537ab5'
14 | down_revision = '206e24daedc4'
15 | branch_labels = None
16 | depends_on = None
17 |
18 |
def upgrade():
    """Add the extrafanart toggle and its target folder name to settings."""
    new_columns = [
        sa.Column('extrafanart_enable', sa.Boolean(), nullable=True),
        sa.Column('extrafanart_folder', sa.String(), server_default='extrafanart', nullable=True),
    ]
    for column in new_columns:
        op.add_column('settings', column)
24 |
25 |
def downgrade():
    """Drop the extrafanart columns in reverse order of creation."""
    for name in ('extrafanart_folder', 'extrafanart_enable'):
        op.drop_column('settings', name)
31 |
--------------------------------------------------------------------------------
/migrations/versions/484e6db26cb9_update_records.py:
--------------------------------------------------------------------------------
1 | """update records
2 |
3 | Revision ID: 484e6db26cb9
4 | Revises: 5c6e23a62577
5 | Create Date: 2024-03-02 14:00:44.337457
6 |
7 | """
8 | from alembic import op
9 | import sqlalchemy as sa
10 |
11 |
12 | # revision identifiers, used by Alembic.
13 | revision = '484e6db26cb9'
14 | down_revision = '5c6e23a62577'
15 | branch_labels = None
16 | depends_on = None
17 |
18 |
def upgrade():
    """Remove retired proxy/concurrency columns from scrapingconfigs and add
    ignored/locked/deleted markers to both record tables.
    """
    obsolete = (
        'proxy_timeout',
        'proxy_retry',
        'async_request',
        'proxy_address',
        'multi_scraping',
        'proxy_type',
        'proxy_enable',
    )
    with op.batch_alter_table('scrapingconfigs', schema=None) as batch:
        for name in obsolete:
            batch.drop_column(name)

    for table in ('scrapingrecords', 'transrecords'):
        with op.batch_alter_table(table, schema=None) as batch:
            batch.add_column(sa.Column('ignored', sa.Integer(), nullable=True))
            batch.add_column(sa.Column('locked', sa.Boolean(), nullable=True))
            batch.add_column(sa.Column('deleted', sa.Boolean(), nullable=True))
41 |
42 |
def downgrade():
    """Drop the record markers and restore the proxy/concurrency columns."""
    for table in ('transrecords', 'scrapingrecords'):
        with op.batch_alter_table(table, schema=None) as batch:
            for name in ('deleted', 'locked', 'ignored'):
                batch.drop_column(name)

    restored = (
        ('proxy_enable', sa.BOOLEAN),
        ('proxy_type', sa.VARCHAR),
        ('multi_scraping', sa.BOOLEAN),
        ('proxy_address', sa.VARCHAR),
        ('async_request', sa.BOOLEAN),
        ('proxy_retry', sa.INTEGER),
        ('proxy_timeout', sa.INTEGER),
    )
    with op.batch_alter_table('scrapingconfigs', schema=None) as batch:
        for name, column_type in restored:
            batch.add_column(sa.Column(name, column_type(), nullable=True))
65 |
--------------------------------------------------------------------------------
/migrations/versions/55d303fd558e_add_loglvl.py:
--------------------------------------------------------------------------------
1 | """add loglvl
2 |
3 | Revision ID: 55d303fd558e
4 | Revises: 98894b006faa
5 | Create Date: 2022-06-22 22:38:25.023254
6 |
7 | """
8 | from alembic import op
9 | import sqlalchemy as sa
10 |
11 |
12 | # revision identifiers, used by Alembic.
13 | revision = '55d303fd558e'
14 | down_revision = '98894b006faa'
15 | branch_labels = None
16 | depends_on = None
17 |
18 |
def upgrade():
    """Add a nullable loglevel integer column to localconfigs."""
    with op.batch_alter_table('localconfigs', schema=None) as batch:
        batch.add_column(sa.Column('loglevel', sa.Integer(), nullable=True))
25 |
26 |
def downgrade():
    """Drop the loglevel column from localconfigs."""
    with op.batch_alter_table('localconfigs', schema=None) as batch:
        batch.drop_column('loglevel')
33 |
--------------------------------------------------------------------------------
/migrations/versions/5c6e23a62577_add_specifiedurl.py:
--------------------------------------------------------------------------------
1 | """add specifiedurl
2 |
3 | Revision ID: 5c6e23a62577
4 | Revises: 55d303fd558e
5 | Create Date: 2022-07-29 15:30:08.183426
6 |
7 | """
8 | from alembic import op
9 | import sqlalchemy as sa
10 |
11 |
12 | # revision identifiers, used by Alembic.
13 | revision = '5c6e23a62577'
14 | down_revision = '55d303fd558e'
15 | branch_labels = None
16 | depends_on = None
17 |
18 |
def upgrade():
    """Replace the scrapingurl column with specifiedsource/specifiedurl."""
    with op.batch_alter_table('scrapingrecords', schema=None) as batch:
        batch.add_column(sa.Column('specifiedsource', sa.String(), nullable=True, comment='specified scraping site'))
        batch.add_column(sa.Column('specifiedurl', sa.String(), nullable=True, comment='specified scraping site url'))
        batch.drop_column('scrapingurl')
27 |
28 |
def downgrade():
    """Restore scrapingurl and drop the specified-source columns."""
    with op.batch_alter_table('scrapingrecords', schema=None) as batch:
        batch.add_column(sa.Column('scrapingurl', sa.VARCHAR(), nullable=True))
        for name in ('specifiedurl', 'specifiedsource'):
            batch.drop_column(name)
37 |
--------------------------------------------------------------------------------
/migrations/versions/6749dee88757_add_srcfolder.py:
--------------------------------------------------------------------------------
1 | """Add srcfolder
2 |
3 | Revision ID: 6749dee88757
4 | Revises: 67cbdd6413c3
5 | Create Date: 2021-12-21 12:37:44.402694
6 |
7 | """
8 | from alembic import op
9 | import sqlalchemy as sa
10 |
11 |
12 | # revision identifiers, used by Alembic.
13 | revision = '6749dee88757'
14 | down_revision = '67cbdd6413c3'
15 | branch_labels = None
16 | depends_on = None
17 |
18 |
def upgrade():
    """Swap the boolean success column for a srcfolder string on transrecords."""
    with op.batch_alter_table('transrecords', schema=None) as batch:
        batch.add_column(sa.Column('srcfolder', sa.String(), nullable=True))
        batch.drop_column('success')
26 |
27 |
def downgrade():
    """Bring back the success flag and drop srcfolder."""
    with op.batch_alter_table('transrecords', schema=None) as batch:
        batch.add_column(sa.Column('success', sa.BOOLEAN(), nullable=True))
        batch.drop_column('srcfolder')
35 |
--------------------------------------------------------------------------------
/migrations/versions/67cbdd6413c3_update_series_record.py:
--------------------------------------------------------------------------------
1 | """update series record
2 |
3 | Revision ID: 67cbdd6413c3
4 | Revises: 25cc4c9d1c33
5 | Create Date: 2021-10-18 14:09:48.947312
6 |
7 | """
8 | from alembic import op
9 | import sqlalchemy as sa
10 |
11 |
12 | # revision identifiers, used by Alembic.
13 | revision = '67cbdd6413c3'
14 | down_revision = '25cc4c9d1c33'
15 | branch_labels = None
16 | depends_on = None
17 |
18 |
def upgrade():
    """Add series/episode bookkeeping columns to transrecords."""
    new_columns = (
        ('status', sa.Integer),
        ('topfolder', sa.String),
        ('secondfolder', sa.String),
        ('isepisode', sa.Boolean),
        ('season', sa.Integer),
        ('episode', sa.Integer),
    )
    with op.batch_alter_table('transrecords', schema=None) as batch:
        for name, column_type in new_columns:
            batch.add_column(sa.Column(name, column_type(), nullable=True))
30 |
31 |
def downgrade():
    """Drop the series/episode columns in reverse order of creation."""
    with op.batch_alter_table('transrecords', schema=None) as batch:
        for name in ('episode', 'season', 'isepisode', 'secondfolder', 'topfolder', 'status'):
            batch.drop_column(name)
43 |
--------------------------------------------------------------------------------
/migrations/versions/76a849adb003_update_records.py:
--------------------------------------------------------------------------------
1 | """update records
2 |
3 | Revision ID: 76a849adb003
4 | Revises: 484e6db26cb9
5 | Create Date: 2024-03-02 17:19:19.745250
6 |
7 | """
8 | from alembic import op
9 | import sqlalchemy as sa
10 |
11 |
12 | # revision identifiers, used by Alembic.
13 | revision = '76a849adb003'
14 | down_revision = '484e6db26cb9'
15 | branch_labels = None
16 | depends_on = None
17 |
18 |
def upgrade():
    """Convert the ignored column from INTEGER to Boolean on both record tables."""
    for table in ('scrapingrecords', 'transrecords'):
        with op.batch_alter_table(table, schema=None) as batch:
            batch.alter_column('ignored',
                               existing_type=sa.INTEGER(),
                               type_=sa.Boolean(),
                               existing_nullable=True)
34 |
35 |
def downgrade():
    """Revert the ignored column back to INTEGER on both record tables."""
    for table in ('transrecords', 'scrapingrecords'):
        with op.batch_alter_table(table, schema=None) as batch:
            batch.alter_column('ignored',
                               existing_type=sa.Boolean(),
                               type_=sa.INTEGER(),
                               existing_nullable=True)
51 |
--------------------------------------------------------------------------------
/migrations/versions/825048469450_update_scraping_tags.py:
--------------------------------------------------------------------------------
1 | """update scraping tags
2 |
3 | Revision ID: 825048469450
4 | Revises: e76363a21cbe
5 | Create Date: 2022-02-22 16:53:40.433388
6 |
7 | """
8 | from alembic import op
9 | import sqlalchemy as sa
10 |
11 |
12 | # revision identifiers, used by Alembic.
13 | revision = '825048469450'
14 | down_revision = 'e76363a21cbe'
15 | branch_labels = None
16 | depends_on = None
17 |
18 |
def upgrade():
    """Add leak/uncensored/hack tag flags to scrapingrecords."""
    tag_columns = (
        ('leaktag', 'leak tag'),
        ('uncensoredtag', 'uncensored tag'),
        ('hacktag', 'hack tag'),
    )
    with op.batch_alter_table('scrapingrecords', schema=None) as batch:
        for name, note in tag_columns:
            batch.add_column(sa.Column(name, sa.Boolean(), nullable=True, comment=note))
27 |
28 |
def downgrade():
    """Drop the tag flags in reverse order of creation."""
    with op.batch_alter_table('scrapingrecords', schema=None) as batch:
        for name in ('hacktag', 'uncensoredtag', 'leaktag'):
            batch.drop_column(name)
37 |
--------------------------------------------------------------------------------
/migrations/versions/98894b006faa_add_transmission.py:
--------------------------------------------------------------------------------
1 | """add transmission
2 |
3 | Revision ID: 98894b006faa
4 | Revises: ab98111cb095
5 | Create Date: 2022-06-22 17:15:52.459637
6 |
7 | """
8 | from alembic import op
9 | import sqlalchemy as sa
10 |
11 |
12 | # revision identifiers, used by Alembic.
13 | revision = '98894b006faa'
14 | down_revision = 'ab98111cb095'
15 | branch_labels = None
16 | depends_on = None
17 |
18 |
def upgrade():
    """Add Transmission client settings (url/username/password/prefix) to localconfigs."""
    with op.batch_alter_table('localconfigs', schema=None) as batch:
        for name in ('tr_url', 'tr_username', 'tr_passwd', 'tr_prefix'):
            batch.add_column(sa.Column(name, sa.String(), nullable=True))
28 |
29 |
def downgrade():
    """Drop the Transmission settings in reverse order of creation."""
    with op.batch_alter_table('localconfigs', schema=None) as batch:
        for name in ('tr_prefix', 'tr_passwd', 'tr_username', 'tr_url'):
            batch.drop_column(name)
39 |
--------------------------------------------------------------------------------
/migrations/versions/98e01c6ecbdc_add_auto_watch.py:
--------------------------------------------------------------------------------
1 | """add auto watch
2 |
3 | Revision ID: 98e01c6ecbdc
4 | Revises: b7f1f83c525e
5 | Create Date: 2024-05-09 22:52:16.179646
6 |
7 | """
8 | from alembic import op
9 | import sqlalchemy as sa
10 |
11 |
12 | # revision identifiers, used by Alembic.
13 | revision = '98e01c6ecbdc'
14 | down_revision = 'b7f1f83c525e'
15 | branch_labels = None
16 | depends_on = None
17 |
18 |
def upgrade():
    """Add an auto_watch flag to both config tables."""
    for table in ('scrapingconfigs', 'transferconfigs'):
        with op.batch_alter_table(table, schema=None) as batch:
            batch.add_column(sa.Column('auto_watch', sa.Boolean(), nullable=True))
28 |
29 |
def downgrade():
    """Drop the auto_watch flag from both config tables (reverse order)."""
    for table in ('transferconfigs', 'scrapingconfigs'):
        with op.batch_alter_table(table, schema=None) as batch:
            batch.drop_column('auto_watch')
39 |
--------------------------------------------------------------------------------
/migrations/versions/9b27f48ac200_rename_table.py:
--------------------------------------------------------------------------------
1 | """rename table
2 |
3 | Revision ID: 9b27f48ac200
4 | Revises: 01f93f4b9988
5 | Create Date: 2022-04-15 10:22:54.470754
6 |
7 | """
8 | from alembic import op
9 | import sqlalchemy as sa
10 |
11 |
12 | # revision identifiers, used by Alembic.
13 | revision = '9b27f48ac200'
14 | down_revision = '01f93f4b9988'
15 | branch_labels = None
16 | depends_on = None
17 |
18 |
def upgrade():
    """Rename the legacy ``settings`` table to ``scrapingconfigs``.

    Any leftover ``scrapingconfigs`` table is dropped first so the rename
    cannot collide with it; if the rename itself fails (no ``settings``
    table present), an empty target table is created instead.
    """
    try:
        op.drop_table('scrapingconfigs')
    except Exception:
        # No pre-existing table to clean up; a bare `except:` here would also
        # swallow SystemExit/KeyboardInterrupt, so catch Exception instead.
        pass
    try:
        op.rename_table('settings', 'scrapingconfigs')
    except Exception:
        # Rename failed; fall back to creating the table.
        # NOTE(review): op.create_table() with no columns is invalid DDL on
        # most backends - confirm this fallback path is ever exercised.
        op.create_table('scrapingconfigs')
30 |
31 |
def downgrade():
    """Rename scrapingconfigs back to its historical settings name."""
    op.rename_table('scrapingconfigs', 'settings')
36 |
--------------------------------------------------------------------------------
/migrations/versions/a76b476b9525_update_morestoryline.py:
--------------------------------------------------------------------------------
1 | """update morestoryline
2 |
3 | Revision ID: a76b476b9525
4 | Revises: f43a0835f0b5
5 | Create Date: 2022-06-07 10:33:06.982589
6 |
7 | """
8 | from alembic import op
9 | import sqlalchemy as sa
10 | from sqlalchemy.sql import expression
11 |
12 | # revision identifiers, used by Alembic.
13 | revision = 'a76b476b9525'
14 | down_revision = 'f43a0835f0b5'
15 | branch_labels = None
16 | depends_on = None
17 |
18 |
def upgrade():
    """Add the morestoryline switch (server-side default: true) to scrapingconfigs."""
    with op.batch_alter_table('scrapingconfigs', schema=None) as batch:
        batch.add_column(sa.Column('morestoryline', sa.Boolean(), server_default=expression.true(), nullable=True))
25 |
26 |
def downgrade():
    """Drop the morestoryline switch again."""
    with op.batch_alter_table('scrapingconfigs', schema=None) as batch:
        batch.drop_column('morestoryline')
33 |
--------------------------------------------------------------------------------
/migrations/versions/a9c9e7063598_update_refresh_url.py:
--------------------------------------------------------------------------------
1 | """Update refresh url
2 |
3 | Revision ID: a9c9e7063598
4 | Revises: ef9c9dbb8a8e
5 | Create Date: 2021-08-21 13:36:32.737312
6 |
7 | """
8 | from alembic import op
9 | import sqlalchemy as sa
10 |
11 |
12 | # revision identifiers, used by Alembic.
13 | revision = 'a9c9e7063598'
14 | down_revision = 'ef9c9dbb8a8e'
15 | branch_labels = None
16 | depends_on = None
17 |
18 |
def upgrade():
    """Add a nullable refresh_url string column to the settings table."""
    with op.batch_alter_table('settings', schema=None) as batch:
        batch.add_column(sa.Column('refresh_url', sa.String(), nullable=True))
25 |
26 |
def downgrade():
    """Drop the refresh_url column from the settings table."""
    with op.batch_alter_table('settings', schema=None) as batch:
        batch.drop_column('refresh_url')
33 |
--------------------------------------------------------------------------------
/migrations/versions/ab98111cb095_rename_config_table.py:
--------------------------------------------------------------------------------
1 | """rename config table
2 |
3 | Revision ID: ab98111cb095
4 | Revises: 17b29e2630f4
5 | Create Date: 2022-06-16 14:44:49.375653
6 |
7 | """
8 | from alembic import op
9 | import sqlalchemy as sa
10 | from sqlalchemy.sql import expression
11 |
12 |
13 | # revision identifiers, used by Alembic.
14 | revision = 'ab98111cb095'
15 | down_revision = '17b29e2630f4'
16 | branch_labels = None
17 | depends_on = None
18 |
19 |
def upgrade():
    """Rename ``notificationconfigs`` to ``localconfigs`` and add task_clean.

    Any leftover ``localconfigs`` table is dropped first so the rename cannot
    collide with it; if the rename itself fails, an empty target table is
    created instead.
    """
    # op.drop_table('notificationconfigs')
    try:
        op.drop_table('localconfigs')
    except Exception:
        # No pre-existing table to clean up; a bare `except:` here would also
        # swallow SystemExit/KeyboardInterrupt, so catch Exception instead.
        pass
    try:
        op.rename_table('notificationconfigs', 'localconfigs')
        with op.batch_alter_table('localconfigs', schema=None) as batch_op:
            batch_op.add_column(sa.Column('task_clean', sa.Boolean(), server_default=expression.false(), nullable=True))
    except Exception:
        # Rename (or the follow-up ALTER) failed; fall back to creating the table.
        # NOTE(review): op.create_table() with no columns is invalid DDL on
        # most backends - confirm this fallback path is ever exercised.
        op.create_table('localconfigs')
34 |
35 |
def downgrade():
    """Rename localconfigs back to notificationconfigs."""
    # NOTE(review): the task_clean column added by upgrade() is not dropped here.
    op.rename_table('localconfigs', 'notificationconfigs')
40 |
--------------------------------------------------------------------------------
/migrations/versions/ae24eb9602af_update_multithread.py:
--------------------------------------------------------------------------------
1 | """update multithread
2 |
3 | Revision ID: ae24eb9602af
4 | Revises: 98e01c6ecbdc
5 | Create Date: 2024-09-06 21:25:23.629679
6 |
7 | """
8 | from alembic import op
9 | import sqlalchemy as sa
10 |
11 |
12 | # revision identifiers, used by Alembic.
13 | revision = 'ae24eb9602af'
14 | down_revision = '98e01c6ecbdc'
15 | branch_labels = None
16 | depends_on = None
17 |
18 |
def upgrade():
    """Add a nullable threads_num integer column to scrapingconfigs."""
    with op.batch_alter_table('scrapingconfigs', schema=None) as batch:
        batch.add_column(sa.Column('threads_num', sa.Integer(), nullable=True))
25 |
26 |
def downgrade():
    """Drop the threads_num column from scrapingconfigs."""
    with op.batch_alter_table('scrapingconfigs', schema=None) as batch:
        batch.drop_column('threads_num')
33 |
--------------------------------------------------------------------------------
/migrations/versions/afd66a240ba0_add_is_sym_relative_path.py:
--------------------------------------------------------------------------------
1 | """add is_sym_relative_path
2 |
3 | Revision ID: afd66a240ba0
4 | Revises: ae24eb9602af
5 | Create Date: 2024-12-31 23:29:47.864965
6 |
7 | """
8 | from alembic import op
9 | import sqlalchemy as sa
10 |
11 |
12 | # revision identifiers, used by Alembic.
13 | revision = 'afd66a240ba0'
14 | down_revision = 'ae24eb9602af'
15 | branch_labels = None
16 | depends_on = None
17 |
18 |
def upgrade():
    """Add an is_sym_relative_path flag to both config tables."""
    for table in ('scrapingconfigs', 'transferconfigs'):
        with op.batch_alter_table(table, schema=None) as batch:
            batch.add_column(sa.Column('is_sym_relative_path', sa.Boolean(), nullable=True))
28 |
29 |
def downgrade():
    """Drop the is_sym_relative_path flag from both config tables (reverse order)."""
    for table in ('transferconfigs', 'scrapingconfigs'):
        with op.batch_alter_table(table, schema=None) as batch:
            batch.drop_column('is_sym_relative_path')
39 |
--------------------------------------------------------------------------------
/migrations/versions/b7f1f83c525e_movie_forced_name.py:
--------------------------------------------------------------------------------
1 | """movie forced name
2 |
3 | Revision ID: b7f1f83c525e
4 | Revises: 76a849adb003
5 | Create Date: 2024-03-02 20:50:41.966667
6 |
7 | """
8 | from alembic import op
9 | import sqlalchemy as sa
10 |
11 |
12 | # revision identifiers, used by Alembic.
13 | revision = 'b7f1f83c525e'
14 | down_revision = '76a849adb003'
15 | branch_labels = None
16 | depends_on = None
17 |
18 |
def upgrade():
    """Add a nullable forcedname string column to transrecords."""
    with op.batch_alter_table('transrecords', schema=None) as batch:
        batch.add_column(sa.Column('forcedname', sa.String(), nullable=True, comment='forced name'))
25 |
26 |
def downgrade():
    """Drop the forcedname column from transrecords."""
    with op.batch_alter_table('transrecords', schema=None) as batch:
        batch.drop_column('forcedname')
33 |
--------------------------------------------------------------------------------
/migrations/versions/c6d3cfb805e7_update_config.py:
--------------------------------------------------------------------------------
1 | """Update config
2 |
3 | Revision ID: c6d3cfb805e7
4 | Revises: f6ca967bad99
5 | Create Date: 2021-08-12 17:04:20.425316
6 |
7 | """
8 | from alembic import op
9 | import sqlalchemy as sa
10 |
11 |
12 | # revision identifiers, used by Alembic.
13 | revision = 'c6d3cfb805e7'
14 | down_revision = 'f6ca967bad99'
15 | branch_labels = None
16 | depends_on = None
17 |
18 |
def upgrade():
    """Add multi-scraping/async flags plus javlib cookies to settings, and retire soft_link."""
    new_columns = [
        sa.Column('multi_scraping', sa.Boolean(), nullable=True, comment='Multiple file scraping at the same time'),
        sa.Column('async_request', sa.Boolean(), nullable=True, comment='Scrape a movie asynchronously'),
        sa.Column('cookies_javlib', sa.String(), nullable=True),
    ]
    for column in new_columns:
        op.add_column('settings', column)
    op.drop_column('settings', 'soft_link')
26 |
27 |
def downgrade():
    """Restore soft_link and drop the columns added by this revision."""
    op.add_column('settings', sa.Column('soft_link', sa.BOOLEAN(), nullable=True))
    for name in ('cookies_javlib', 'async_request', 'multi_scraping'):
        op.drop_column('settings', name)
35 |
--------------------------------------------------------------------------------
/migrations/versions/d029a987b48c_add_minisize.py:
--------------------------------------------------------------------------------
1 | """add minisize
2 |
3 | Revision ID: d029a987b48c
4 | Revises: 825048469450
5 | Create Date: 2022-04-11 11:49:53.822184
6 |
7 | """
8 | from alembic import op
9 | import sqlalchemy as sa
10 |
11 |
12 | # revision identifiers, used by Alembic.
13 | revision = 'd029a987b48c'
14 | down_revision = '825048469450'
15 | branch_labels = None
16 | depends_on = None
17 |
18 |
def upgrade():
    """Add an `escape_size` (minimum-size filter, presumably in MB — see its
    use with `* 1024 * 1024` in the scraping logic) column to both settings
    and transferconfigs."""
    # ### commands auto generated by Alembic - please adjust! ###
    # batch_alter_table: required for SQLite, which cannot ALTER in place
    with op.batch_alter_table('settings', schema=None) as batch_op:
        batch_op.add_column(sa.Column('escape_size', sa.Integer(), nullable=True))

    with op.batch_alter_table('transferconfigs', schema=None) as batch_op:
        batch_op.add_column(sa.Column('escape_size', sa.Integer(), nullable=True))

    # ### end Alembic commands ###
28 |
29 |
def downgrade():
    """Reverse: drop `escape_size` from both tables (reverse order of upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('transferconfigs', schema=None) as batch_op:
        batch_op.drop_column('escape_size')

    with op.batch_alter_table('settings', schema=None) as batch_op:
        batch_op.drop_column('escape_size')

    # ### end Alembic commands ###
39 |
--------------------------------------------------------------------------------
/migrations/versions/de30b0f0cf2e_add_cookies.py:
--------------------------------------------------------------------------------
1 | """Add cookies
2 |
3 | Revision ID: de30b0f0cf2e
4 | Revises:
5 | Create Date: 2021-05-31 16:49:00.204118
6 |
7 | """
8 | from alembic import op
9 | import sqlalchemy as sa
10 |
11 |
12 | # revision identifiers, used by Alembic.
13 | revision = 'de30b0f0cf2e'
14 | down_revision = None
15 | branch_labels = None
16 | depends_on = None
17 |
18 |
def upgrade():
    """Root migration: add the `cookies_javdb` column to settings."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('settings', sa.Column('cookies_javdb', sa.String(), nullable=True))
    # ### end Alembic commands ###
23 |
24 |
def downgrade():
    """Reverse: drop `cookies_javdb` from settings."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('settings', 'cookies_javdb')
    # ### end Alembic commands ###
29 |
--------------------------------------------------------------------------------
/migrations/versions/e76363a21cbe_update_site_sources.py:
--------------------------------------------------------------------------------
1 | """update site sources
2 |
3 | Revision ID: e76363a21cbe
4 | Revises: 6749dee88757
5 | Create Date: 2022-02-22 09:34:12.817637
6 |
7 | """
8 | from alembic import op
9 | import sqlalchemy as sa
10 |
11 |
12 | # revision identifiers, used by Alembic.
13 | revision = 'e76363a21cbe'
14 | down_revision = '6749dee88757'
15 | branch_labels = None
16 | depends_on = None
17 |
18 |
def upgrade():
    """Replace the `website_priority` settings column with `site_sources`."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('settings', schema=None) as batch_op:
        batch_op.add_column(sa.Column('site_sources', sa.String(), nullable=True))
        batch_op.drop_column('website_priority')

    # ### end Alembic commands ###
26 |
27 |
def downgrade():
    """Reverse: restore `website_priority` and drop `site_sources`.
    NOTE: column data is not migrated in either direction."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('settings', schema=None) as batch_op:
        batch_op.add_column(sa.Column('website_priority', sa.VARCHAR(), nullable=True))
        batch_op.drop_column('site_sources')

    # ### end Alembic commands ###
35 |
--------------------------------------------------------------------------------
/migrations/versions/ef9c9dbb8a8e_add_refresh_url_for_transfer.py:
--------------------------------------------------------------------------------
1 | """Add refresh url for transfer
2 |
3 | Revision ID: ef9c9dbb8a8e
4 | Revises: c6d3cfb805e7
5 | Create Date: 2021-08-21 12:39:01.825614
6 |
7 | """
8 | from alembic import op
9 | import sqlalchemy as sa
10 |
11 |
12 | # revision identifiers, used by Alembic.
13 | revision = 'ef9c9dbb8a8e'
14 | down_revision = 'c6d3cfb805e7'
15 | branch_labels = None
16 | depends_on = None
17 |
18 |
def upgrade():
    """Drop obsolete settings flags and add `refresh_url` to transferconfigs.

    NOTE(review): `soft_link` is also dropped by the direct parent revision
    c6d3cfb805e7 — dropping it again here looks like it would fail on a
    database that ran that parent; verify the revision chain before reuse.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('settings', schema=None) as batch_op:
        batch_op.drop_column('auto_exit')
        batch_op.drop_column('soft_link')
        batch_op.drop_column('debug_info')

    with op.batch_alter_table('transferconfigs', schema=None) as batch_op:
        batch_op.add_column(sa.Column('refresh_url', sa.String(), nullable=True))

    # ### end Alembic commands ###
30 |
31 |
def downgrade():
    """Reverse: drop `refresh_url` and restore the three boolean settings flags."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('transferconfigs', schema=None) as batch_op:
        batch_op.drop_column('refresh_url')

    with op.batch_alter_table('settings', schema=None) as batch_op:
        batch_op.add_column(sa.Column('debug_info', sa.BOOLEAN(), nullable=True))
        batch_op.add_column(sa.Column('soft_link', sa.BOOLEAN(), nullable=True))
        batch_op.add_column(sa.Column('auto_exit', sa.BOOLEAN(), nullable=True))

    # ### end Alembic commands ###
43 |
--------------------------------------------------------------------------------
/migrations/versions/f43a0835f0b5_update_proxy.py:
--------------------------------------------------------------------------------
1 | """update proxy
2 |
3 | Revision ID: f43a0835f0b5
4 | Revises: 9b27f48ac200
5 | Create Date: 2022-05-31 17:43:48.924481
6 |
7 | """
8 | from alembic import op
9 | import sqlalchemy as sa
10 |
11 |
12 | # revision identifiers, used by Alembic.
13 | revision = 'f43a0835f0b5'
14 | down_revision = '9b27f48ac200'
15 | branch_labels = None
16 | depends_on = None
17 |
18 |
def upgrade():
    """Add proxy settings (enable flag, type, address) to notificationconfigs."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('notificationconfigs', schema=None) as batch_op:
        batch_op.add_column(sa.Column('proxy_enable', sa.Boolean(), nullable=True))
        # server_default gives existing rows a usable proxy configuration
        batch_op.add_column(sa.Column('proxy_type', sa.String(), server_default='socks5h', nullable=True))
        batch_op.add_column(sa.Column('proxy_address', sa.String(), server_default='127.0.0.1:1080', nullable=True))
    # ### end Alembic commands ###
26 |
27 |
def downgrade():
    """Reverse: drop the three proxy columns (reverse order of upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('notificationconfigs', schema=None) as batch_op:
        batch_op.drop_column('proxy_address')
        batch_op.drop_column('proxy_type')
        batch_op.drop_column('proxy_enable')
    # ### end Alembic commands ###
35 |
--------------------------------------------------------------------------------
/migrations/versions/f6ca967bad99_add_scraping_cd_num.py:
--------------------------------------------------------------------------------
1 | """Add scraping cd num
2 |
3 | Revision ID: f6ca967bad99
4 | Revises: 43e878537ab5
5 | Create Date: 2021-08-07 01:17:29.852648
6 |
7 | """
8 | from alembic import op
9 | import sqlalchemy as sa
10 |
11 |
12 | # revision identifiers, used by Alembic.
13 | revision = 'f6ca967bad99'
14 | down_revision = '43e878537ab5'
15 | branch_labels = None
16 | depends_on = None
17 |
18 |
def upgrade():
    """Add a `cdnum` (multi-part/CD index) column to scrapingrecords."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('scrapingrecords', sa.Column('cdnum', sa.Integer(), nullable=True, comment='cd num'))
    # ### end Alembic commands ###
23 |
24 |
def downgrade():
    """Reverse: drop `cdnum` from scrapingrecords."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('scrapingrecords', 'cdnum')
    # ### end Alembic commands ###
29 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "dependencies": {
3 | "@google/semantic-release-replace-plugin": "^1.1.0",
4 | "@semantic-release/changelog": "^6.0.1",
5 | "@semantic-release/exec": "^6.0.3",
6 | "@semantic-release/git": "^10.0.1",
7 | "is-ci": "^3.0.1",
8 | "semantic-release": "^19.0.2"
9 | },
10 | "devDependencies": {
11 | "@commitlint/cli": "^16.2.1",
12 | "@commitlint/config-conventional": "^16.2.1",
13 | "commitlint": "^16.2.1",
14 | "pinst": "^3.0.0"
15 | },
16 | "commitlint": {
17 | "extends": [
18 | "@commitlint/config-conventional"
19 | ]
20 | },
21 | "release": {
22 | "plugins": [
23 | "@semantic-release/commit-analyzer",
24 | "@semantic-release/release-notes-generator",
25 | "@semantic-release/changelog",
26 | [
27 | "@google/semantic-release-replace-plugin",
28 | {
29 | "replacements": [
30 | {
31 | "files": [
32 | "src/config.py"
33 | ],
34 | "from": "VERSION = '.*'",
35 | "to": "VERSION = '${nextRelease.version}'",
36 | "results": [
37 | {
38 | "file": "src/config.py",
39 | "hasChanged": true,
40 | "numMatches": 1,
41 | "numReplacements": 1
42 | }
43 | ],
44 | "countMatches": true
45 | }
46 | ]
47 | }
48 | ],
49 | [
50 | "@semantic-release/exec",
51 | {
52 | "publishCmd": "echo 'VERSION_TAG=${nextRelease.version}' >> $GITHUB_ENV"
53 | }
54 | ],
55 | [
56 | "@semantic-release/git",
57 | {
58 | "assets": [
59 | "src/config.py",
60 | "CHANGELOG.md"
61 | ]
62 | }
63 | ],
64 | "@semantic-release/github"
65 | ]
66 | },
67 | "scripts": {
68 | "prepare": "is-ci",
69 | "postinstall": "is-ci",
70 | "prepublishOnly": "pinst --disable",
71 | "postpublish": "pinst --enable"
72 | }
73 | }
74 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | Flask
2 | Flask-Migrate
3 | Flask-APScheduler
4 | Pillow
5 | lxml
6 | beautifulsoup4
7 | MechanicalSoup
8 | cloudscraper
9 | transmission_rpc
10 | scrapinglib
11 | flask-sock
12 |
--------------------------------------------------------------------------------
/scripts/qbcomplete.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # qbcomplete.sh "%F"
3 | # 如果docker的挂载路径与qbit挂载路径不同名的话,需要用以下命令a="%F"&& sh qbcomplete.sh ${a/qbit挂载路径/ikaros挂载路径}
4 |
5 | QB_DOWNLOADS="${1}"
6 | curl -XPOST http://127.0.0.1:12346/api/client -H 'Content-Type: application/json' \
7 | --data @<(cat < 0:
55 | # 如果是多集,则只清理当前文件
56 | cleanFilebyFilter(folder, filter)
57 | else:
58 | cleanFolderbyFilter(folder, filter)
59 | # 过滤
60 | ignore = False
61 | if movie_info.ignored:
62 | ignore = True
63 | elif conf.escape_size and conf.escape_size > 0:
64 | minsize = conf.escape_size * 1024 * 1024
65 | filesize = os.path.getsize(file_path)
66 | if filesize < minsize:
67 | ignore = True
68 | current_app.logger.info('[!] ' + str(file_path) + ' below size limit, will pass')
69 | if not ignore:
70 | num_info = FileNumInfo(file_path)
71 | # 查询是否有额外设置
72 | if movie_info.scrapingname and movie_info.scrapingname != '':
73 | num_info.num = movie_info.scrapingname
74 | if movie_info.cnsubtag:
75 | num_info.chs_tag = True
76 | if movie_info.leaktag:
77 | num_info.leak_tag = True
78 | if movie_info.uncensoredtag:
79 | num_info.uncensored_tag = True
80 | if movie_info.hacktag:
81 | num_info.hack_tag = True
82 | if movie_info.cdnum:
83 | num_info.updateCD(movie_info.cdnum)
84 | current_app.logger.info("[!]Making Data for [{}], the number is [{}]".format(file_path, num_info.num))
85 | movie_info.status = 4
86 | movie_info.scrapingname = num_info.num
87 | movie_info.updatetime = datetime.datetime.now()
88 | scrapingrecordService.commit()
89 | # main proccess
90 | (flag, new_path) = core_main(file_path, num_info, conf, movie_info.specifiedsource, movie_info.specifiedurl)
91 | if flag:
92 | movie_info.status = 1
93 | (filefolder, newname) = os.path.split(new_path)
94 | movie_info.destname = newname
95 | movie_info.destpath = new_path
96 | movie_info.linktype = conf.link_type
97 | movie_info.cnsubtag = num_info.chs_tag
98 | movie_info.leaktag = num_info.leak_tag
99 | movie_info.uncensoredtag = num_info.uncensored_tag
100 | movie_info.hacktag = num_info.hack_tag
101 | if num_info.multipart_tag:
102 | movie_info.cdnum = num_info.part[3:]
103 | else:
104 | # 失败
105 | movie_info.status = 2
106 | movie_info.destpath = new_path
107 | else:
108 | # 忽略
109 | movie_info.status = 3
110 | movie_info.updatetime = datetime.datetime.now()
111 | scrapingrecordService.commit()
112 | else:
113 | current_app.logger.info("[!]Already done: [{}]".format(file_path))
114 | try:
115 | current_app.logger.info(
116 | f"[!]Checking dest file status: type {conf.link_type} and destpath [{movie_info.destpath}]")
117 | if movie_info and movie_info.status == 1:
118 | if conf.link_type == 0:
119 | if os.path.exists(movie_info.destpath) and not pathlib.Path(movie_info.destpath).is_symlink():
120 | current_app.logger.info(f"[!]Checking file status: OK")
121 | else:
122 | current_app.logger.error(f"[!]Checking file status: file missing")
123 | if os.path.exists(movie_info.srcpath):
124 | shutil.move(movie_info.srcpath, movie_info.destpath)
125 | current_app.logger.info(f"[!]Checking file status: fixed")
126 | elif conf.link_type == 1:
127 | if os.path.exists(movie_info.srcpath) and pathlib.Path(movie_info.destpath).is_symlink():
128 | current_app.logger.info(f"[!]Checking file status: OK")
129 | else:
130 | current_app.logger.error(f"[!]Checking file status: wrong symlink")
131 | elif conf.link_type == 2:
132 | if os.path.exists(movie_info.srcpath) and \
133 | os.path.exists(movie_info.destpath) and \
134 | os.path.samefile(movie_info.srcpath, movie_info.destpath):
135 | current_app.logger.info(f"[!]Checking file status: OK")
136 | else:
137 | current_app.logger.error(f"[!]Checking file status: file missing")
138 | linkFile(movie_info.srcpath, movie_info.destpath, 2)
139 | current_app.logger.info(f"[!]Checking file status: fixed")
140 | except Exception as e:
141 | current_app.logger.error(f"[!]Checking file status: ERROR")
142 | current_app.logger.error(e)
143 | except Exception as err:
144 | # Sometimes core_main may cause exception, and the status will stuck on 4(scraping)
145 | # So we have to set a defer func to handle this situation
146 | movie_info.status = 2 # set task as failed
147 | scrapingrecordService.commit()
148 | current_app.logger.error("[!] ERROR: [{}] ".format(file_path))
149 | current_app.logger.error(err)
150 | moveFailedFolder(file_path)
151 | current_app.logger.info("[*]======================================================")
152 |
153 |
def startScrapingAll(cid, folder=''):
    """ 启动入口
    返回
    0: 刮削失败
    1: 刮削完成,推送媒体库
    2: 刮削完成,推送媒体库异常
    3: 正在执行其他任务
    4: 任务中断
    """
    # Fix: the docstring above previously appeared *after* the import
    # statement, which made it a bare expression instead of the function
    # docstring; it is now first. Imports stay local to the function.
    import threading
    import time
    task = taskService.getTask('scrape')
    if task.status == 2:
        # another scrape task is already running
        return 3
    # taskService.updateTaskStatus(task, 2)
    task.status = 2
    task.cid = cid
    taskService.commit()

    conf = scrapingConfService.getConfig(cid)

    if folder == '':
        folder = conf.scraping_folder
    # escape_folders may be separated by ASCII or full-width commas
    movie_list = findAllMovies(folder, re.split("[,,]", conf.escape_folders))

    count = 0
    total = len(movie_list)
    taskService.updateTaskNum(task, total)
    current_app.logger.info("[*]======================================================")
    current_app.logger.info('[+]Find ' + str(total) + ' movies')

    threadPoolSize = conf.threads_num
    currentThreads = []
    for movie_path in movie_list:
        # refresh data: pick up an external stop request (status 0)
        task = taskService.getTask('scrape')
        if task.status == 0:
            return 4
        taskService.updateTaskFinished(task, count)
        percentage = str(count / total * 100)[:4] + '%'
        current_app.logger.debug('[!] - ' + percentage + ' [' + str(count) + '/' + str(total) + '] -')
        # worker threads need the real app object, not the context-local proxy
        currentApp = current_app._get_current_object()
        t = threading.Thread(target=create_data_and_move, args=(movie_path, conf, currentApp))
        currentThreads.append(t)
        t.start()
        time.sleep(2)
        # drain the pool when it is full, or when this was the last movie
        if len(currentThreads) == threadPoolSize or count == total - 1:
            for t in currentThreads:
                t.join()
            currentThreads = []
        count = count + 1

    taskService.updateTaskStatus(task, 1)

    if conf.refresh_url:
        current_app.logger.info("[+]Refresh MediaServer")
        if not refreshMediaServer(conf.refresh_url):
            return 2

    current_app.logger.info("[+] All scraping finished!!!")
    current_app.logger.info("[*]======================================================")
    return 1
215 |
216 |
def startScrapingSingle(cid, movie_path: str, forced=False):
    """Scrape a single movie file.

    Returns:
        0: scraping failed
        1: finished, media server refreshed
        2: finished, media server refresh failed
        3: another task is already running
    """
    task = taskService.getTask('scrape')
    if task.status == 2:
        return 3
    task.status = 2
    task.cid = cid
    taskService.commit()

    conf = scrapingConfService.getConfig(cid)
    current_app.logger.info("[+]Single start!!!")

    record = scrapingrecordService.queryByPath(movie_path)
    # Clean previous output only when forced, never scraped (0) or failed (2).
    if record and (forced or record.status in (0, 2)):
        dest_present = bool(record.destpath) and os.path.exists(record.destpath)
        if os.path.exists(movie_path):
            # Source still present alongside a produced dest file (link mode):
            # delete the previously generated files.
            if dest_present:
                scrapingrecordService.deleteRecordFiles(record, False)
        elif dest_present and os.path.isfile(record.destpath) \
                and not pathlib.Path(record.destpath).is_symlink():
            # Source gone but a real (non-symlink) dest file exists (move
            # mode): move it back so it can be scraped again.
            shutil.move(record.destpath, movie_path)

    if os.path.exists(movie_path) and os.path.isfile(movie_path):
        currentApp = current_app._get_current_object()
        create_data_and_move(movie_path, conf, currentApp, forced)
    else:
        scrapingrecordService.updateDeleted(record)

    taskService.updateTaskStatus(task, 1)

    if conf.refresh_url:
        current_app.logger.info("[+]Refresh MediaServer")
        if not refreshMediaServer(conf.refresh_url):
            return 2

    current_app.logger.info("[+]Single finished!!!")
    return 1
263 |
--------------------------------------------------------------------------------
/src/bizlogic/mediaserver.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | '''
3 | '''
4 | import requests
5 | from flask import current_app
6 |
def refreshMediaServer(url):
    """POST to *url* to ask the media server to refresh its library.

    Returns True on success, False when the request fails for any reason.
    """
    try:
        requests.post(url, timeout=5)
    except Exception as err:
        current_app.logger.error("[!] Refresh Media Err")
        current_app.logger.error(err)
        return False
    return True
15 |
--------------------------------------------------------------------------------
/src/bizlogic/rename.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import os
4 | import shutil
5 | from flask import current_app
6 | from ..utils.filehelper import video_type, ext_type
7 | from ..utils.regex import extractEpNum, matchEpPart, regexMatch
8 |
9 |
def findAllMatchedFiles(root):
    """Return the direct children of *root* whose extension is a known
    video or subtitle/extra type (non-recursive)."""
    matched = []
    for entry in os.listdir(root):
        full = os.path.join(root, entry)
        suffix = os.path.splitext(full)[1].lower()
        if suffix in video_type or suffix in ext_type:
            matched.append(full)
    return matched
18 |
19 |
def rename(root, base, newfix):
    """方法1: plain substring replacement — rename every matched media file
    under *root* whose name contains *base*, replacing it with *newfix*."""
    for src in findAllMatchedFiles(root):
        folder, fname = os.path.split(src)
        if base not in fname:
            continue
        newfull = os.path.join(folder, fname.replace(base, newfix))
        shutil.move(src, newfull)
        current_app.logger.info("rename [{}] to [{}]".format(src, newfull))
32 |
33 |
def renamebyreg(root, reg, prefix, preview: bool = True):
    """ 方法2
    正则匹配替换

    Normalize episode names: extract the episode fragment (custom regex, or
    the builtin matcher when *reg* is '') and rewrite it as ``prefix + num``.

    :param root: folder scanned (non-recursive) for media/subtitle files
    :param reg: custom regex; '' means use matchEpPart()
    :param prefix: episode prefix; '' defaults to "S01E"
    :param preview: when True only report, do not actually move files
    :return: list of {'original': ..., 'rename': ...} dicts for renamed files
    """
    tvs = findAllMatchedFiles(root)
    table = []
    if prefix == '':
        prefix = "S01E"
    for name in tvs:
        dirname, basename = os.path.split(name)
        current_app.logger.info("开始替换: " + basename)
        if reg == '':
            originep = matchEpPart(basename)
        else:
            results = regexMatch(basename, reg)
            # Fix: a filename that did not match the regex used to raise
            # IndexError on results[0] and abort the whole batch; treat an
            # empty result as "no episode found" and skip the file instead.
            originep = results[0] if results else None
        if originep:
            epresult = extractEpNum(originep)
            if epresult != '':
                current_app.logger.debug(originep + " "+epresult)
                # keep the delimiter style of the original fragment
                if originep[0] == '.':
                    renum = "." + prefix + epresult + "."
                elif originep[0] == '[':
                    renum = "[" + prefix + epresult + "]"
                else:
                    renum = " " + prefix + epresult + " "
                newname = basename.replace(originep, renum)
                current_app.logger.info("修正后: {}".format(newname))

                if not preview:
                    newfull = os.path.join(dirname, newname)
                    shutil.move(name, newfull)
                single = {'original': basename, 'rename': newname}
                table.append(single)

    return table
71 |
--------------------------------------------------------------------------------
/src/bizlogic/schedulertask.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import logging
4 | import os
5 | from logging import Logger
6 | from flask import current_app
7 |
8 | from .mediaserver import refreshMediaServer
9 | from ..utils.filehelper import checkFolderhasMedia
10 | from ..downloader.transmission import Transmission
11 | from ..bizlogic.manager import startScrapingAll
12 | from ..bizlogic.transfer import ctrlTransfer
13 | from ..service.configservice import scrapingConfService, transConfigService
14 | from ..service.taskservice import taskService
15 | from ..service.schedulerservice import schedulerService
16 | from ..service.recordservice import scrapingrecordService, transrecordService
17 | from ..service.configservice import localConfService
18 |
def cleanRecordsTask(delete=True, scheduler=None):
    """Record-cleanup task entry point.

    TODO: tie into the downloader. With delete-inside-emby enabled, ikaros
    detects missing files, flags them "pending delete", and really removes
    the files and torrents three days later.

    When called from a scheduler job, wraps the work in that scheduler's
    Flask app context; otherwise assumes a context is already active.
    """
    if not scheduler:
        cleanRecords(delete)
        return
    with scheduler.app.app_context():
        cleanRecords(delete)
31 |
def cleanRecords(delete=True):
    """Purge unavailable records right away (delete=True), or — when the
    local config enables task_clean — age them out via their deadtime and
    remove the associated torrents."""
    if delete:
        scrapingrecordService.cleanUnavailable()
        transrecordService.cleanUnavailable()
        return
    localconfig = localConfService.getConfig()
    if not localconfig.task_clean:
        return
    logger().debug('[-] cleanRecords: start!')
    # merge the two record sets, dropping duplicate paths
    paths = set(scrapingrecordService.deadtimetoMissingrecord())
    paths.update(transrecordService.deadtimetoMissingrecord())
    cleanTorrents(list(paths), localconfig)
    logger().debug('[-] cleanRecords: done!')
45 |
46 |
def cleanTorrents(records, conf):
    """ 删除关联的torrent
    Remove torrents whose media files have already been cleaned away.

    :param records: file paths of records flagged for deletion
    :param conf: local config holding the Transmission connection settings
    """
    if not records:
        return
    try:
        trurl = conf.tr_url
        trusername = conf.tr_username
        trpassword = conf.tr_passwd
        # tr_prefix format: "<folder as Transmission sees it>:<folder as ikaros sees it>"
        trfolder = conf.tr_prefix.split(':')[0]
        prefixed = conf.tr_prefix.split(':')[1]

        if not os.path.exists(prefixed):
            # Fix: typo in the log message ("does't" -> "doesn't")
            logger().info(f"[-] cleanRecords: Transmission mapped folder doesn't exist {trfolder} : {prefixed}")
            return
        trs = Transmission(trurl, trusername, trpassword)
        trs.login()
        for path in records:
            logger().debug(f'[-] cleanRecords: check {path}')
            torrents = trs.searchByPath(path)
            for torrent in torrents:
                logger().debug(f'[-] cleanRecords: find torrent {torrent.name}')
                downfolder = os.path.join(torrent.fields['downloadDir'], torrent.name)
                # translate Transmission's view of the path into ikaros' view
                fixedfolder = downfolder.replace(trfolder, prefixed, 1)
                # keep the torrent while its folder still holds media files
                if checkFolderhasMedia(fixedfolder):
                    continue
                trs.removeTorrent(torrent.id, True)
                logger().info(f'[-] cleanRecords: remove torrent {torrent.id} : {torrent.name}')
    except Exception as e:
        logger().error("[-] cleanRecords: You may not have set the transmission or an exception may have occurred.")
        logger().error(e)
78 |
79 |
def checkDirectoriesTask(scheduler=None):
    """Watch-folders task entry point.

    Periodically (scheduled every ~10-15 min) scans the top level of the
    transfer/scraping folders for new content, so no downloader hook script
    is required. Only runs when no other task is active (checked inside
    autoWatchDirectories). Wraps the work in the scheduler's Flask app
    context when a scheduler is supplied.
    """
    if not scheduler:
        autoWatchDirectories()
        return
    with scheduler.app.app_context():
        autoWatchDirectories()
91 |
def autoWatchDirectories():
    """Run every auto-watch enabled scraping and transfer configuration,
    skipping entirely while another task is running."""
    if taskService.haveRunningTask():
        return
    logger().debug('[!] watch Directories')
    logger().debug('watch scraping folder')
    for conf in scrapingConfService.getConfiglist():
        if not conf.auto_watch:
            continue
        try:
            logger().debug(f"watch {conf.scraping_folder}")
            startScrapingAll(conf.id)
        except Exception as ex:
            logger().error(ex)
    logger().debug('watch transfer folder')
    for conf in transConfigService.getConfiglist():
        if not conf.auto_watch:
            continue
        try:
            logger().debug(f"watch {conf.source_folder}")
            # "" = no specified files: process the whole source folder
            ctrlTransfer(conf.source_folder, conf.output_folder, conf.linktype,
                         conf.soft_prefix, conf.escape_folder, "",
                         conf.fix_series, conf.clean_others, conf.replace_CJK,
                         conf.refresh_url, conf.is_sym_relative_path)
        except Exception as ex:
            logger().error(ex)
126 |
def initScheduler():
    """Register the periodic jobs: record cleanup hourly, directory watch
    every 15 minutes."""
    sched = schedulerService.scheduler
    schedulerService.addJob('cleanRecords', cleanRecordsTask, args=[False, sched], seconds=3600)
    schedulerService.addJob('checkDirectories', checkDirectoriesTask, args=[sched], seconds=900)
132 |
133 |
def logger(scheduler=None) -> Logger:
    """Return the most specific logger available: the scheduler's app logger,
    then Flask's current_app logger, then the plain 'src' module logger."""
    if scheduler:
        return scheduler.app.logger
    if current_app:
        return current_app.logger
    return logging.getLogger('src')
141 |
--------------------------------------------------------------------------------
/src/bizlogic/transfer.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | '''
3 | '''
4 | import os
5 | import re
6 | import time
7 |
8 | from .mediaserver import refreshMediaServer
9 | from ..service.configservice import transConfigService
10 | from ..service.recordservice import transrecordService
11 | from ..service.taskservice import taskService
12 | from ..utils.regex import extractEpNum, matchSeason, matchEpPart, matchSeries, simpleMatchEp
13 | from ..utils.filehelper import linkFile, video_type, ext_type, replaceRegex, cleanFolderWithoutSuffix, \
14 | replaceCJK, cleanbyNameSuffix, cleanExtraMedia, moveSubs
15 | from flask import current_app
16 |
17 |
class FileInfo():
    """One media file's path components plus the episode fix-up state used
    by transfer().

    Path pieces: realpath = realfolder/realname, realname = name + ext,
    midfolder = path between the source root and the file (set via
    updateMidFolder), folders = midfolder split into components.
    """

    # class-level defaults; the immutable ones are safe to share
    realpath = ''
    realfolder = ''
    realname = ''
    folders = []

    midfolder = ''
    topfolder = ''
    secondfolder = ''
    name = ''
    ext = ''

    isepisode = False
    locked = False
    forcedseason = False
    originep = ''
    season = None
    epnum = None
    forcedname = ''

    finalpath = ''
    finalfolder = ''

    def __init__(self, filepath):
        """Split *filepath* into folder, filename and (name, ext)."""
        self.realpath = filepath
        (filefolder, filename) = os.path.split(filepath)
        self.realfolder = filefolder
        self.realname = filename
        (name, ext) = os.path.splitext(filename)
        self.name = name
        self.ext = ext
        # Fix: `folders` was only a shared class-level list; calling
        # fixMidFolder() before updateMidFolder() mutated it for every
        # instance. Give each instance its own list.
        self.folders = []

    def updateMidFolder(self, mid):
        """Record the folder path between source root and file; cache its
        components and the first/second folder names."""
        self.midfolder = mid
        folders = os.path.normpath(mid).split(os.path.sep)
        self.folders = folders
        self.topfolder = folders[0]
        if len(folders) > 1:
            self.secondfolder = folders[1]

    def fixMidFolder(self):
        """Rebuild the mid folder path from (possibly renamed) topfolder /
        secondfolder, appending secondfolder when it was added later."""
        temp = self.folders
        temp[0] = self.topfolder
        if self.secondfolder != '':
            if len(temp) > 1:
                temp[1] = self.secondfolder
            else:
                temp.append(self.secondfolder)
        return os.path.join(*temp)

    def updateForcedname(self, name):
        """Set an override for the output file name (without extension)."""
        self.forcedname = name

    def fixFinalName(self):
        """Return the output filename: forcedname (if set) or name, plus ext."""
        if self.forcedname != "":
            return self.forcedname + self.ext
        else:
            return self.name + self.ext

    def updateFinalPath(self, path):
        """Record the final destination path and its containing folder."""
        self.finalpath = path
        self.finalfolder = os.path.dirname(path)

    def parse(self):
        """Detect episode info from the filename."""
        # already a correctly named episode (SxxEyy)
        season, ep = matchSeries(self.name)
        if isinstance(season, int) and season > -1 and isinstance(ep, int) and ep > -1:
            self.isepisode = True
            self.season = season
            self.epnum = ep
            self.originep = 'Pass'
            return
        # an episode fragment that needs normalizing
        originep = matchEpPart(self.name)
        if originep:
            epresult = extractEpNum(originep)
            if epresult:
                self.isepisode = True
                self.originep = originep
                self.epnum = epresult

    def fixEpName(self, season):
        """Rewrite self.name so the episode part becomes SxxEyy for *season*."""
        if not self.epnum and self.forcedseason:
            # season was forced: still try to pick up an episode number
            current_app.logger.debug("强制`season`后,尝试获取`ep`")
            sep = simpleMatchEp(self.name)
            if sep:
                self.epnum = sep
                self.originep = 'Pass'
            else:
                return
        if isinstance(self.epnum, int):
            prefix = "S%02dE%02d" % (season, self.epnum)
        else:
            prefix = "S%02dE" % (season) + self.epnum

        if self.originep == 'Pass':
            # already well-formed: only replace the whole name if the prefix
            # is not present yet
            if prefix in self.name:
                return
            else:
                self.name = prefix
        else:
            if self.originep[0] == '.':
                renum = "." + prefix + "."
            elif self.originep[0] == '[':
                # NOTE(review): this emits spaces, not brackets, unlike the
                # '[' branch in rename.py's renamebyreg — confirm intended.
                renum = " " + prefix + " "
            else:
                renum = " " + prefix + " "
            current_app.logger.debug("替换内容:" + renum)
            newname = self.name.replace(self.originep, renum)
            self.name = newname
            current_app.logger.info("替换后: {}".format(newname))
130 |
131 |
def findAllVideos(root, src_folder, escape_folder, mode=1):
    """ find all videos (recursive)
    mode:
    :1 return FileInfo objects (parsed relative to src_folder)
    :2 return real path strings
    Folders whose basename is in escape_folder are skipped entirely.
    """
    if os.path.basename(root) in escape_folder:
        return []
    found = []
    for entry in os.listdir(root):
        full = os.path.join(root, entry)
        if os.path.isdir(full):
            found.extend(findAllVideos(full, src_folder, escape_folder, mode))
            continue
        if os.path.splitext(full)[1].lower() not in video_type:
            continue
        if mode == 2:
            found.append(full)
        elif mode == 1:
            info = FileInfo(full)
            # path between the source root and the file, without leading slashes
            mid = info.realfolder.replace(src_folder, '').lstrip("\\").lstrip("/")
            info.updateMidFolder(mid)
            if info.topfolder != '.':
                info.parse()
            found.append(info)
    return found
157 |
158 |
def autoTransfer(cid, real_path: str):
    """ 自动转移
    返回
    0: 转移失败
    1: 转移成功,推送媒体库
    2: 转移成功,推送媒体库异常
    """
    conf = transConfigService.getConfigById(cid)
    try:
        current_app.logger.debug("任务详情: 自动转移")
        # positional mapping into transfer(): real_path -> specified_files,
        # False -> clean_others_tag, replace_CJK -> simplify_tag,
        # fix_series -> fixseries_tag
        if not transfer(
            conf.source_folder,
            conf.output_folder,
            conf.linktype,
            conf.soft_prefix,
            conf.escape_folder,
            real_path,
            False,
            conf.replace_CJK,
            conf.fix_series,
            conf.is_sym_relative_path,
        ):
            return 0
        if conf.refresh_url:
            if not refreshMediaServer(conf.refresh_url):
                return 2
        return 1
    except Exception as e:
        # Fix: was a bare `except:` that silently swallowed everything,
        # including SystemExit/KeyboardInterrupt, with no trace of the error.
        current_app.logger.error(e)
        return 0
188 |
189 |
def ctrlTransfer(
    src_folder,
    dest_folder,
    linktype,
    prefix,
    escape_folders,
    specified_files,
    fix_series,
    clean_others,
    replace_CJK,
    refresh_url,
    is_sym_relative_path,
):
    """Run transfer() with the given config values, then optionally push a
    media-server refresh. Keyword arguments make the parameter remap explicit
    (replace_CJK -> simplify_tag, fix_series -> fixseries_tag)."""
    transfer(
        src_folder,
        dest_folder,
        linktype,
        prefix,
        escape_folders,
        specified_files=specified_files,
        clean_others_tag=clean_others,
        simplify_tag=replace_CJK,
        fixseries_tag=fix_series,
        is_sym_relative_path=is_sym_relative_path,
    )
    if refresh_url:
        refreshMediaServer(refresh_url)
217 |
218 |
def transfer(
    src_folder,
    dest_folder,
    linktype,
    prefix,
    escape_folders,
    specified_files="",
    clean_others_tag=True,
    simplify_tag=False,
    fixseries_tag=False,
    is_sym_relative_path=False,
):
    """ Link/organize videos from src_folder into dest_folder.

    If specified_files is set, only those files are processed and the
    "clean other files" step is skipped.

    :param linktype: 0 symlink, 1 hardlink (hardlinks always link from src_folder)
    :param prefix: source prefix used when building the link source path
    :param clean_others_tag: remove dest files not produced by this run
    :param simplify_tag: simplify top-folder names (strip CJK etc.)
    :param fixseries_tag: normalize series season/episode naming
    :return: False when another run is active or inputs are invalid,
             True otherwise (per-file errors are logged, not raised)
    """
    task = taskService.getTask('transfer')
    if task.status == 2:
        # another transfer run is already in progress
        return False
    taskService.updateTaskStatus(task, 2)

    try:
        movie_list = []

        if not specified_files:
            movie_list = findAllVideos(src_folder, src_folder, re.split("[,,]", escape_folders))
        else:
            # accept either an absolute path or one relative to src_folder
            if not os.path.exists(specified_files):
                specified_files = os.path.join(src_folder, specified_files)
            if not os.path.exists(specified_files):
                taskService.updateTaskStatus(task, 1)
                current_app.logger.error("[!] specified_files not exists")
                return False
            clean_others_tag = False
            if os.path.isdir(specified_files):
                movie_list = findAllVideos(specified_files, src_folder, re.split("[,,]", escape_folders))
            else:
                tf = FileInfo(specified_files)
                midfolder = tf.realfolder.replace(src_folder, '').lstrip("\\").lstrip("/")
                tf.updateMidFolder(midfolder)
                if tf.topfolder != '.':
                    tf.parse()
                movie_list.append(tf)
        count = 0
        total = str(len(movie_list))
        taskService.updateTaskNum(task, total)
        current_app.logger.debug('[+] Find ' + total+' movies')

        # hardlinks must come from the same filesystem: use the source dir
        if linktype == 1:
            prefix = src_folder
        # make sure destination exists before cleaning/linking
        if not os.path.exists(dest_folder):
            os.makedirs(dest_folder)

        if clean_others_tag:
            # snapshot of current dest videos/subs; whatever is still in this
            # list after the loop is considered stale and gets removed
            dest_list = findAllVideos(dest_folder, '', [], 2)
        else:
            dest_list = []

        for currentfile in movie_list:
            if not isinstance(currentfile, FileInfo):
                continue
            task = taskService.getTask('transfer')
            if task.status == 0:
                # cancelled externally (e.g. /api/stopall)
                return False
            count += 1
            taskService.updateTaskFinished(task, count)
            current_app.logger.debug('[!] - ' + str(count) + '/' + total + ' -')
            current_app.logger.debug("[+] start check [{}] ".format(currentfile.realpath))

            # corrected source address used when creating the link
            link_path = os.path.join(prefix, currentfile.midfolder, currentfile.realname)

            currentrecord = transrecordService.add(currentfile.realpath)
            currentrecord.srcfolder = src_folder
            # ignored flag: skip straight to the next file
            if currentrecord.ignored:
                continue
            # locked: keep manually fixed naming
            if currentrecord.locked:
                # TODO
                currentfile.locked = True
            # stored record takes priority; for series the season wins
            if currentrecord.topfolder and currentrecord.topfolder != '.':
                currentfile.topfolder = currentrecord.topfolder
            if currentrecord.secondfolder:
                currentfile.secondfolder = currentrecord.secondfolder

            if currentrecord.isepisode:
                currentfile.isepisode = True
                if isinstance(currentrecord.season, int) and currentrecord.season > -1:
                    currentfile.season = currentrecord.season
                    currentfile.forcedseason = True
                if isinstance(currentrecord.episode, int) and currentrecord.episode > -1:
                    currentfile.epnum = currentrecord.episode
                # BUGFIX: previously compared `currentrecord != ''` (a record
                # object, always true) instead of the episode field itself
                elif isinstance(currentrecord.episode, str) and currentrecord.episode != '':
                    currentfile.epnum = currentrecord.episode
            elif not fixseries_tag:
                currentfile.isepisode = False
            if currentrecord.forcedname:
                currentfile.updateForcedname(currentrecord.forcedname)

            # optimize naming
            naming(currentfile, movie_list, simplify_tag, fixseries_tag)

            if currentfile.topfolder == '.':
                newpath = os.path.join(dest_folder, currentfile.fixFinalName())
            else:
                newpath = os.path.join(dest_folder, currentfile.fixMidFolder(), currentfile.fixFinalName())
            currentfile.updateFinalPath(newpath)
            if linktype == 0:
                linkFile(link_path, newpath, 1, is_sym_relative_path)
            else:
                linkFile(link_path, newpath, 2)

            # use the final file name
            cleanbyNameSuffix(currentfile.finalfolder, currentfile.name, ext_type)
            oldname = os.path.splitext(currentfile.realname)[0]
            moveSubs(currentfile.realfolder, currentfile.finalfolder, oldname, currentfile.name)

            if os.path.exists(currentrecord.destpath) and newpath != currentrecord.destpath:
                # clean up the previously transferred file at the old location
                transrecordService.deleteRecordFiles(currentrecord, False)

            if newpath in dest_list:
                dest_list.remove(newpath)

            current_app.logger.info("[-] transfered [{}]".format(newpath))
            transrecordService.updateRecord(currentrecord, link_path, newpath, currentrecord.status,
                                            currentfile.topfolder, currentfile.secondfolder,
                                            currentfile.isepisode, currentfile.season, currentfile.epnum)
            # need rest 100ms
            time.sleep(0.1)

        if clean_others_tag:
            for torm in dest_list:
                current_app.logger.info("[!] remove other file: [{}]".format(torm))
                os.remove(torm)
            cleanExtraMedia(dest_folder)
            cleanFolderWithoutSuffix(dest_folder, video_type)

        current_app.logger.info("transfer finished")
    except Exception as e:
        current_app.logger.error(e)

    taskService.updateTaskStatus(task, 1)

    return True
369 |
370 |
def naming(currentfile: FileInfo, movie_list: list, simplify_tag, fixseries_tag):
    """ Optimize the naming of a single file, in place.

    May also retitle sibling entries in movie_list (CMCT handling).

    :param simplify_tag: strip CJK/noise from the top folder name
    :param fixseries_tag: normalize series season/episode naming
    """
    # Handle special content inside midfolder.
    # CMCT releases name the video file better than the folder does.
    if 'CMCT' in currentfile.topfolder and not currentfile.locked:
        matches = [x for x in movie_list if x.topfolder == currentfile.topfolder]
        # check whether any sibling carries an episode marker
        epfiles = [x for x in matches if x.isepisode]
        if len(matches) > 0 and len(epfiles) == 0:
            namingfiles = [x for x in matches if 'CMCT' in x.name]
            if len(namingfiles) == 1:
                # not a series: retitle the whole group after the CMCT file
                for m in matches:
                    m.topfolder = namingfiles[0].name
            current_app.logger.debug("[-] handling cmct midfolder [{}] ".format(currentfile.midfolder))
    # topfolder simplification
    if simplify_tag and not currentfile.locked:
        minlen = 20
        tempmid = currentfile.topfolder
        tempmid = replaceCJK(tempmid)
        # raw string: '\d' is an invalid escape sequence in a plain string
        tempmid = replaceRegex(tempmid, r'^s(\d{2})-s(\d{2})')
        # TODO more filter words could be added
        grouptags = ['cmct', 'wiki', 'frds', '1080p', 'x264', 'x265']
        for gt in grouptags:
            if gt in tempmid.lower():
                minlen += len(gt)
        if len(tempmid) > minlen:
            current_app.logger.debug("[-] replace CJK [{}] ".format(tempmid))
            currentfile.topfolder = tempmid
    # fix series naming
    if fixseries_tag:
        if currentfile.isepisode:
            current_app.logger.debug("[-] fix series name")
            # check for a stored correction record first
            if isinstance(currentfile.season, int) and isinstance(currentfile.epnum, int) \
                    and currentfile.season > -1 and currentfile.epnum > -1:
                current_app.logger.debug("[-] directly use record")
                if currentfile.season == 0:
                    currentfile.secondfolder = "Specials"
                else:
                    currentfile.secondfolder = "Season " + str(currentfile.season)
                try:
                    currentfile.fixEpName(currentfile.season)
                except:
                    currentfile.name = "S%02dE%02d" % (currentfile.season, currentfile.epnum)
            else:
                if isinstance(currentfile.season, int) and currentfile.season > -1:
                    seasonnum = currentfile.season
                else:
                    # check the video's parent folder for a season marker
                    dirfolder = currentfile.folders[len(currentfile.folders)-1]
                    # update secondfolder based on the season marker
                    seasonnum = matchSeason(dirfolder)
                if seasonnum:
                    currentfile.season = seasonnum
                    currentfile.secondfolder = "Season " + str(seasonnum)
                    currentfile.fixEpName(seasonnum)
                else:
                    # no season detected: possibly multi-season; default to season 1
                    if currentfile.secondfolder == '':
                        currentfile.season = 1
                        currentfile.secondfolder = "Season " + str(1)
                        currentfile.fixEpName(1)
        # TODO more rules for extras/featurettes
        else:
            try:
                dirfolder = currentfile.folders[len(currentfile.folders)-1]
                if '花絮' in dirfolder and currentfile.topfolder != '.':
                    currentfile.secondfolder = "Specials"
                    currentfile.season = 0
                    currentfile.fixEpName(0)
            except Exception as ex:
                current_app.logger.error(ex)
443 |
--------------------------------------------------------------------------------
/src/config.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os, platform
3 |
4 |
class Config:
    """Flask application configuration defaults."""
    DEBUG = False
    SECRET_KEY = 'secret!'
    SCHEDULER_API_ENABLED = True
    LOGGING_FORMAT = '[%(asctime)s] %(levelname)s in %(module)s: %(message)s'
    LOGGING_LOCATION = 'data/web.log'
    LOGGING_LEVEL = logging.INFO
    VERSION = '2.6.1'
    BASE_DATABASE_URI = '../data/data.db'
    # macOS sqlite wants an absolute path; elsewhere the relative path works
    if platform.system() == "Darwin":
        SQLALCHEMY_DATABASE_URI = "sqlite:///" + os.path.abspath(BASE_DATABASE_URI.removeprefix('../'))
    else:
        SQLALCHEMY_DATABASE_URI = "sqlite:///" + BASE_DATABASE_URI
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    SOCK_SERVER_OPTIONS = {'ping_interval': 10}
17 |
--------------------------------------------------------------------------------
/src/controller/__init__.py:
--------------------------------------------------------------------------------
1 |
2 | import traceback
3 | from flask import Blueprint, json, jsonify
4 |
5 |
# Shared blueprint: every controller module attaches its routes to this on import.
web = Blueprint('web', __name__)
7 |
8 |
def init(app):
    """Register all controller routes and JSON error handlers on *app*.

    The sub-module imports are side-effecting: each module attaches its
    routes to the shared `web` blueprint at import time, so they must run
    before the blueprint is registered.
    """
    from . import main_ctrl
    from . import viewsctrl
    from . import optionctrl
    from . import filescan_ctrl
    from . import scrapingctrl
    from . import transferctrl
    from . import automationctrl

    app.register_blueprint(web)

    from werkzeug.exceptions import HTTPException

    @app.errorhandler(HTTPException)
    def handle_httpexception(e):
        """Return JSON instead of HTML for HTTP errors."""
        response = e.get_response()
        response.data = json.dumps({
            "code": e.code,
            "name": e.name,
            "description": e.description,
        })
        response.content_type = "application/json"
        return response

    @app.errorhandler(Exception)
    def handle_exception(e):
        # let real HTTP errors flow to the handler above
        if isinstance(e, HTTPException):
            return e
        ret = {
            "code": 500,
            "name": "Internal Server Error",
            "description": "Internal Server Error, Please check the logs for details",
        }
        # log the full traceback; clients only see the generic message
        strs = traceback.format_exc()
        app.logger.error(strs)
        return jsonify(ret), 500
46 |
--------------------------------------------------------------------------------
/src/controller/automationctrl.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | '''
3 | '''
4 | import json
5 |
6 | from flask import request, Response
7 | from flask import current_app
8 | from . import web
9 | from ..bizlogic import automation
10 | from ..service.configservice import autoConfigService
11 | from ..service.taskservice import autoTaskService
12 |
13 |
@web.route("/api/client", methods=['POST'])
def clientAutoTask():
    """ Entry point for download-client completion hooks:
    queues an automation run for the reported path.
    """
    payload = request.get_json()
    client_path = payload.get('path')
    if client_path:
        automation.start(client_path)
    return Response(status=200)
23 |
24 |
@web.route("/api/auto/conf", methods=['GET'])
def getAutoConf():
    """Return the automation config as JSON."""
    conf = autoConfigService.getConfig()
    return json.dumps(conf.serialize())
29 |
30 |
@web.route("/api/auto/conf", methods=['PUT'])
def updateAutoConf():
    """Overwrite the automation config with the request body."""
    autoConfigService.updateConfig(request.get_json())
    return Response(status=200)
36 |
37 |
@web.route("/api/auto/task", methods=['GET'])
def getAll():
    """Return every automation task, serialized, as a JSON array."""
    tasks = autoTaskService.getTasks()
    # comprehension instead of append loop; avoid shadowing builtin all()
    return json.dumps([task.serialize() for task in tasks])
45 |
46 |
@web.route("/api/auto/task", methods=['DELETE'])
def clientCleanTaskQueue():
    """ Drop every queued client task.
    """
    automation.clean()
    return Response(status=200)
53 |
--------------------------------------------------------------------------------
/src/controller/filescan_ctrl.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import os
4 | import json
5 | from pathlib import Path
6 | from flask import url_for, request
7 | from . import web
8 | from flask import current_app
9 |
10 |
@web.route('/api/scan/', methods=['POST'])
def direcotry():
    """List the sub-directories of a path for the folder picker.

    Body: {"path": "<dir>"}; defaults to '/'. Returns
    {"parent": ..., "dirs": [...]} or {"error": ...} on failure.
    (Endpoint name keeps the historical typo: url_for references it.)
    """
    try:
        content = request.get_json()
        # missing/empty path falls back to the filesystem root
        media_dir = content.get('path') or '/'
        current_app.logger.debug(media_dir)
        ret = dict()
        ret['parent'] = os.path.dirname(media_dir)
        dir_ele_list = list()
        for f in (Path('/') / Path(media_dir)).iterdir():
            # only directories are browsable; previously fullname was
            # computed redundantly (twice, and for plain files too)
            if not f.is_dir():
                continue
            fullname = str(f).replace('\\', '/')
            dir_ele_list.append({'is_dir': 1, 'filesize': 0,
                                 'url': url_for('web.direcotry', media_dir=fullname),
                                 'fullname': fullname})
        ret['dirs'] = dir_ele_list
        return json.dumps(ret)
    except PermissionError:
        ret = {'error': '拒绝访问'}
        return json.dumps(ret)
    except Exception as e:
        current_app.logger.error(e)
        ret = {'error': str(e)}
        return json.dumps(ret)
40 |
--------------------------------------------------------------------------------
/src/controller/main_ctrl.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import json
4 | import os
5 | from flask import request, Response, current_app
6 |
7 | from . import web
8 | from ..bizlogic import rename
9 | from ..service.taskservice import taskService
10 | from flask import current_app
11 |
12 |
@web.route("/api/intro", methods=['GET'])
def intro():
    """Serve the bundled intro document (markdown source)."""
    base_dir = os.path.dirname(os.path.abspath(__file__))
    doc_path = os.path.join(base_dir, "..", "..", "docs", 'intro.md')
    with open(doc_path, encoding='utf-8') as fp:
        return fp.read()
19 |
20 |
@web.route("/api/version", methods=['GET'])
def version():
    """Report the core version, prefixed with the web build sha when present."""
    version_info = "core_" + current_app.config['VERSION']
    base_dir = os.path.dirname(os.path.abspath(__file__))
    sha_file = os.path.join(base_dir, "..", "..", "web", "static", 'version.txt')
    if os.path.exists(sha_file):
        with open(sha_file, encoding='utf-8') as fp:
            web_sha = fp.read()
        version_info = "web_" + web_sha[:7] + " " + version_info
    return version_info
32 |
33 | # action
34 |
35 |
@web.route("/api/stopall", methods=['GET'])
def resetAllTaskStatus():
    """Force both long-running tasks back to the idle state."""
    for task_name in ('transfer', 'scrape'):
        taskService.updateTaskStatus(taskService.getTask(task_name), 0)
    return Response(status=200)
41 |
42 |
@web.route("/api/previewrename", methods=['POST'])
def previewRename():
    """Dry-run regex rename: returns the would-be results without touching files."""
    content = request.get_json()
    result = rename.renamebyreg(
        content['source_folder'], content['reg'], content['prefix'], True)
    return json.dumps(result)
48 |
49 |
@web.route("/api/renamebyreg", methods=['POST'])
def renamebyRegex():
    """Apply a regex rename for real (same call as preview, preview=False)."""
    content = request.get_json()
    result = rename.renamebyreg(
        content['source_folder'], content['reg'], content['prefix'], False)
    return json.dumps(result)
55 |
56 |
@web.route("/api/renamebyrep", methods=['POST'])
def renamebyReplace():
    """Rename by substring replacement (base -> newfix)."""
    content = request.get_json()
    result = rename.rename(content['source_folder'], content['base'], content['newfix'])
    return json.dumps(result)
62 |
--------------------------------------------------------------------------------
/src/controller/optionctrl.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import os
4 | import json
5 | import logging
6 | import time
7 | import subprocess
8 | from flask import request, Response, current_app
9 |
10 | from . import web
11 | from ..service.configservice import localConfService
12 | from ..bizlogic.schedulertask import cleanRecordsTask
13 | from ..utils.regex import regexMatch
14 |
15 |
@web.route("/api/options/loglevel", methods=['GET', 'PUT'])
def loglevel():
    """Read (GET) or set (PUT) the application log level.

    Levels follow the stdlib logging constants:
    CRITICAL/FATAL 50, ERROR 40, WARNING/WARN 30, INFO 20, DEBUG 10, NOTSET 0.
    """
    if request.method == 'GET':
        return json.dumps({'loglevel': current_app.logger.level})
    # PUT: persist, then apply; fall back to INFO on a malformed body
    content = request.get_json()
    if content and 'loglevel' in content:
        level = int(content.get('loglevel'))
    else:
        level = logging.INFO
    localConfService.updateLoglvl(level)
    current_app.logger.setLevel(level)
    return Response(status=200)
42 |
43 |
@web.route("/api/options/cleanrecord", methods=['GET'])
def cleanErrData():
    """ Purge records whose source file no longer exists.
    """
    cleanRecordsTask(True)
    return Response(status=200)
50 |
51 |
@web.route("/api/options/config", methods=["GET"])
def getLocalConfig():
    """Return the local config serialized as JSON."""
    return json.dumps(localConfService.getConfig().serialize())
57 |
58 |
@web.route("/api/options/config", methods=['PUT'])
def updateLocalConf():
    """Overwrite the local config with the request body."""
    localConfService.updateConfig(request.get_json())
    return Response(status=200)
64 |
65 |
@web.route("/api/options/check/lib", methods=['GET'])
def checklibversion():
    """ Beta: report installed vs latest scrapinglib version.

    Shells out to `pip index versions` (fixed command, no user input)
    and scrapes its verbose output.
    """
    info = subprocess.run("python -m pip index -vv versions scrapinglib", shell=True, stdout=subprocess.PIPE, encoding="utf8")
    out = info.stdout
    current_app.logger.debug(out)
    # raw strings: '\ ' and '\d' are invalid escape sequences in plain strings
    install_matches = regexMatch(out, r"INSTALLED:\ ([\d.]+)")
    latest_matches = regexMatch(out, r"LATEST:[\ ]+([\d.]+)")
    installed = install_matches[0] if len(install_matches) == 1 else "Unknown"
    latest = latest_matches[0] if len(latest_matches) == 1 else "Unknown"
    ret = {
        "name": "scrapinglib",
        "installed": installed,
        "latest": latest
    }
    return json.dumps(ret)
83 |
84 |
@web.route("/api/options/check/lib", methods=['PUT'])
def updatelibversion():
    """ Beta: upgrade scrapinglib in place via pip.
    """
    result = subprocess.run("python -m pip install scrapinglib -U",
                            shell=True, stdout=subprocess.PIPE, encoding="utf8")
    current_app.logger.debug(result.stdout)
    return Response(status=200)
92 |
--------------------------------------------------------------------------------
/src/controller/scrapingctrl.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 |
4 | import json
5 | from flask import request, Response, current_app
6 |
7 | from . import web
8 | from ..bizlogic.manager import startScrapingAll, startScrapingSingle
9 | from ..bizlogic.schedulertask import cleanTorrents
10 | from ..service.configservice import localConfService, scrapingConfService
11 | from ..service.recordservice import scrapingrecordService
12 | from ..service.taskservice import taskService
13 |
14 |
@web.route("/api/scraping", methods=['POST'])
def startScraping():
    """Kick off a full scraping run for the given config id."""
    content = request.get_json()
    cid = content.get('id') if content else None
    if cid:
        startScrapingAll(cid)
    return Response(status=200)
22 |
23 |
@web.route("/api/scraping/single", methods=['POST'])
def startScrapingdirect():
    """Scrape a single source path directly."""
    content = request.get_json()
    if content and content.get('srcpath'):
        startScrapingSingle(content.get('configid'), content.get('srcpath'), True)
    return Response(status=200)
32 |
33 |
@web.route("/api/scraping/conf/<sid>", methods=['GET'])
def getScrapingConf(sid):
    """Return one scraping config by id, or every config when sid == 'all'.

    The route declared no URL variable while the view requires `sid`,
    so Flask could never call it; `<sid>` restores the mapping.
    """
    if sid == 'all':
        configs = scrapingConfService.getConfiglist()
        # comprehension; avoid shadowing builtin all()
        return json.dumps([conf.serialize() for conf in configs])
    return json.dumps(scrapingConfService.getConfig(sid).serialize())
45 |
46 |
@web.route("/api/scraping/conf", methods=['POST'])
def addConf():
    """ Create a new scraping config from the request body.
    """
    content = request.get_json()
    content['id'] = None  # force an insert rather than an update
    created = scrapingConfService.updateConfig(content)
    return json.dumps(created.serialize())
55 |
56 |
@web.route("/api/scraping/conf", methods=['PUT'])
def updateScapingConf():
    """Update an existing scraping config."""
    scrapingConfService.updateConfig(request.get_json())
    return Response(status=200)
62 |
63 |
@web.route("/api/scraping/conf/<cid>", methods=['DELETE'])
def deleteScrapingConf(cid):
    """ Delete a scraping config by id.

    `<cid>` was missing from the route, so the view's required
    argument could never be supplied by Flask.
    """
    scrapingConfService.deleteConf(int(cid))
    return Response(status=200)
71 |
72 |
@web.route("/api/scraping/record", methods=['DELETE'])
def deleteScrapingRecordIds():
    """Delete scraping records by id; optionally remove sources and torrents."""
    content = request.get_json()
    delsrc = content.get('delsrc')
    delrecords = scrapingrecordService.deleteByIds(content.get('ids'), delsrc)
    if delsrc:
        cleanTorrents(delrecords, localConfService.getConfig())
    return Response(status=200)
83 |
84 |
@web.route("/api/scraping/record", methods=['GET'])
def getScrapingRecord():
    """ Paged query of scraping records plus current task progress.

    Query args: page, size, status, sortprop, sortorder, blur.
    """
    # default page/size instead of crashing with TypeError when absent
    page = int(request.args.get('page', 1))
    size = int(request.args.get('size', 20))
    status = request.args.get('status')
    # sort: cnsubtag|status|updatetime, descending|ascending
    sortprop = request.args.get('sortprop')
    sortorder = request.args.get('sortorder')
    # fuzzy filter
    blur = request.args.get('blur') or ''
    if not sortprop:
        sortprop = ''
        sortorder = 'desc'

    infos = scrapingrecordService.queryByPage(page, size, status, sortprop, sortorder, blur)
    ret = {
        'data': [i.serialize() for i in infos.items],
        'total': infos.total,
        'pages': infos.pages,
        'page': page,
    }
    taskinfo = taskService.getTask('scrape')
    if taskinfo.status == 2:
        ret['running'] = True
        ret['tasktotal'] = taskinfo.total
        ret['taskfinished'] = taskinfo.finished
    else:
        ret['running'] = False
    return json.dumps(ret)
120 |
121 |
@web.route("/api/scraping/record", methods=['PUT'])
def editScrapingData():
    """Apply manual edits to a single scraping record."""
    c = request.get_json()
    scrapingrecordService.editRecord(
        c['id'], c['status'], c['scrapingname'], c['ignored'], c['locked'],
        c['specifiedsource'], c['specifiedurl'], c['cnsubtag'], c['leaktag'],
        c['uncensoredtag'], c['hacktag'], c['cdnum'], c['deadtime'])
    return Response(status=200)
139 |
--------------------------------------------------------------------------------
/src/controller/transferctrl.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 |
4 | import json
5 | from flask import request, Response, current_app
6 |
7 | from . import web
8 | from ..bizlogic.transfer import ctrlTransfer
9 | from ..bizlogic.schedulertask import cleanTorrents
10 | from ..service.configservice import localConfService, transConfigService
11 | from ..service.recordservice import transrecordService
12 | from ..service.taskservice import taskService
13 |
14 |
@web.route("/api/transfer", methods=['POST'])
def startTransfer():
    """Run a one-off transfer with parameters taken from the request body."""
    c = request.get_json()
    ctrlTransfer(c["source_folder"], c["output_folder"], c["linktype"],
                 c["soft_prefix"], c["escape_folder"], c.get("specified_files"),
                 c["fix_series"], c["clean_others"], c["replace_CJK"],
                 c.get("refresh_url"), c["is_sym_relative_path"])
    return Response(status=200)
32 |
33 |
@web.route("/api/transfer/conf/all", methods=['GET'])
def getTransConfs():
    """ Return every transfer config as a JSON array.
    """
    configs = transConfigService.getConfiglist()
    # comprehension instead of append loop; avoid shadowing builtin all()
    return json.dumps([conf.serialize() for conf in configs])
43 |
44 |
@web.route("/api/transfer/conf", methods=['POST'])
def addTransConf():
    """ Create a new transfer config.
    """
    content = request.get_json()
    content['id'] = None  # force an insert rather than an update
    created = transConfigService.updateConf(content)
    return json.dumps(created.serialize())
53 |
54 |
@web.route("/api/transfer/conf", methods=['PUT'])
def updateTransConf():
    """ Update an existing transfer config and echo it back.
    """
    saved = transConfigService.updateConf(request.get_json())
    return json.dumps(saved.serialize())
62 |
63 |
@web.route("/api/transfer/conf/<cid>", methods=['DELETE'])
def deleteTransConf(cid):
    """ Delete a transfer config by id.

    `<cid>` was missing from the route, so the view's required
    argument could never be supplied by Flask.
    """
    transConfigService.deleteConf(int(cid))
    return Response(status=200)
71 |
72 | # record
73 |
74 |
@web.route("/api/transfer/record", methods=['GET'])
def getTransRecord():
    """ Paged query of transfer records plus current task progress.

    Query args: page, size, status, sortprop, sortorder, blur.
    """
    # default page/size instead of crashing with TypeError when absent
    pagenum = int(request.args.get('page', 1))
    size = int(request.args.get('size', 20))
    status = request.args.get('status')
    # sort: cnsubtag|status|updatetime, descending|ascending
    sortprop = request.args.get('sortprop')
    sortorder = request.args.get('sortorder')
    # fuzzy filter
    blur = request.args.get('blur') or ''
    if not sortprop:
        sortprop = ''
        sortorder = 'desc'

    infos = transrecordService.queryByPage(pagenum, size, status, sortprop, sortorder, blur)
    ret = {
        'data': [i.serialize() for i in infos.items],
        'total': infos.total,
        'pages': infos.pages,
        'page': pagenum,
    }
    taskinfo = taskService.getTask('transfer')
    if taskinfo.status == 2:
        ret['running'] = True
        ret['tasktotal'] = taskinfo.total
        ret['taskfinished'] = taskinfo.finished
    else:
        ret['running'] = False
    return json.dumps(ret)
110 |
111 |
@web.route("/api/transfer/record", methods=['PUT'])
def editTransferRecord():
    """Apply manual edits to a single transfer record (looked up by srcpath)."""
    c = request.get_json()
    info = transrecordService.queryByPath(c.get('srcpath'))
    transrecordService.editRecord(
        info, c.get('linkpath'), c.get('destpath'), c.get('status'),
        c.get('ignored'), c.get('locked'), c.get('topfolder'),
        c.get('secondfolder'), c.get('forcedname'), c.get('isepisode'),
        c.get('season'), c.get('episode'), c.get('renameAllTop'),
        c.get('renameAllSub'), c.get('deadtime'))
    return Response(status=200)
122 |
123 |
@web.route("/api/transfer/record", methods=['DELETE'])
def deleteTransferRecordIds():
    """Delete transfer records by id; optionally drop sources and torrents."""
    content = request.get_json()
    delsrc = content.get('delsrc')
    delrecords = transrecordService.deleteByIds(content.get('ids'), delsrc)
    if delsrc:
        cleanTorrents(delrecords, localConfService.getConfig())
    return Response(status=200)
134 |
--------------------------------------------------------------------------------
/src/controller/viewsctrl.py:
--------------------------------------------------------------------------------
1 |
2 | import os
3 | from flask import render_template, Response
4 | from . import web
5 |
6 |
@web.route("/")
def index():
    """Serve the single-page application shell."""
    return render_template('index.html')
10 |
11 |
@web.route("/imgs/<imageid>")
def imgs(imageid):
    """Serve a documentation image by file name.

    `<imageid>` was missing from the route, so Flask could never supply
    the view's required argument.
    NOTE(review): imageid is joined into a path unchecked — potential
    path traversal; consider flask.send_from_directory instead.
    """
    localPath = os.path.dirname(os.path.abspath(__file__))
    with open(os.path.join(localPath, "..", "..", "docs", "imgs", imageid), 'rb') as f:
        image = f.read()
    # always declares jpeg even though some docs images are png
    resp = Response(image, mimetype="image/jpeg")
    return resp
19 |
--------------------------------------------------------------------------------
/src/downloader/__init__.py:
--------------------------------------------------------------------------------
1 |
2 |
3 | # TODO
4 | # 关联 qbit tr
5 | # 删除文件的时候,需要同步删除种子文件
6 |
--------------------------------------------------------------------------------
/src/downloader/qbittorrent.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Suwmlee/ikaros/e29997560bedf8ae70900ea4c1d4d66a09759883/src/downloader/qbittorrent.py
--------------------------------------------------------------------------------
/src/downloader/transmission.py:
--------------------------------------------------------------------------------
1 |
2 | import os
3 | import transmission_rpc
4 |
class Transmission():
    """Thin wrapper over transmission_rpc for looking up and removing torrents."""

    # shared RPC session; None until login() succeeds
    trsession = None

    # connection parameters parsed from the constructor URL
    protocol = 'http'
    host = None
    port = None
    username = None
    password = None

    # torrent attributes requested by default in searches
    fields = ["id", "name", "status", "downloadDir", "error", "errorString"]

    def __init__(self, url: str, username, password):
        """Split *url* into protocol/host/port; credentials stored as-is.

        Accepts forms like 'http://host:9091', 'host:9091' or 'https://host'
        (port defaults to 443 for https when absent).
        """
        pis = url.split(':')
        pp = pis[len(pis)-1]
        # last segment is the port if it is purely numeric
        if pp.strip('/').isdigit():
            self.port = pp.strip('/')
            pis.remove(pp)
        if url.startswith('http'):
            self.protocol = pis[0]
            pis.remove(pis[0])
        self.host = ''.join(pis).strip('/')
        if not self.port and self.protocol == 'https':
            self.port = 443
        self.username = username
        self.password = password


    def login(self):
        """Open an RPC session; returns the client or None on failure."""
        try:
            self.trsession = transmission_rpc.Client(host=self.host,
                                                     port=self.port,
                                                     protocol=self.protocol,
                                                     username=self.username,
                                                     password=self.password,
                                                     timeout=10)
            return self.trsession
        except Exception as ex:
            # NOTE(review): prints instead of logging; caller must check for None
            print(ex)
            return None

    def getTorrents(self, ids=None, fields=None):
        """ Read torrent info filtered by id.
        :param ids: list of ids; all torrents when empty
        :param fields: attribute names to fetch
        """
        if not self.trsession:
            return []
        if isinstance(ids, list):
            ids = [int(x) for x in ids]
        elif ids:
            ids = int(ids)
        torrents = self.trsession.get_torrents(ids=ids, arguments=fields)
        return torrents

    def searchByName(self, name):
        """Return all torrents whose name matches *name* exactly."""
        torrents = self.getTorrents(fields=self.fields)
        results = []
        for i in torrents:
            if i.name == name:
                results.append(i)
        return results

    def searchByPath(self, path):
        """Match torrents by path basename, walking up to 3 parent levels."""
        retry = 3
        for i in range(retry):
            name = os.path.basename(path)
            tt = self.searchByName(name)
            if len(tt):
                return tt
            else:
                path = os.path.dirname(path)
        return []

    def getTorrentFiles(self, id):
        """Return the file list of torrent *id*, or None if unavailable."""
        if not self.trsession:
            return None
        torrent = self.trsession.get_torrent(id)
        if torrent:
            return torrent.files()
        else:
            return None

    def removeTorrent(self, id, delete=False):
        """Remove torrent *id*; also delete its data when *delete* is True."""
        if not self.trsession:
            return None
        self.trsession.remove_torrent([id], delete_data=delete)
92 |
--------------------------------------------------------------------------------
/src/images/CNSUB.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Suwmlee/ikaros/e29997560bedf8ae70900ea4c1d4d66a09759883/src/images/CNSUB.png
--------------------------------------------------------------------------------
/src/images/HACK.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Suwmlee/ikaros/e29997560bedf8ae70900ea4c1d4d66a09759883/src/images/HACK.png
--------------------------------------------------------------------------------
/src/images/LEAK.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Suwmlee/ikaros/e29997560bedf8ae70900ea4c1d4d66a09759883/src/images/LEAK.png
--------------------------------------------------------------------------------
/src/images/UNCENSORED.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Suwmlee/ikaros/e29997560bedf8ae70900ea4c1d4d66a09759883/src/images/UNCENSORED.png
--------------------------------------------------------------------------------
/src/model/__init__.py:
--------------------------------------------------------------------------------
1 |
def load_models():
    """Import the model modules so their tables register with SQLAlchemy."""
    from . import config, record, task
6 |
--------------------------------------------------------------------------------
/src/model/config.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | from sqlalchemy import Column, Integer, String, Boolean
4 | from .. import db
5 |
6 |
class _ScrapingConfigs(db.Model):
    """Scraping profile configuration.

    main_mode:
        1: scrape and move/link into the library
        2: organize only
        3: scrape in place
    link_type (how the file reaches the destination):
        0: move the file
        1: symbolic link
        2: hard link

    NOTE(review): the original docstring indented the "2 organize" /
    "3 scrape in place" entries under link_type, but they appear to be
    main_mode values — confirm against the UI.
    """
    __tablename__ = 'scrapingconfigs'

    id = Column(Integer, primary_key=True)
    main_mode = Column(Integer, default=1)
    auto_watch = Column(Boolean, default=False)

    # source/destination folders and link behaviour
    scraping_folder = Column(String, default='/media')
    failed_folder = Column(String, default='/media/failed')
    success_folder = Column(String, default='/media/output')
    link_type = Column(Integer, default=1)
    soft_prefix = Column(String, default='/media')
    is_sym_relative_path = Column(Boolean, default=False)
    failed_move = Column(Boolean, default=False)
    threads_num = Column(Integer, default=5)

    # scraping sources and naming rules (location/naming rules are
    # evaluated expressions over fields such as actor/number/title)
    site_sources = Column(String, default="")
    location_rule = Column(String, default="actor+'/'+number+' '+title")
    naming_rule = Column(String, default="number+' '+title")
    max_title_len = Column(Integer, default=50)
    update_check = Column(Boolean, default=False)
    morestoryline = Column(Boolean, default=True)

    # artwork / watermark options
    extrafanart_enable = Column(Boolean, default=False)
    extrafanart_folder = Column(String, default='extrafanart', server_default='extrafanart')
    watermark_enable = Column(Boolean, default=True, comment='enable water mark')
    watermark_size = Column(Integer, default=9)
    watermark_location = Column(Integer, default=2)

    # skip filters
    escape_folders = Column(String, default="failed,output")
    escape_literals = Column(String, default="\()/")
    escape_size = Column(Integer, default=0)

    # translation settings.  NOTE: the 'transalte' typo is baked into the
    # column names; renaming would require a DB migration.
    transalte_enable = Column(Boolean, default=False)
    transalte_to_sc = Column(Boolean, default=False)
    transalte_values = Column(String, default="title,outline")

    # site cookies and media-server refresh hook
    cookies_javdb = Column(String, default="")
    cookies_javlib = Column(String, default="")
    refresh_url = Column(String, default='')
    remark = Column(String, default='备注')

    def serialize(self):
        """Return a JSON-serializable dict of this config.

        Not every column is exported: max_title_len, update_check,
        failed_move, escape_literals and the transalte_* fields are absent
        — presumably intentional, confirm before relying on it.
        """
        return {
            'id': self.id,
            'main_mode': self.main_mode,
            'auto_watch': self.auto_watch,
            'link_type': self.link_type,
            'soft_prefix': self.soft_prefix,
            'scraping_folder': self.scraping_folder,
            'success_folder': self.success_folder,
            'failed_folder': self.failed_folder,
            'threads_num': self.threads_num,
            'location_rule': self.location_rule,
            'naming_rule': self.naming_rule,
            'site_sources': self.site_sources,
            'morestoryline': self.morestoryline,
            'extrafanart_enable': self.extrafanart_enable,
            'extrafanart_folder': self.extrafanart_folder,
            'watermark_enable': self.watermark_enable,
            'watermark_location': self.watermark_location,
            'watermark_size': self.watermark_size,
            'escape_folders': self.escape_folders,
            'escape_size': self.escape_size,
            'cookies_javdb': self.cookies_javdb,
            'cookies_javlib': self.cookies_javlib,
            'refresh_url': self.refresh_url,
            'remark': self.remark,
            'is_sym_relative_path': self.is_sym_relative_path
        }
87 |
88 |
class _TransferConfigs(db.Model):
    """Transfer (organize/link) profile configuration."""
    __tablename__ = 'transferconfigs'

    id = Column(Integer, primary_key=True)
    auto_watch = Column(Boolean, default=False)
    source_folder = Column(String, default='/media')
    # prefix used when creating soft links (host-side path)
    soft_prefix = Column(String, default='/volume1/Media')
    linktype = Column(Integer, default=0)
    output_folder = Column(String, default='/media/output')
    # comma-separated folder names to skip
    escape_folder = Column(String, default='Sample,sample')
    is_sym_relative_path = Column(Boolean, default=False)
    escape_size = Column(Integer, default=0)
    clean_others = Column(Boolean, default=False)
    replace_CJK = Column(Boolean, default=False)
    fix_series = Column(Boolean, default=False)
    refresh_url = Column(String, default='')
    remark = Column(String, default='备注')

    def serialize(self):
        """Return a JSON-serializable dict with every column."""
        return {
            'id': self.id,
            'auto_watch': self.auto_watch,
            'source_folder': self.source_folder,
            'linktype': self.linktype,
            'soft_prefix': self.soft_prefix,
            'output_folder': self.output_folder,
            'escape_folder': self.escape_folder,
            'escape_size': self.escape_size,
            'clean_others': self.clean_others,
            'replace_CJK': self.replace_CJK,
            'fix_series': self.fix_series,
            'refresh_url': self.refresh_url,
            'remark': self.remark,
            "is_sym_relative_path": self.is_sym_relative_path,
        }
124 |
125 |
class _AutoConfigs(db.Model):
    """Automation configuration.

    The ``*folders`` / ``*confs`` columns hold ';'-separated lists (per
    their column comments); ``original``/``prefixed`` describe a path
    prefix rewrite applied to incoming paths.
    """
    __tablename__ = 'autoconfigs'

    id = Column(Integer, primary_key=True)
    original = Column(String, default="", comment="需要替换的前缀")
    prefixed = Column(String, default="", comment="前缀")
    scrapingfolders = Column(String, default="", comment="以;间隔")
    transferfolders = Column(String, default="", comment="以;间隔")
    scrapingconfs = Column(String, default="", comment="以;间隔")
    transferconfs = Column(String, default="", comment="以;间隔")
    remark = Column(String, default='备注')

    def serialize(self):
        """Return a JSON-serializable dict with every column."""
        return {
            'id': self.id,
            'original': self.original,
            'prefixed': self.prefixed,
            'scrapingfolders': self.scrapingfolders,
            'transferfolders': self.transferfolders,
            'scrapingconfs': self.scrapingconfs,
            'transferconfs': self.transferconfs,
            'remark': self.remark
        }
149 |
150 |
class _LocalConfigs(db.Model):
    """Local/system settings: notification push, proxy, transmission, log level.

    Accessed as a singleton row (id == 1) by LocalConfService.
    """
    __tablename__ = 'localconfigs'

    id = Column(Integer, primary_key=True)
    # Telegram bot push
    tg_token = Column(String, default="")
    tg_chatid = Column(String, default="")
    # WeChat Work (企业微信) push
    wechat_corpid = Column(String, default="")
    wechat_corpsecret = Column(String, default="")
    wechat_agentid = Column(String, default="")

    # outbound HTTP proxy for scraping/push requests
    proxy_enable = Column(Boolean, default=False)
    proxy_type = Column(String, default='socks5h')
    proxy_address = Column(String, default='127.0.0.1:1080')

    # presumably: automatically clean finished tasks — confirm against caller
    task_clean = Column(Boolean, default=False)

    # transmission client connection
    tr_url = Column(String, default="")
    tr_username = Column(String, default="")
    tr_passwd = Column(String, default="")
    tr_prefix = Column(String, default="")

    # python logging level (20 == logging.INFO)
    loglevel = Column(Integer, default=20)

    def serialize(self):
        """Return a JSON-serializable dict.

        NOTE(review): loglevel is not exported here — confirm whether the
        web UI is expected to see it.
        """
        return {
            'id': self.id,
            'tg_token': self.tg_token,
            'tg_chatid': self.tg_chatid,
            'wechat_corpid': self.wechat_corpid,
            'wechat_corpsecret': self.wechat_corpsecret,
            'wechat_agentid': self.wechat_agentid,
            'proxy_enable': self.proxy_enable,
            'proxy_type': self.proxy_type,
            'proxy_address': self.proxy_address,
            'task_clean': self.task_clean,
            'tr_url': self.tr_url,
            'tr_username': self.tr_username,
            'tr_passwd': self.tr_passwd,
            'tr_prefix': self.tr_prefix,
        }
191 |
--------------------------------------------------------------------------------
/src/model/record.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | 刮削/转移记录
4 | 关键参数:
5 | status:
6 | 0 等待
7 | 1 成功
8 | 2 失败
9 | 3 跳过
10 | 4 进行中
11 | ignored: 忽略
12 | locked: 锁定, 不再进行重命名等
13 | deleted: 实际内容已经删除
14 | """
15 | import datetime
16 | from sqlalchemy import Column, Integer, String, Boolean, DateTime, BigInteger
17 | from .. import db
18 |
19 |
class _ScrapingRecords(db.Model):
    """One scraping record per source media file.

    See the module docstring for the meaning of status / ignored /
    locked / deleted.
    """
    __tablename__ = 'scrapingrecords'

    id = Column(Integer, primary_key=True)
    # source file
    srcname = Column(String, default='')
    srcpath = Column(String, default='')
    srcsize = Column(BigInteger, default=0)
    status = Column(Integer, default=0)
    ignored = Column(Boolean, default=False)
    locked = Column(Boolean, default=False)
    deleted = Column(Boolean, default=False)

    scrapingname = Column(String, default='', comment='used name for scraping')
    cdnum = Column(Integer, default=0, comment='cd num')
    cnsubtag = Column(Boolean, default=False, comment='cn tag')
    leaktag = Column(Boolean, default=False, comment='leak tag')
    uncensoredtag = Column(Boolean, default=False, comment='uncensored tag')
    hacktag = Column(Boolean, default=False, comment='hack tag')

    # site and url to scrape from, set manually by the user
    specifiedsource = Column(String, default='', comment='specified scraping site')
    specifiedurl = Column(String, default='', comment='specified scraping site url')

    linktype = Column(Integer, comment='ln type')
    destname = Column(String, default='', comment='final name')
    destpath = Column(String, default='', comment='final path')
    # NOTE(review): naive local time here, while _TransRecords uses
    # datetime.utcnow — confirm which is intended.
    updatetime = Column(DateTime, default=datetime.datetime.now)
    deadtime = Column(DateTime, default=None, comment='time to delete files')

    def __init__(self, basename, basepath):
        self.srcname = basename
        self.srcpath = basepath

    def serialize(self):
        """Return a JSON-serializable dict; datetimes rendered as strings."""
        return {
            'id': self.id,
            'srcname': self.srcname,
            'srcpath': self.srcpath,
            'srcsize': self.srcsize,
            'status': self.status,
            'ignored': self.ignored,
            'locked': self.locked,
            'deleted': self.deleted,
            'scrapingname': self.scrapingname,
            'cdnum': self.cdnum,
            'cnsubtag': self.cnsubtag,
            'leaktag': self.leaktag,
            'uncensoredtag': self.uncensoredtag,
            'hacktag': self.hacktag,
            'specifiedsource': self.specifiedsource,
            'specifiedurl': self.specifiedurl,
            'linktype': self.linktype,
            'destname': self.destname,
            'destpath': self.destpath,
            'updatetime': self.updatetime.strftime("%Y/%m/%d %H:%M:%S") if self.updatetime else '',
            'deadtime': self.deadtime.strftime("%Y/%m/%d %H:%M:%S") if self.deadtime else '',
        }
81 |
class _TransRecords(db.Model):
    """One transfer (organize/link) record per source media file.

    See the module docstring for the meaning of status / ignored /
    locked / deleted.
    """
    __tablename__ = 'transrecords'

    id = Column(Integer, primary_key=True)
    # source file
    srcname = Column(String, default='')
    srcpath = Column(String, default='')
    srcsize = Column(BigInteger, default=0)
    srcfolder = Column(String, default='')

    status = Column(Integer, default=0)
    ignored = Column(Boolean, default=False)
    locked = Column(Boolean, default=False)
    deleted = Column(Boolean, default=False)

    forcedname = Column(String, default='', comment='forced name')
    topfolder = Column(String, default='')
    # movie-type second-level folder; for episodes the season is used instead
    secondfolder = Column(String, default='')
    isepisode = Column(Boolean, default=False)
    season = Column(Integer, default=-1)
    episode = Column(Integer, default=-1)
    # path used when creating the link; may differ from the path inside docker
    linkpath = Column(String, default='')
    destpath = Column(String, default='')
    # NOTE(review): utcnow here vs datetime.now in _ScrapingRecords — confirm.
    updatetime = Column(DateTime, default=datetime.datetime.utcnow)
    deadtime = Column(DateTime, default=None, comment='time to delete files')

    def __init__(self, basename, basepath):
        self.srcname = basename
        self.srcpath = basepath

    def serialize(self):
        """Return a JSON-serializable dict; datetimes rendered as strings."""
        return {
            'id': self.id,
            'srcname': self.srcname,
            'srcpath': self.srcpath,
            'srcsize': self.srcsize,
            'srcfolder': self.srcfolder,
            'status': self.status,
            'ignored': self.ignored,
            'locked': self.locked,
            'deleted': self.deleted,
            'topfolder': self.topfolder,
            'secondfolder': self.secondfolder,
            'isepisode': self.isepisode,
            'season': self.season,
            'episode': self.episode,
            'forcedname': self.forcedname,
            'linkpath': self.linkpath,
            'destpath': self.destpath,
            'updatetime': self.updatetime.strftime("%Y/%m/%d %H:%M:%S") if self.updatetime else '',
            'deadtime': self.deadtime.strftime("%Y/%m/%d %H:%M:%S") if self.deadtime else '',
        }
137 |
--------------------------------------------------------------------------------
/src/model/task.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | from sqlalchemy import Column, Integer, String, Boolean, DateTime, BigInteger
4 | from .. import db
5 |
6 |
class _Task(db.Model):
    """Named long-lived task with progress counters.

    status:
        0: wait
        1: finished
        2: runing
    """
    __tablename__ = 'task'

    id = Column(Integer, primary_key=True)
    name = Column(String, default='task')
    # presumably the id of the related config — confirm against callers
    cid = Column(Integer, default=0)
    status = Column(Integer, default=0)

    # progress counters
    total = Column(Integer, default=0)
    finished = Column(Integer, default=0)

    def __init__(self, name):
        self.name = name
25 |
26 |
class _AutoTask(db.Model):
    """Automatic task queued by a client; successful tasks are deleted.

    status:
        0: pending
        1: running
    """
    __tablename__ = 'autotask'

    id = Column(Integer, primary_key=True)
    path = Column(String, default='', comment="客户端传入路径")
    status = Column(Integer, default=0, comment="状态")

    def __init__(self, path):
        self.path = path

    def serialize(self):
        """Return a JSON-serializable dict with every column."""
        return {
            'id': self.id,
            'path': self.path,
            'status': self.status,
        }
47 |
--------------------------------------------------------------------------------
/src/notifications/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | from .telegram import Telegram
4 | from .wechat import WeChat
5 |
class Notification():
    """Facade that fans notifications out to all configured channels."""

    telegram = None
    wechat = None

    def __init__(self) -> None:
        self.telegram = Telegram()
        self.wechat = WeChat()

    def sendtext(self, text:str):
        """Broadcast a plain-text message to every channel."""
        self.telegram.sendtext(text)
        self.wechat.sendtext(text)

    def isWeEnabled(self):
        """True when WeChat credentials are configured."""
        return self.wechat.updateConfig()

    def isTgEnabled(self):
        """True when the Telegram bot is configured."""
        return self.telegram.updateConfig()

    def sendWeNews(self, title, description, picurl, url):
        """WeChat-only: send a news (link card) message."""
        self.wechat.sendnews(title, description, picurl, url)

    def sendWeMarkdown(self, text):
        """WeChat-only: send a markdown message."""
        self.wechat.sendmarkdown(text)

    def sendTgphoto(self, text:str, picpath):
        """Telegram-only: send a photo with a caption."""
        self.telegram.sendphoto(text, picpath)

    def sendTgMarkdown(self, text):
        """Telegram-only: send a markdown message."""
        self.telegram.sendmarkdown(text)


# module-level singleton shared across the app
notificationService = Notification()
39 |
--------------------------------------------------------------------------------
/src/notifications/telegram.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | from scrapinglib import httprequest
4 | from flask import current_app
5 | from ..service.configservice import localConfService
6 |
7 |
class Telegram():
    """Push notifications through a Telegram bot."""

    token = None
    chatid = None

    def updateConfig(self):
        """Load bot token / chat id from the local config.

        Returns True when both values are configured and cached on the
        instance, False otherwise.
        """
        config = localConfService.getConfig()
        if config.tg_chatid and config.tg_token:
            self.token = config.tg_token
            self.chatid = config.tg_chatid
            return True
        return False

    def sendtext(self, text: str):
        """Send a plain-text message via the bot (best-effort).

        Fix: the text is now URL-encoded.  Previously characters such as
        '&', '#', '+' or newlines corrupted the query string and silently
        truncated or broke the message.
        """
        if self.updateConfig():
            from urllib.parse import quote
            url = "https://api.telegram.org/bot{}/sendMessage?chat_id={}&text={}".format(self.token, self.chatid, quote(text))
            configProxy = localConfService.getProxyConfig()
            proxies = configProxy.proxies() if configProxy.enable else None
            try:
                httprequest.get(url, proxies=proxies)
            except Exception as ex:
                # best-effort push: log and continue
                current_app.logger.debug(ex)

    def sendmarkdown(self, text: str):
        """Send a markdown-formatted message via the bot (best-effort)."""
        if self.updateConfig():
            params = {'chat_id': self.chatid, 'text': text, 'parse_mode': 'markdown'}
            url = "https://api.telegram.org/bot{}/sendMessage".format(self.token)
            configProxy = localConfService.getProxyConfig()
            proxies = configProxy.proxies() if configProxy.enable else None
            try:
                httprequest.post(url, params, proxies=proxies)
            except Exception as ex:
                current_app.logger.debug(ex)

    def sendphoto(self, caption: str, photopath):
        """Send a photo with a markdown caption via the bot (best-effort).

        :param photopath: local path of the image file to upload
        """
        if self.updateConfig():
            params = {'chat_id': self.chatid, 'caption': caption, 'parse_mode': 'markdown'}
            with open(photopath, 'rb') as pic:
                files = {'photo': pic}
                url = "https://api.telegram.org/bot{}/sendPhoto".format(self.token)
                configProxy = localConfService.getProxyConfig()
                proxies = configProxy.proxies() if configProxy.enable else None
                try:
                    httprequest.post(url, params, files=files, proxies=proxies)
                except Exception as ex:
                    current_app.logger.debug(ex)
--------------------------------------------------------------------------------
/src/notifications/wechat.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import requests
4 | from datetime import datetime
5 | from flask import current_app
6 | from ..service.configservice import localConfService
7 |
8 |
class WeChat():
    """Push notifications through WeChat Work (企业微信) application messages."""

    corpid = None
    corpsecret = None
    agentid = None

    # cached access-token state
    access_token = None
    expires_in = None
    get_token_time = None

    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3100.0 Safari/537.36",
               "content-type": "charset=utf8"}
    timeout = 20

    def updateConfig(self):
        """Load corp credentials from the local config.

        Returns True when corpid/corpsecret/agentid are all configured and
        cached on the instance, False otherwise.
        """
        config = localConfService.getConfig()
        if config.wechat_corpid and config.wechat_corpsecret and config.wechat_agentid:
            self.corpid = config.wechat_corpid
            self.corpsecret = config.wechat_corpsecret
            self.agentid = config.wechat_agentid
            return True
        return False

    def _payload(self, msgtype, body):
        """Build the common message/send envelope for *msgtype*."""
        return {
            "touser": "@all",
            "msgtype": msgtype,
            "agentid": self.agentid,
            msgtype: body,
            "safe": 0,
            "enable_id_trans": 0,
            "enable_duplicate_check": 0
        }

    def _push(self, requestContent):
        """POST *requestContent* to message/send, retrying up to 3 times (best-effort)."""
        url = "https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token={}".format(self.updateAccessToken())
        for i in range(3):
            try:
                result = requests.post(url, json=requestContent, headers=self.headers, timeout=self.timeout)
                if result:
                    ret = result.json()
                    if ret['errcode'] == 0:
                        # NOTE(review): logged at error level even on success,
                        # mirroring the original behaviour — confirm intent.
                        current_app.logger.error("[!] 推送: 微信消息 {}".format(ret['errmsg']))
                        break
            except Exception:
                # best-effort push: swallow network/JSON errors and retry
                pass

    def sendtext(self, text: str):
        """Send a plain-text message."""
        if self.updateConfig():
            self._push(self._payload("text", {"content": text}))

    def sendmarkdown(self, text: str):
        """Send a markdown message."""
        if self.updateConfig():
            self._push(self._payload("markdown", {"content": text}))

    def sendnews(self, title, description, picurl, linkurl):
        """Send a news (link card) message."""
        if self.updateConfig():
            articles = [
                {
                    "title": title,
                    "description": description,
                    "url": linkurl,
                    "picurl": picurl
                }
            ]
            self._push(self._payload("news", {"articles": articles}))

    def updateAccessToken(self):
        """Return a valid access_token, refreshing it when expired.

        Returns None when credentials are missing or the token request fails.
        """
        have_valid_token = False
        if self.access_token:
            current_time = datetime.now()
            # NOTE(review): .seconds ignores the days component; fine while
            # token lifetimes stay below 24h — confirm.
            if (current_time - self.get_token_time).seconds < self.expires_in:
                have_valid_token = True

        if not have_valid_token:
            if not self.corpid or not self.corpsecret:
                return None
            try:
                tokenurl = "https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid={}&corpsecret={}".format(self.corpid, self.corpsecret)
                response = requests.get(tokenurl, headers=self.headers, timeout=self.timeout)
                if response:
                    ret = response.json()
                    if ret['errcode'] == 0:
                        self.get_token_time = datetime.now()
                        self.access_token = ret['access_token']
                        self.expires_in = ret['expires_in']
                    else:
                        current_app.logger.error("[!] 推送: 微信消息 {}".format(ret['errmsg']))
            except Exception as e:
                current_app.logger.error("[!] 推送:获取微信access_token错误")
                current_app.logger.error(e)
                return None
        return self.access_token
148 |
--------------------------------------------------------------------------------
/src/service/configservice.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | ''' 刮削配置
3 | '''
4 | from ..model.config import _ScrapingConfigs, _TransferConfigs, _AutoConfigs, _LocalConfigs
5 | from .. import db
6 |
7 |
class ScrapingConfService():
    """CRUD service for scraping profile configurations."""

    def getConfiglist(self) -> list[_ScrapingConfigs]:
        """Return every scraping config, seeding a default row when none exist."""
        configs = _ScrapingConfigs.query.all()
        if configs:
            return configs
        default = _ScrapingConfigs()
        db.session.add(default)
        db.session.commit()
        return [default]

    def getConfig(self, sid):
        """Return the config with id *sid*.

        When the table is completely empty a default row is created and
        returned.  NOTE(review): if the table is non-empty but *sid* is
        absent this returns None — presumably callers handle that; confirm.
        """
        config = _ScrapingConfigs.query.filter_by(id=sid).first()
        if not config:
            if not _ScrapingConfigs.query.all():
                config = _ScrapingConfigs()
                db.session.add(config)
                db.session.commit()
        return config

    def updateConfig(self, content):
        """Create or update a config from a dict of field values; return it."""
        cid = content.get('id') or None
        config = _ScrapingConfigs.query.filter_by(id=cid).first()
        if not config:
            config = _ScrapingConfigs()
            db.session.add(config)
        for key, newvalue in content.items():
            if key == 'id' or not hasattr(config, key):
                continue
            if getattr(config, key) != newvalue:
                setattr(config, key, newvalue)
        db.session.commit()
        return config

    def deleteConf(self, cid):
        """Delete the config with id *cid* if it exists."""
        doomed = _ScrapingConfigs.query.filter_by(id=cid).first()
        if doomed is not None:
            db.session.delete(doomed)
            db.session.commit()
53 |
54 |
class TransConfService():
    """CRUD service for transfer-module configurations."""

    def getConfigById(self, cid) -> _TransferConfigs:
        """Return the config with id *cid* (None when absent)."""
        return _TransferConfigs.query.filter_by(id=cid).first()

    def getConfiglist(self) -> list[_TransferConfigs]:
        """Return every transfer config, seeding a default row when none exist."""
        configs = _TransferConfigs.query.all()
        if configs:
            return configs
        default = _TransferConfigs()
        db.session.add(default)
        db.session.commit()
        return [default]

    def updateConf(self, content):
        """Create or update a config from a dict of field values; return it."""
        cid = content.get('id') or None
        config = _TransferConfigs.query.filter_by(id=cid).first()
        if not config:
            config = _TransferConfigs()
            db.session.add(config)
        for key, newvalue in content.items():
            if key == 'id' or not hasattr(config, key):
                continue
            if getattr(config, key) != newvalue:
                setattr(config, key, newvalue)
        db.session.commit()
        return config

    def deleteConf(self, cid):
        """Delete the config with id *cid* if it exists."""
        doomed = _TransferConfigs.query.filter_by(id=cid).first()
        if doomed is not None:
            db.session.delete(doomed)
            db.session.commit()
96 |
97 |
class ProxyConfig():
    """Holds proxy settings and renders them as a requests-style mapping."""

    SUPPORT_PROXY_TYPE = ("http", "socks5", "socks5h")

    enable = False
    address = ""
    timeout = 5
    retry = 3
    proxytype = "socks5"

    def __init__(self, enable, address, timeout=5, retry=3, proxytype='socks5') -> None:
        """Store the proxy parameters for later use by proxies()."""
        self.enable = enable
        self.address = address
        self.timeout = timeout
        self.retry = retry
        self.proxytype = proxytype

    def proxies(self):
        """Return a proxies dict usable by HTTP clients, or {} when unset."""
        if not self.address:
            return {}
        if self.proxytype in self.SUPPORT_PROXY_TYPE:
            target = "{}://{}".format(self.proxytype, self.address)
            return {"http": target, "https": target}
        # unknown proxy type: fall back to scheme-prefixed addresses
        return {"http": "http://" + self.address,
                "https": "https://" + self.address}
132 |
133 |
class AutoConfService():
    """Automation configuration, stored as a singleton row (id == 1)."""

    def getConfig(self):
        """Return the singleton config row, creating it on first access."""
        config = _AutoConfigs.query.filter_by(id=1).first()
        if config is None:
            config = _AutoConfigs()
            db.session.add(config)
            db.session.commit()
        return config

    def updateConfig(self, content):
        """Apply matching key/value pairs from *content*; always returns True."""
        config = self.getConfig()
        dirty = False
        for key, newvalue in content.items():
            if hasattr(config, key) and getattr(config, key) != newvalue:
                setattr(config, key, newvalue)
                dirty = True
        if dirty:
            db.session.commit()
        return True
158 |
159 |
class LocalConfService():
    """Local/system settings service: push, proxy, transmission, logging."""

    def getConfig(self):
        """Return the singleton local config row (id == 1), creating it on demand."""
        config = _LocalConfigs.query.filter_by(id=1).first()
        if not config:
            config = _LocalConfigs()
            db.session.add(config)
            db.session.commit()
        return config

    def updateLoglvl(self, lvl):
        """Persist a new logging level."""
        config = self.getConfig()
        config.loglevel = lvl
        db.session.commit()

    def updateConfig(self, content):
        """Apply matching key/value pairs from *content*; always returns True."""
        changed = False
        config = self.getConfig()
        for singlekey in content.keys():
            if hasattr(config, singlekey):
                value = getattr(config, singlekey)
                newvalue = content.get(singlekey)
                if value != newvalue:
                    setattr(config, singlekey, newvalue)
                    changed = True
        if changed:
            db.session.commit()
        return True

    def getProxyConfig(self):
        """Build a ProxyConfig from the stored settings.

        Fix: previously this queried the row directly and raised
        AttributeError on a fresh database where no row exists yet;
        getConfig() guarantees the row is present.
        """
        config = self.getConfig()
        proxyConfig = ProxyConfig(config.proxy_enable, config.proxy_address, proxytype=config.proxy_type)
        return proxyConfig
194 |
195 |
# module-level service singletons shared across the app
scrapingConfService = ScrapingConfService()
transConfigService = TransConfService()
autoConfigService = AutoConfService()
localConfService = LocalConfService()
200 |
--------------------------------------------------------------------------------
/src/service/schedulerservice.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | 调度服务
4 | """
5 | from flask_apscheduler import APScheduler
6 |
7 |
class SchedulerService():
    """Thin wrapper around flask_apscheduler for periodic background jobs."""

    def init(self, app):
        # bind a fresh APScheduler instance to the Flask app
        self.scheduler = APScheduler()
        self.scheduler.init_app(app)

    def addJob(self, id:str, func, args, seconds:int):
        ''' Register an `interval` job calling func(*args) every *seconds* seconds.
        '''
        self.scheduler.add_job(id=id, func=func, args=args, trigger='interval', seconds=seconds, timezone="Asia/Shanghai")

    def start(self):
        """Start processing scheduled jobs."""
        self.scheduler.start()

# module-level singleton shared across the app
schedulerService = SchedulerService()
25 |
--------------------------------------------------------------------------------
/src/service/taskservice.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | ''' task
3 | '''
4 | from ..model.task import _Task, _AutoTask
5 | from .. import db
6 |
7 |
class TaskService():
    """Service around the named-task table (_Task)."""

    def getTask(self, taskname) -> _Task:
        """Fetch the task named *taskname*, creating it on first use."""
        existing = _Task.query.filter_by(name=taskname).first()
        if existing:
            return existing
        created = _Task(taskname)
        db.session.add(created)
        db.session.commit()
        return created

    def haveRunningTask(self):
        """Return True when any task is currently running (status == 2)."""
        return _Task.query.filter_by(status=2).first() is not None

    def updateTaskStatus(self, task: _Task, status: int):
        """Set the task status (0 wait / 1 finished / 2 runing).

        Progress counters are reset when the task finishes.
        """
        if task.status == status:
            return
        task.status = status
        if status == 1:
            task.total = 0
            task.finished = 0
        db.session.commit()

    def updateTaskNum(self, task: _Task, total, finished = 0):
        """Update the total (and finished) counters when total changed."""
        if task.total == total:
            return
        task.total = total
        task.finished = finished
        db.session.commit()

    def updateTaskFinished(self, task: _Task, num):
        """Update the finished counter when it changed."""
        if task.finished == num:
            return
        task.finished = num
        db.session.commit()

    def commit(self):
        """Flush any pending session changes."""
        db.session.commit()
54 |
55 |
class AutoTaskService():
    """Queue of client-submitted paths awaiting automatic processing."""

    def init(self, path):
        """Create and persist a new auto task for *path*; return it."""
        newtask = _AutoTask(path)
        db.session.add(newtask)
        db.session.commit()
        return newtask

    def reset(self):
        """Mark every running task (status 1) back to pending (status 0)."""
        running = _AutoTask.query.filter_by(status=1).all()
        if running:
            for item in running:
                item.status = 0
            self.commit()

    def getTasks(self):
        """Return all queued tasks."""
        return _AutoTask.query.all()

    def getFirst(self):
        """Return the first queued task, or None."""
        return _AutoTask.query.first()

    def getRunning(self):
        """Return the task currently running (status == 1), or None."""
        return _AutoTask.query.filter_by(status=1).first()

    def getPath(self, path):
        """Return the task registered for *path*, or None."""
        return _AutoTask.query.filter_by(path=path).first()

    def commit(self):
        """Flush any pending session changes."""
        db.session.commit()

    def deleteTask(self, cid):
        """Delete the task with id *cid* if it exists."""
        doomed = _AutoTask.query.filter_by(id=cid).first()
        if doomed is not None:
            db.session.delete(doomed)
            db.session.commit()
93 |
94 |
# module-level service singletons shared across the app
taskService = TaskService()
autoTaskService = AutoTaskService()
97 |
--------------------------------------------------------------------------------
/src/utils/filehelper.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import os
3 | import pathlib
4 | import re
5 | import errno
6 | import shutil
7 | import stat
8 | import logging
9 | from flask import current_app
10 |
# video file extensions recognised as media
video_type = ['.mp4', '.avi', '.rmvb', '.wmv', '.strm',
              '.mov', '.mkv', '.flv', '.ts', '.m2ts','.webm', '.iso']
# subtitle/companion file extensions
ext_type = ['.ass', '.srt', '.sub', '.ssa', '.smi', '.idx', '.sup',
            '.psb', '.usf', '.xss', '.ssf', '.rt', '.lrc', '.sbv', '.vtt', '.ttml']

# glob-pattern variants of the lists above, for filter-based APIs
video_filter = ['*.mp4', '*.avi', '*.rmvb', '*.wmv', '*.strm',
                '*.mov', '*.mkv', '*.flv', '*.ts', '*.m2ts', '*.webm', '*.iso']
ext_filter = ['*.ass', '*.srt', '*.sub', '*.ssa', '*.smi', '*.idx', '*.sup',
              '*.psb', '*.usf', '*.xss', '*.ssf', '*.rt', '*.lrc', '*.sbv', '*.vtt', '*.ttml']
20 |
21 |
def creatFolder(foldername):
    """Create *foldername* (and any missing parents) if it does not exist.

    Fix: the previous exists()-then-makedirs sequence was racy (TOCTOU) —
    a folder created between the check and the call raised and aborted.
    ``exist_ok=True`` makes the operation atomic and idempotent.
    Errors are logged and swallowed so callers can continue best-effort.
    """
    try:
        os.makedirs(foldername, exist_ok=True)
    except Exception as e:
        logger().info("[-]failed!can not be make Failed output folder\n[-](Please run as Administrator)")
        logger().error(e)
    return
32 |
33 |
def checkFolderhasMedia(folder):
    """Return True when *folder* contains a video file.

    A non-directory path counts as media when it exists (it is assumed to
    be the media file itself).
    """
    if not os.path.isdir(folder):
        return os.path.exists(folder)
    for _root, _dirs, files in os.walk(folder, topdown=False):
        for name in files:
            if name.lower().endswith(tuple(video_type)):
                return True
    return False
46 |
47 |
def cleanFolder(foldername):
    """Recreate *foldername* as an empty directory.

    Fix: the bare ``except:`` also swallowed KeyboardInterrupt/SystemExit;
    shutil.rmtree only raises OSError subclasses, so catch exactly that.
    Removal failure (e.g. the folder never existed) is intentionally ignored.
    """
    try:
        shutil.rmtree(foldername)
    except OSError:
        pass
    creatFolder(foldername)
56 |
57 |
def cleanbySuffix(folder, suffix):
    """Recursively delete files whose extension is listed in *suffix*."""
    for entry in os.listdir(folder):
        fullpath = os.path.join(folder, entry)
        if os.path.isdir(fullpath):
            cleanbySuffix(fullpath, suffix)
            continue
        if os.path.splitext(fullpath)[1].lower() in suffix:
            logger().info("clean file by suffix [{}]".format(fullpath))
            os.remove(fullpath)
69 |
70 |
def cleanbyNameSuffix(folder, basename, suffix):
    """ Recursively delete files that start with *basename* and whose
    extension is listed in *suffix*. """
    for entry in os.listdir(folder):
        fullpath = os.path.join(folder, entry)
        if os.path.isdir(fullpath):
            cleanbyNameSuffix(fullpath, basename, suffix)
            continue
        stem, ext = os.path.splitext(entry)
        if ext.lower() in suffix and stem.startswith(basename):
            logger().debug("clean by name & suffix [{}]".format(fullpath))
            os.remove(fullpath)
83 |
84 |
def cleanExtraMedia(folder):
    """ Delete leftover companion files that no longer match any video
    in the same folder.

    Files named after artwork (fanart/poster/tvshow/season/landscape)
    are always kept; ``extrafanart`` sub-folders are left untouched.

    Fix: the original routed an ``extrafanart`` directory into the file
    branch, where ``os.remove`` on a directory raises IsADirectoryError.
    """
    entries = os.listdir(folder)
    # stems of all video files present; companions must match one of them
    stems = [os.path.splitext(v)[0] for v in entries
             if os.path.splitext(v)[1].lower() in video_type]
    keep_prefixes = ('fanart', 'poster', 'tvshow', 'season', 'landscape')
    for entry in entries:
        fullpath = os.path.join(folder, entry)
        if os.path.isdir(fullpath):
            # recurse into sub-folders, but never touch extrafanart
            if entry != "extrafanart":
                cleanExtraMedia(fullpath)
            continue
        if entry.lower().startswith(keep_prefixes):
            continue
        if any(entry.startswith(stem) for stem in stems):
            continue
        logger().debug("clean extra media file [{}]".format(fullpath))
        os.remove(fullpath)
110 |
111 |
def cleanFolderWithoutSuffix(folder, suffix):
    """ Recursively remove directory trees holding no file with a
    suffix from *suffix*.

    :return: True when this folder (or any sub-folder) contains a
             matching file and therefore survives
    """
    found = False
    for entry in os.listdir(folder):
        fullpath = os.path.join(folder, entry)
        if os.path.isdir(fullpath):
            if cleanFolderWithoutSuffix(fullpath, suffix):
                found = True
        elif os.path.splitext(fullpath)[1].lower() in suffix:
            found = True
    if not found:
        logger().info("clean empty media folder [{}]".format(folder))
        shutil.rmtree(folder)
    return found
129 |
130 |
def cleanFolderbyFilter(folder, filter):
    """ Delete every file in *folder* whose name contains *filter*.

    When nothing is left behind (no sub-folders, no surviving files),
    the folder itself is removed as well.
    """
    removed_everything = True
    for entry in os.listdir(folder):
        fullpath = os.path.join(folder, entry)
        if os.path.isdir(fullpath):
            removed_everything = False
            continue
        if filter in entry:
            logger().info("clean folder by filter [{}]".format(fullpath))
            os.remove(fullpath)
        else:
            removed_everything = False
    if removed_everything:
        shutil.rmtree(folder)
150 |
151 |
def cleanFilebyFilter(folder, filter):
    """ Delete files starting with *filter* (current folder only, no
    recursion).

    A filter without a multi-part marker ("-CD") must not delete files
    that do carry one, so renaming a single-part file cannot wipe the
    parts of a multi-disc set.
    """
    for entry in os.listdir(folder):
        fullpath = os.path.join(folder, entry)
        if os.path.isdir(fullpath) or not entry.startswith(filter):
            continue
        if '-CD' in entry.upper() and '-CD' not in filter.upper():
            # part file, but filter is not part-specific: keep it
            continue
        logger().info("clean file [{}]".format(fullpath))
        os.remove(fullpath)
171 |
172 |
def moveSubs(srcfolder, destfolder, basename, newname, saved=True):
    """ Copy or move subtitle files matching *basename* into
    *destfolder*, renaming them to *newname*.

    :param saved: True copies the subtitles, False moves them
    """
    for entry in os.listdir(srcfolder):
        stem, ext = os.path.splitext(entry)
        if ext.lower() not in ext_type or not stem.startswith(basename):
            continue
        src_file = os.path.join(srcfolder, entry)
        logger().debug("[-] - copy sub " + src_file)
        target = os.path.join(destfolder, stem.replace(basename, newname) + ext)
        if saved:
            shutil.copyfile(src_file, target)
        else:
            shutil.move(src_file, target)
        # make the new file group/world readable and writable
        os.chmod(target, stat.S_IRWXU | stat.S_IRGRP |
                 stat.S_IWGRP | stat.S_IROTH | stat.S_IWOTH)
192 |
193 |
def moveSubsbyFilepath(srcpath, destpath, saved=True):
    """ Find subtitles named like *srcpath* and copy/move them next to
    *destpath*, renamed to match it.

    :param saved: True copies the subtitles, False moves them
    """
    src_dir = os.path.dirname(srcpath)
    src_stem = os.path.splitext(os.path.basename(srcpath))[0]
    dest_dir = os.path.dirname(destpath)
    dest_stem = os.path.splitext(os.path.basename(destpath))[0]
    moveSubs(src_dir, dest_dir, src_stem, dest_stem, saved)
203 |
204 |
def forceSymlink(srcpath, dstpath, is_relative=False):
    """ Create a symlink at *dstpath* pointing to *srcpath*, replacing
    whatever already exists at *dstpath*.

    :param is_relative: store the target relative to the link's folder
    (https://stackoverflow.com/questions/8299386/modifying-a-symlink-in-python)
    """
    if is_relative:
        srcpath = os.path.relpath(srcpath, os.path.dirname(dstpath))
    try:
        os.symlink(srcpath, dstpath)
    except OSError as err:
        if err.errno != errno.EEXIST:
            raise
        # destination occupied: drop it and link again
        os.remove(dstpath)
        os.symlink(srcpath, dstpath)
219 |
220 |
def forceHardlink(srcpath, dstpath):
    """ Create a hard link at *dstpath* to *srcpath*, replacing an
    existing file at *dstpath*. """
    try:
        os.link(srcpath, dstpath)
    except OSError as err:
        if err.errno != errno.EEXIST:
            raise
        # destination occupied: drop it and link again
        os.remove(dstpath)
        os.link(srcpath, dstpath)
232 |
def checkFileExists(filepath):
    """ Return True when *filepath* exists, counting dangling symlinks
    (os.path.exists follows links, Path.is_symlink does not). """
    return os.path.exists(filepath) or pathlib.Path(filepath).is_symlink()
243 |
def linkFile(srcpath, dstpath, linktype=1, is_relative=False):
    """ Link *srcpath* to *dstpath*, skipping the work when an
    equivalent link already exists.

    :param linktype: ``1`` symlink, ``2`` hard link
    :param is_relative: symlink only — store a relative target

    https://stackoverflow.com/questions/41941401/how-to-find-out-if-a-folder-is-a-hard-link-and-get-its-real-path
    """
    if linktype == 2 and os.path.exists(dstpath) and os.path.samefile(srcpath, dstpath):
        logger().debug("[!] same file already exists")
        return
    if linktype == 1 and pathlib.Path(dstpath).is_symlink() and os.readlink(dstpath) == srcpath:
        logger().debug("[!] link file already exists")
        return
    dstfolder = os.path.dirname(dstpath)
    if not os.path.exists(dstfolder):
        os.makedirs(dstfolder)
    logger().debug("[-] create link from [{}] to [{}]".format(srcpath, dstpath))
    if linktype == 1:
        forceSymlink(srcpath, dstpath, is_relative)
    else:
        forceHardlink(srcpath, dstpath)
264 |
265 |
def replaceCJK(base: str):
    """ Try to strip CJK characters (and bracketed groups containing
    them) from *base*, e.g. "你好 [4k修复] (实例1)" -> "".

    https://stackoverflow.com/questions/1366068/whats-the-complete-range-for-chinese-characters-in-unicode

    https://www.unicode.org/charts/charindex.html
    """
    tmp = base
    # drop whole bracketed groups (ASCII and full-width brackets) whose
    # content contains CJK codepoints
    for n in re.findall('[\(\[\(](.*?)[\)\]\)]', base):
        if re.findall('[\u3000-\u33FF\u4e00-\u9fff]+', n):
            try:
                # the captured text is interpolated back into a pattern;
                # metacharacters inside it may raise, hence the guard
                cop = re.compile("[\(\[\(]" + n + "[\)\]\)]")
                tmp = cop.sub('', tmp)
            except:
                pass
    # remove any remaining bare CJK characters
    tmp = re.sub('[\u3000-\u33FF\u4e00-\u9fff]+', '', tmp)
    tmp = cleanParentheses(tmp)
    # squeeze repeated punctuation and trim decorating characters
    tmp = re.sub(r'(\W)\1+', r'\1', tmp).lstrip(' !?@#$.::]))').rstrip(' !?@#$.::[((')
    return tmp
286 |
287 |
def cleanParentheses(input: str):
    """ Repeatedly drop empty "()" and "[]" pairs until none remain
    (handles nesting such as "([])"). """
    while "()" in input or "[]" in input:
        input = input.replace("()", "").replace("[]", "")
    return input
296 |
297 |
def replaceRegex(base: str, regex: str):
    """ Remove every match of *regex* from *base*, then squeeze repeated
    punctuation and trim decorating characters from both ends. """
    pattern = re.compile(regex, re.IGNORECASE | re.X | re.S)
    cleaned = re.sub(r'(\W)\1+', r'\1', pattern.sub('', base))
    return cleaned.lstrip(' !?@#$.::]))').rstrip(' !?@#$.::[((')
303 |
304 |
def logger() -> logging.Logger:
    """ Return the Flask app logger when an application context is
    active, else the plain 'src' logger (prevents app_context errors). """
    return current_app.logger if current_app else logging.getLogger('src')
312 |
--------------------------------------------------------------------------------
/src/utils/number_parser.py:
--------------------------------------------------------------------------------
1 | import os
2 | import re
3 | import logging
4 |
# Junk tokens stripped from file names before number parsing: leading
# site/domain tags ("xxx.com@", "22-sht.me"), and quality/variant markers
# (fhd/hd/sd/1080p/720p/4K/x264/x265/uncensored/hack/leak) attached
# with '-' or '_'.
G_spat = re.compile(
    "^\w+\.(cc|com|net|me|club|jp|tv|xyz|biz|wiki|info|tw|us|de)@|^22-sht\.me|"
    "^(fhd|hd|sd|1080p|720p|4K)(-|_)|"
    "(-|_)(fhd|hd|sd|1080p|720p|4K|x264|x265|uncensored|hack|leak)",
    re.IGNORECASE)
10 |
11 |
class FileNumInfo():
    """ Parsed number/tag information for a media file path.

    Attributes:
        num: extracted movie number (via get_number), may be None
        chs_tag: Chinese-subtitle marker found in the path
        uncensored_tag / leak_tag / hack_tag: release-variant markers
        multipart_tag / part: multi-disc marker, e.g. '-CD2'
        special: special/bonus ('-sp') marker
        originalname: lower-cased file name without extension
    """

    def __init__(self, filepath: str):
        self.num = get_number(filepath)

        self.chs_tag = False
        self.uncensored_tag = False
        self.leak_tag = False
        self.hack_tag = False
        self.multipart_tag = False
        self.special = False
        self.part = ''

        if self.num and is_uncensored(self.num):
            self.uncensored_tag = True
        filepath = filepath.lower()
        if '流出' in filepath or '-leak' in filepath or '_leak' in filepath \
                or '-uncensored' in filepath or '_uncensored' in filepath:
            self.leak_tag = True
        # NOTE(review): '-u'/'_u' is also a substring of '-uncensored'/
        # '_uncensored', so uncensored paths get hack_tag too — confirm intended
        if '破解' in filepath or '-hack' in filepath or '_hack' in filepath or '-u' in filepath or '_u' in filepath:
            self.hack_tag = True

        # Chinese-subtitle markers, either as plain substrings...
        cnlist = ['中文', '字幕', '-c.', '_c.', '_c_', '-c-', '-uc', '_uc']
        for single in cnlist:
            if single in filepath:
                self.chs_tag = True
                break
        # ...or as a trailing -C / NNch token
        if re.search(r'[-_]C(\.\w+$|-\w+)|\d+ch(\.\w+$|-\w+)', filepath, re.I):
            self.chs_tag = True

        basename = os.path.basename(filepath)
        self.originalname = os.path.splitext(basename)[0]
        self.part = self.checkPart(basename)
        if self.part:
            self.multipart_tag = True
        self.special = self.checkSp(basename)

    def fixedName(self):
        """ Rebuild a normalized file name from the number and tags. """
        name = self.num
        if self.special:
            # special episodes keep their original name untouched
            return self.originalname
        if self.uncensored_tag:
            name += '-uncensored'
        if self.leak_tag:
            name += '-leak'
        if self.hack_tag:
            name += '-hack'
        if self.chs_tag:
            name += '-C'
        if self.multipart_tag:
            name += self.part
        return name

    def updateCD(self, cdnum):
        """ Force the multi-part tag to disc *cdnum*. """
        self.multipart_tag = True
        self.part = '-CD' + str(cdnum)

    def isPartOneOrSingle(self):
        """ True for single files or the first disc of a multi-part set. """
        if not self.multipart_tag or self.part == '-CD1' or self.part == '-CD01':
            return True
        return False

    @staticmethod
    def checkPart(filename):
        """ Extract a '-CDn' part marker from *filename*, or None. """
        try:
            if '_cd' in filename or '-cd' in filename:
                prog = re.compile("(?:-|_)cd\d{1,2}", re.IGNORECASE | re.X | re.S)
                result = prog.findall(filename)
                if result:
                    part = str(result[0]).upper().replace('_', '-')
                    return part
            # fall back to a bare trailing -N / _N part number
            prog = re.compile("(?:-|_)\d{1,2}$", re.IGNORECASE | re.X | re.S)
            bname = os.path.splitext(filename)[0]
            result = prog.findall(bname)
            if result:
                part = str(result[0]).upper().replace('_', '-')
                if 'CD' not in part:
                    part = part.replace('-', '-CD')
                return part
        except:
            return

    @staticmethod
    def checkSp(filename):
        """ True when *filename* carries exactly one '-sp' special marker. """
        try:
            prog = re.compile("(?:-|_)sp(?:_|-|$)", re.IGNORECASE | re.X | re.S)
            bname = os.path.splitext(filename)[0]
            result = prog.findall(bname)
            if result and len(result) == 1:
                return True
        except:
            return False
106 |
107 |
def get_number(file_path: str) -> str:
    """ Extract the movie number from a file path.

    Order of attempts: rule table on the file name, then on the parent
    folder name, then heuristics for subtitle-group releases, numbers
    containing '-'/'_', and finally FANZA CIDs (no dash).
    Returns None when everything fails.
    """
    try:
        basename = os.path.basename(file_path)
        file_subpath = os.path.dirname(file_path)
        file_subpath = os.path.basename(file_subpath)
        (filename, ext) = os.path.splitext(basename)
        file_number = rules_parser(filename)
        if file_number is None:
            # file name carried no number: try the folder name instead
            file_number = rules_parser(file_subpath)
        if file_number:
            return file_number

        logging.getLogger().debug(f"[!] 特殊番号: {file_path}")
        if '字幕组' in filename or 'SUB' in filename.upper() or re.match(r'[\u30a0-\u30ff]+', filename):
            # subtitle-group release: strip junk tags and bracket groups,
            # then keep everything before the first dot
            filename = G_spat.sub("", filename)
            filename = re.sub("\[.*?\]","",filename)
            filename = filename.replace(".chs", "").replace(".cht", "")
            file_number = str(re.findall(r'(.+?)\.', filename)).strip(" [']")
            return file_number
        elif '-' in filename or '_' in filename:
            # regular extraction: numbers containing '-' or '_'
            filename = G_spat.sub("", filename)
            # drop a leading "[YYYY-MM-DD] - " date tag
            filename = str(re.sub("\[\d{4}-\d{1,2}-\d{1,2}\] - ", "", filename))
            filename = re.sub("[-_]cd\d{1,2}", "", filename, flags=re.IGNORECASE)
            if not re.search("-|_", filename):
                # no '-' left after removing -CDn, e.g. n1012-CD1.wmv
                return str(re.search(r'\w+', filename[:filename.find('.')], re.A).group())
            file_number = os.path.splitext(filename)
            filename = re.search(r'[\w\-_]+', filename, re.A)
            if filename:
                file_number = str(filename.group())
            else:
                file_number = file_number[0]
            # strip a trailing Chinese-sub '-c' and a trailing 'NNch' marker
            file_number = re.sub("(-|_)c$", "", file_number, flags=re.IGNORECASE)
            if re.search("\d+ch$", file_number, flags=re.I):
                file_number = file_number[:-2]
            return file_number.upper()
        else:
            # number without '-': FANZA CID
            # western release pattern, e.g. x-art.18.11.05
            oumei = re.search(r'[a-zA-Z]+\.\d{2}\.\d{2}\.\d{2}', basename)
            if oumei:
                return oumei.group()
            try:
                return str(
                    re.findall(r'(.+?)\.',
                               str(re.search('([^<>/\\\\|:""\\*\\?]+)\\.\\w+$', basename).group()))).strip(
                    "['']").replace('_', '-')
            except:
                return str(re.search(r'(.+?)\.', basename)[0])
    except Exception as e:
        logging.getLogger().error(e)
        return
161 |
162 |
# Ordered matching rules: most specific first, generic label-number last.
rules = [
    lambda x: re.search(r'\d{6}(-|_)\d{2,3}', x, re.I).group(),
    lambda x: re.search(r'x-art\.\d{2}\.\d{2}\.\d{2}', x, re.I).group(),
    lambda x: ''.join(['xxx-av-', re.findall(r'xxx-av[^\d]*(\d{3,5})[^\d]*', x, re.I)[0]]),
    lambda x: 'heydouga-' + '-'.join(re.findall(r'(\d{4})[\-_](\d{3,4})[^\d]*', x, re.I)[0]),
    lambda x: 'HEYZO-' + re.findall(r'heyzo[^\d]*(\d{4})', x, re.I)[0],
    lambda x: re.search(r'mdbk(-|_)(\d{4})', x, re.I).group(),
    lambda x: re.search(r'mdtm(-|_)(\d{4})', x, re.I).group(),
    lambda x: re.search(r's2mbd(-|_)(\d{3})', x, re.I).group(),
    lambda x: re.search(r's2m(-|_)(\d{3})', x, re.I).group(),
    lambda x: re.search(r'fc2(-|_)(\d{5,7})', x, re.I).group(),
    lambda x: re.search(r'([A-Za-z]{2,6}\-?\d{3,4})', x, re.I).group(),
]


def rules_parser(filename: str):
    """ Run the upper-cased *filename* through the rule table.

    :return: the first extracted number, or None when no rule matches
    """
    filename = filename.upper()
    if 'FC2' in filename:
        # normalise FC2 variants once (loop-invariant): FC2PPV / FC2-PPV,
        # underscores and stray spaces all collapse to FC2-<digits>
        filename = filename.replace('PPV', '').replace('--', '-').replace('_', '-').replace(' ', '')
    for rule in rules:
        try:
            file_number = rule(filename)
            if file_number:
                return file_number
        except Exception:
            # rule did not match (AttributeError / IndexError): try the next
            pass
    return None
193 |
194 |
class Cache_uncensored_conf:
    """ Lazily-built regex cache of uncensored-number prefixes. """

    # compiled alternation pattern; stays None until set() is called
    prefix = None

    def is_empty(self):
        """ True while no prefix pattern has been compiled yet. """
        return self.prefix is None

    def set(self, v: list):
        """ Compile the prefix list into a single alternation pattern.

        :raises ValueError: for an empty/None list or empty first entry
        """
        if not v or not len(v) or not len(v[0]):
            raise ValueError('input prefix list empty or None')
        parts = [v[0]] + [f"{item}.+" for item in v[1:]]
        self.prefix = re.compile("|".join(parts), re.I)

    def check(self, number):
        """ Match *number* against the cached pattern (anchored at start). """
        if self.prefix is None:
            raise ValueError('No init re compile')
        return self.prefix.match(number)


G_cache_uncensored_conf = Cache_uncensored_conf()
217 |
218 |
def is_uncensored(number):
    """ Heuristic: does *number* look like an uncensored release number? """
    known = r'[\d-]{4,}|\d{6}_\d{2,3}|(cz|gedo|k|n|red-|se)\d{2,4}|heyzo.+|xxx-av-.+|heydouga-.+|x-art\.\d{2}\.\d{2}\.\d{2}'
    if re.match(known, number, re.I):
        return True
    # fall back to a cached prefix table, built on first use
    uncensored_prefix = "S2M,BT,LAF,SMD,SMBD,SM3D2DBD,SKY-,SKYHD,CWP,CWDV,CWBD,CW3D2DBD,MKD,MKBD,MXBD,MK3D2DBD,MCB3DBD,MCBD,RHJ,MMDV"
    if G_cache_uncensored_conf.is_empty():
        G_cache_uncensored_conf.set(uncensored_prefix.split(','))
    return G_cache_uncensored_conf.check(number)
230 |
231 |
if __name__ == "__main__":
    # manual smoke test: parse sample paths and print the detected tags
    test_path = [
        "/media/sdmua-001-c.mkv",
        "/media/kmhrs-023-C.mkv",
        "/media/sekao-023-C.mkv",
        "/media/sekao-023-leak.mkv",
        "/media/FC2-PPV-1234567.mkv",
        "/media/FC2PPV-1234567.mkv",
        "/meida/fc2-ppv-1234567-xxx.com.mp4",
        "/media/FC2-PPV-1111223/1111223.mp4",
        "/media/FC2-1123456-1.mp4",
        "/media/FC2PPV-1123457/FC2PPV-1123457-2.mp4",
        "/media/111234_123 女人/trailers/trailer.mp4",
        "/media/Miku Ohashi/調子に乗ったS嬢X苛められたM嬢 大橋未久(011015_780).mp4",
        "/meida/S2M-001-FHD/S2MBD-001.mp4",
        "/media/FC2-PPV-1112345/④えりか旅行本編.mp4",
        "/media/SIRO-1234-C.mkv",
        "/media/MXGS-1234-C.mkv",
        "/media/dv-1234-C.mkv",
        "/media/pred-1234-C.mkv",
    ]
    def convert_emoji(bool_tag):
        # render a boolean as a check mark / dash for the table below
        if bool_tag:
            return "✅"
        return "-"

    for t in test_path:
        fin = FileNumInfo(t)
        print(f"===============================")
        print(f"解析 {t} :")
        print(f"    番号: {fin.num}")
        print(f"    中文: {convert_emoji(fin.chs_tag)}  无码: {convert_emoji(fin.uncensored_tag)}  流出: {convert_emoji(fin.leak_tag)}  破解: {convert_emoji(fin.hack_tag)}")
        print(f"    多集: {convert_emoji(fin.multipart_tag)}  特典: {convert_emoji(fin.special)}")
266 |
--------------------------------------------------------------------------------
/src/utils/regex.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import re
4 |
5 |
def regexMatch(basename, reg):
    """ Return all matches of *reg* in *basename*.

    Compiled with IGNORECASE, verbose (X) and dotall (S) flags, as every
    caller in this module expects.
    """
    return re.compile(reg, re.IGNORECASE | re.X | re.S).findall(basename)
12 |
13 |
def extractEpNum(single: str):
    """ Extract the episode number from a delimited fragment.

    Steps:
    1. The fragment must be wrapped in matching delimiters: identical
       characters (e.g. spaces, dots), '[' ... ']', or '第' ... '話/话/集'.
    2. Strip the decorating characters from both ends.
    3. The remainder must contain at least one digit.
    4. Without an explicit E/e marker, single digits and year-like
       values (e.g. 2018) are rejected.

    :param single: candidate fragment, e.g. ' 09 ', '[13v2]', '第013話'
    :return: the bare episode token, or None when it is not an episode

    Fix: guards against empty input (the original raised IndexError on
    ``single[0]``).
    """
    if not single:
        return None
    left = single[0]
    right = single[-1:]
    wrapped = (left == right
               or (left == '[' and right == ']')
               or (left == '第' and right in '話话集'))
    if not wrapped:
        return None

    result = single.lstrip('第.EPep\[ ')
    result = result.rstrip('話话集]. ')

    if not re.search(r'\d', result):
        return None
    if not re.search(r'[Ee]', single):
        # no explicit episode marker: reject single digits and years
        if len(result) == 1:
            return None
        if re.match(r'.*([1-3][0-9]{3})', result):
            return None
    return result
39 |
40 |
def matchSeason(filename: str):
    """ Return the season number when *filename* carries exactly one
    s/season marker, else None.

    >>> matchSeason("Fights.Break.Sphere.2018.S02.WEB-DL.1080p.H264.AAC-TJUPT")
    2
    >>> matchSeason("疑犯追踪S01-S05.Person.of.Interest.2011-2016.1080p.Blu-ray.x265.AC3£cXcY@FRDS") is None
    True
    >>> matchSeason("Yes.Prime.Minister.COMPLETE.PACK.DVD.x264-P2P") is None
    True
    """
    hits = re.compile("(?:s|season)(\d{2})", re.IGNORECASE | re.X | re.S).findall(filename)
    if len(hits) == 1:
        return int(hits[0])
    return None
56 |
57 |
def matchEpPart(basename):
    """ Regex-match the single-episode token inside a release name.

    >>> matchEpPart("生徒会役員共* 09 (BDrip 1920x1080 HEVC-YUV420P10 FLAC)")
    ' 09 '
    >>> matchEpPart("[Rip] SLAM DUNK 第013話「湘北VS陵南 燃える主将!」(BDrip 1440x1080 H264 FLAC)")
    '第013話'
    >>> matchEpPart("[Rip] SLAM DUNK [013]「湘北VS陵南 燃える主将!」(BDrip 1440x1080 H264 FLAC)")
    '[013]'
    >>> matchEpPart("[Rip] SLAM DUNK [13.5]「湘北VS陵南 燃える主将!」(BDrip 1440x1080 H264 FLAC)")
    '[13.5]'
    >>> matchEpPart("[Rip] SLAM DUNK [13v2]「湘北VS陵南 燃える主将!」(BDrip 1440x1080 H264 FLAC)")
    '[13v2]'
    >>> matchEpPart("[Rip] SLAM DUNK [13(OA)]「湘北VS陵南 燃える主将!」(BDrip 1440x1080 H264 FLAC)")
    '[13(OA)]'
    >>> matchEpPart("[Neon Genesis Evangelion][23(Video)][BDRIP][1440x1080][H264_FLACx2]")
    '[23(Video)]'
    >>> matchEpPart("[Studio] Fullmetal Alchemist꞉ Brotherhood [01][Ma10p_1080p][x265_flac]")
    '[01]'
    >>> matchEpPart("[raws][Code Geass Lelouch of the Rebellion R2][15][BDRIP][Hi10P FLAC][1920X1080]")
    '[15]'
    >>> matchEpPart("[raws][High School Of The Dead][01][BDRIP][HEVC Main10P FLAC][1920X1080]")
    '[01]'
    >>> matchEpPart("[Studio] Steins;Gate 0 [01][Ma10p_1080p][x265_flac]")
    '[01]'
    >>> matchEpPart("Steins;Gate 2011 EP01 [BluRay 1920x1080p 23.976fps x264-Hi10P FLAC]")
    ' EP01 '
    >>> matchEpPart("Fate Stay Night [Unlimited Blade Works] 2014 - EP01 [BD 1920x1080 AVC-yuv444p10 FLAC PGSx2 Chap]")
    ' EP01 '
    >>> matchEpPart("Fate Zero EP01 [BluRay 1920x1080p 23.976fps x264-Hi10P FLAC PGSx2]")
    ' EP01 '
    >>> matchEpPart("[AI-Raws&ANK-Raws] Initial D First Stage 01 (BDRip 960x720 x264 DTS-HD Hi10P)[044D7040]")
    ' 01 '
    >>> matchEpPart("[AI-Raws&ANK-Raws] Initial D First Stage [05] (BDRip 960x720 x264 DTS-HD Hi10P)[044D7040]")
    '[05]'

    >>> matchEpPart("Shadow.2021.E11.WEB-DL.4k.H265.60fps.AAC.2Audio")
    '.E11.'
    >>> matchEpPart("Shadow 2021 E11 WEB-DL 4k H265 AAC 2Audio")
    ' E11 '
    >>> matchEpPart("Shadow.2021.第11集.WEB-DL.4k.H265.60fps.AAC.2Audio")
    '第11集'
    >>> matchEpPart("Shadow.2021.E13v2.WEB-DL.4k.H265.60fps.AAC.2Audio")
    '.E13v2.'
    >>> matchEpPart("Shadow.2021.E14(OA).WEB-DL.4k.H265.60fps.AAC.2Audio")
    '.E14(OA).'
    >>> matchEpPart("S03/Person.of.Interest.EP01.2013.1080p.Blu-ray.x265.10bit.AC3")
    '.EP01.'
    >>> matchEpPart("Slam.Dunk.22.Ma10p.1080p.x265.flac")
    '.22.'

    >>> matchEpPart("Person.of.Interest.S03E01.2013.1080p.Blu-ray.x265.10bit.AC3") is None
    True
    """
    # patterns tried in order; a pattern wins only with exactly one hit
    patterns = [
        "第\d*[話话集]",
        "[ ]ep?[0-9.\(videoa\)]*[ ]",
        "\.ep?[0-9\(videoa\)]*\.",
        "\.\d{2,3}(?:v\d)?[\(videoa\)]*\.",
        "[ ]\d{2,3}(?:\.\d|v\d)?[\(videoa\)]*[ ]",
        "\[(?:e|ep)?[0-9.v]*(?:\(oa\)|\(video\))?\]",
    ]
    for pattern in patterns:
        hits = re.compile(pattern, re.IGNORECASE | re.X | re.S).findall(basename)
        if len(hits) == 1:
            return hits[0]
    return None
125 |
126 |
def matchSeries(basename):
    """ Parse a combined SxxEyy marker.

    :return: (season, episode) as ints, or (None, None) when absent
    """
    hits = re.compile("s(\d{1,2})ep?(\d{1,4})", re.IGNORECASE | re.X | re.S).findall(basename)
    if hits:
        season_str, ep_str = hits[0]
        return int(season_str), int(ep_str)
    return None, None
135 |
136 |
def simpleMatchEp(basename: str):
    """ Fallback episode parser for names already forced into a season
    but whose episode number was not extracted.

    >>> simpleMatchEp("01 呵呵呵")
    1
    >>> simpleMatchEp("02_哈哈哈")
    2
    >>> simpleMatchEp("03.嘿嘿嘿")
    3
    >>> simpleMatchEp("04. 嘿嘿嘿")
    4
    >>> simpleMatchEp("05 - 嘿嘿嘿")
    5
    >>> simpleMatchEp("06")
    6
    """
    if basename.isdigit():
        return int(basename)
    hits = re.findall("^(\d{1,3}) ?(_|-|.)? ?([^\W\d]+)", basename)
    if len(hits) == 1:
        return int(hits[0][0])
    return None
161 |
162 |
if __name__ == "__main__":
    # run the doctests embedded in the docstrings above
    import doctest
    doctest.testmod(verbose=True)
166 |
--------------------------------------------------------------------------------
/src/wscontroller/__init__.py:
--------------------------------------------------------------------------------
1 |
2 | from flask_sock import Sock
3 |
4 |
5 | wsocket = Sock()
6 |
7 |
def init(app):
    """ Register websocket routes and bind the Sock extension to *app*. """
    # imported here (not at module top): wsloger does `from . import wsocket`
    # and decorates its handler with @wsocket.route, so wsocket must exist
    # before the module body runs
    from . import wsloger

    wsocket.init_app(app)
--------------------------------------------------------------------------------
/src/wscontroller/wsloger.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import time
4 | import os.path
5 | from collections import deque
6 | from . import wsocket
7 |
# number of tail lines sent to a client when it first connects
NUM_LINES = 1000
# NOTE(review): unused in this module — logstream sleeps a fixed 10 s instead
HEARTBEAT_INTERVAL = 15
10 |
11 |
@wsocket.route('/ws/logstream')
def logstream(websocket):
    """ Stream data/web.log to a websocket client.

    Sends the last NUM_LINES lines immediately, then tails the file,
    pushing newly appended content; polls every 10 s while idle.
    """
    try:
        localPath = os.path.dirname(os.path.abspath(__file__))
        log_path = os.path.join(localPath, "..", "..", "data", "web.log")

        if not os.path.isfile(log_path):
            raise ValueError('Not found log')

        with open(log_path, encoding='utf8') as f:
            # send the current tail of the file first
            content = ''.join(deque(f, NUM_LINES))
            websocket.send(content)
            while True:
                content = f.read()
                if content:
                    websocket.send(content)
                else:
                    # nothing new yet; poll again later
                    time.sleep(10)
    except Exception:
        # any failure (including client disconnect) ends the stream;
        # the previously unused `as e` binding has been dropped
        websocket.close()
33 |
--------------------------------------------------------------------------------
/web/static/README.md:
--------------------------------------------------------------------------------
1 |
### Static files for Flask & Vue
3 |
4 | [static resource](https://github.com/Suwmlee/Spike/tree/release)
5 |
6 | 将除`index.html`外的文件放到此目录
7 |
--------------------------------------------------------------------------------
/web/templates/README.md:
--------------------------------------------------------------------------------
1 |
### Templates for Flask & Vue
3 |
4 | [index resource](https://github.com/Suwmlee/Spike/tree/release)
5 |
6 | 将`index.html`文件放到此目录
7 |
--------------------------------------------------------------------------------