├── .dockerignore ├── .env ├── .github └── workflows │ ├── docker.yml │ └── rust.yml ├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── Dockerfile ├── LICENSE ├── README.md ├── README.zh-CN.md ├── assets ├── good.jpg ├── jiascheduler-arch.png ├── job-edit.png ├── qrcode-qq-group.jpg ├── run-list.png ├── scheduler-dashboard.png ├── scheduler-history.png ├── server.png └── webssh.png ├── automate ├── Cargo.toml └── src │ ├── bridge.rs │ ├── bridge │ ├── client.rs │ ├── msg.rs │ ├── protocol.rs │ └── server.rs │ ├── bus.rs │ ├── comet.rs │ ├── comet │ ├── handler.rs │ ├── logic.rs │ ├── macros.rs │ └── types.rs │ ├── lib.rs │ ├── scheduler.rs │ ├── scheduler │ ├── cmd.rs │ ├── executor.rs │ ├── file.rs │ ├── scheduler.rs │ └── types.rs │ └── ssh.rs ├── crates ├── entity │ ├── Cargo.toml │ └── src │ │ ├── entity │ │ ├── agent_release_version.rs │ │ ├── casbin_rule.rs │ │ ├── executor.rs │ │ ├── instance.rs │ │ ├── instance_group.rs │ │ ├── instance_role.rs │ │ ├── job.rs │ │ ├── job_bundle_script.rs │ │ ├── job_exec_history.rs │ │ ├── job_organizer.rs │ │ ├── job_organizer_process.rs │ │ ├── job_organizer_release.rs │ │ ├── job_organizer_release_edge.rs │ │ ├── job_organizer_release_node.rs │ │ ├── job_organizer_task.rs │ │ ├── job_organizer_task_result.rs │ │ ├── job_running_status.rs │ │ ├── job_schedule_history.rs │ │ ├── job_supervisor.rs │ │ ├── job_timer.rs │ │ ├── mod.rs │ │ ├── prelude.rs │ │ ├── role.rs │ │ ├── tag.rs │ │ ├── tag_resource.rs │ │ ├── team.rs │ │ ├── team_member.rs │ │ ├── user.rs │ │ ├── user_role.rs │ │ └── user_server.rs │ │ └── lib.rs ├── leader-election │ ├── Cargo.toml │ └── src │ │ └── lib.rs ├── service │ ├── Cargo.toml │ └── src │ │ ├── config.rs │ │ ├── lib.rs │ │ ├── logic │ │ ├── executor.rs │ │ ├── instance.rs │ │ ├── job.rs │ │ ├── job │ │ │ ├── bundle_script.rs │ │ │ ├── dashboard.rs │ │ │ ├── exec_history.rs │ │ │ ├── schedule.rs │ │ │ ├── supervisor.rs │ │ │ ├── timer.rs │ │ │ └── types.rs │ │ ├── migration.rs │ │ ├── migration │ 
│ │ └── v100.rs │ │ ├── mod.rs │ │ ├── role.rs │ │ ├── ssh.rs │ │ ├── tag.rs │ │ ├── team.rs │ │ ├── types.rs │ │ └── user.rs │ │ └── state.rs └── utils │ ├── Cargo.toml │ └── src │ ├── lib.rs │ └── macros.rs ├── dist └── .gitignore ├── docker-compose.yml ├── examples ├── supervisor.rs └── ws.rs ├── migration ├── .env ├── Cargo.toml ├── README.md ├── sql │ ├── m20250412_add_job_soft_deleted │ │ ├── down.sql │ │ └── up.sql │ ├── m20250420_modify_job_index │ │ ├── down.sql │ │ └── up.sql │ ├── v1_0_0 │ │ ├── down.sql │ │ └── up.sql │ ├── v1_1_0_001 │ │ ├── down.sql │ │ └── up.sql │ └── v1_1_0_002 │ │ ├── down.sql │ │ └── up.sql └── src │ ├── lib.rs │ ├── m20250412_add_job_soft_deleted.rs │ ├── m20250420_modify_job_index.rs │ ├── main.rs │ ├── v1_0_0_create_table.rs │ ├── v1_1_0_001_create_table.rs │ └── v1_1_0_002_create_table.rs ├── openapi-derive ├── Cargo.toml ├── src │ └── lib.rs └── tests │ └── test.rs ├── openapi ├── .env ├── Cargo.toml └── src │ ├── api.rs │ ├── api │ ├── executor.rs │ ├── file.rs │ ├── instance.rs │ ├── job.rs │ ├── manage.rs │ ├── migration.rs │ ├── role.rs │ ├── tag.rs │ ├── team.rs │ ├── terminal.rs │ ├── user.rs │ └── utils.rs │ ├── error.rs │ ├── job.rs │ ├── lib.rs │ ├── macros.rs │ ├── middleware │ ├── auth.rs │ ├── mod.rs │ └── team_permission.rs │ ├── migration.rs │ ├── response.rs │ └── utils.rs ├── restapi ├── comet.http └── openapi.http └── src └── bin ├── agent.rs ├── comet.rs ├── console.rs └── jiascheduler.rs /.dockerignore: -------------------------------------------------------------------------------- 1 | docs 2 | .vscode 3 | .idea 4 | .git# 5 | /target 6 | -------------------------------------------------------------------------------- /.env: -------------------------------------------------------------------------------- 1 | WORKCONF=/data/jiascheduler 2 | WORKDATA=/data/jiascheduler -------------------------------------------------------------------------------- /.github/workflows/docker.yml: 
-------------------------------------------------------------------------------- 1 | name: Docker Build and Push 2 | 3 | on: 4 | push: 5 | branches: ["main"] 6 | tags: 7 | - "*" 8 | 9 | env: 10 | DOCKER_USERNAME: iwannay 11 | IMAGE_NAME: iwannay/jiascheduler 12 | 13 | jobs: 14 | docker: 15 | runs-on: ubuntu-latest 16 | 17 | steps: 18 | - uses: actions/checkout@v4 19 | 20 | - name: Get tag name or default to latest 21 | id: get-tag 22 | run: | 23 | if [[ $GITHUB_REF == refs/tags/* ]]; then 24 | TAG_NAME=${GITHUB_REF#refs/tags/} 25 | else 26 | TAG_NAME="latest" 27 | fi 28 | echo "Tag name: $TAG_NAME" 29 | echo "::set-output name=tag_name::$TAG_NAME" 30 | 31 | - name: Log in to Docker Hub 32 | uses: docker/login-action@v3 33 | with: 34 | username: ${{ env.DOCKER_USERNAME }} 35 | # 进入 GitHub 仓库 -> Settings -> Secrets and variables -> Actions -> Repository secrets 36 | # 创建DOCKER_PAT变量,填入你的Docker令牌 37 | password: ${{ secrets.DOCKER_PAT }} # 使用 DOCKER_PAT 作为密码 38 | 39 | - name: Set up Docker Buildx 40 | uses: docker/setup-buildx-action@v3 41 | 42 | - name: Build and push Docker image 43 | uses: docker/build-push-action@v5 44 | with: 45 | context: . 
46 | file: Dockerfile 47 | push: true 48 | tags: | 49 | ${{ env.IMAGE_NAME }}:${{ steps.get-tag.outputs.tag_name }} 50 | 51 | - name: Verify pushed image 52 | run: docker pull ${{ env.IMAGE_NAME }}:${{ steps.get-tag.outputs.tag_name }} 53 | -------------------------------------------------------------------------------- /.github/workflows/rust.yml: -------------------------------------------------------------------------------- 1 | name: Rust 2 | 3 | on: 4 | push: 5 | branches: ["main"] 6 | pull_request: 7 | branches: ["main"] 8 | 9 | env: 10 | CARGO_TERM_COLOR: always 11 | 12 | jobs: 13 | build: 14 | runs-on: ubuntu-latest 15 | 16 | steps: 17 | - uses: actions/checkout@v4 18 | - name: Build 19 | run: cargo build --verbose 20 | - name: Run tests 21 | run: cargo test --verbose 22 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | .vscode 3 | log 4 | dist/* 5 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = [ 3 | "automate", 4 | "openapi", 5 | "openapi-derive", 6 | "migration", 7 | "crates/leader-election", 8 | "crates/entity", 9 | "crates/service", 10 | "crates/utils", 11 | ] 12 | 13 | [package] 14 | name = "jiascheduler" 15 | version = "1.1.5" 16 | edition = "2024" 17 | 18 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 19 | 20 | 21 | [profile.release] 22 | strip = true 23 | 24 | [[bin]] 25 | name = "jiascheduler-console" 26 | path = "src/bin/console.rs" 27 | 28 | [[bin]] 29 | name = "jiascheduler-agent" 30 | path = "src/bin/agent.rs" 31 | 32 | [[bin]] 33 | name = "jiascheduler-comet" 34 | path = "src/bin/comet.rs" 35 | 36 | [[bin]] 37 | name = "jiascheduler" 38 | path = "src/bin/jiascheduler.rs" 39 | 40 | # 
[target.aarch64-unknown-linux-gnu] 41 | # linker = "aarch64-linux-gnu-gcc" 42 | 43 | # [target.aarch64-unknown-linux-musl] 44 | # linker = "aarch64-linux-musl-ld" 45 | 46 | [dependencies] 47 | poem.workspace = true 48 | clap.workspace = true 49 | futures-util.workspace = true 50 | tracing.workspace = true 51 | tracing-subscriber.workspace = true 52 | tokio-tungstenite.workspace = true 53 | url.workspace = true 54 | anyhow.workspace = true 55 | local-ip-address.workspace = true 56 | moka.workspace = true 57 | serde_json.workspace = true 58 | bytes.workspace = true 59 | serde.workspace = true 60 | file-rotate.workspace = true 61 | tokio.workspace = true 62 | tokio-nsq.workspace = true 63 | cron.workspace = true 64 | nanoid.workspace = true 65 | redis.workspace = true 66 | futures.workspace = true 67 | redis-macros.workspace = true 68 | tokio-cron-scheduler.workspace = true 69 | uuid.workspace = true 70 | automate.workspace = true 71 | openapi.workspace = true 72 | watchexec-supervisor.workspace = true 73 | service.workspace = true 74 | 75 | # terminal-keycode = "1.1.1" 76 | 77 | 78 | [workspace.dependencies] 79 | # diesel = { version = "2.1.4", features = ["mysql"] } 80 | poem = { version = "3.1.1", features = [ 81 | "anyhow", 82 | "redis-session", 83 | "websocket", 84 | "embed", 85 | "static-files", 86 | ] } 87 | poem-openapi = { version = "5.1.1", features = ["rapidoc"] } 88 | tokio = { version = "1.43.0", features = ["full"] } 89 | clap = { version = "4.5.17", features = ["derive"] } 90 | futures-util = "0.3.29" 91 | tracing = "0.1.40" 92 | tracing-subscriber = "0.3.18" 93 | tokio-tungstenite = "0.23.1" 94 | url = "2.5.0" 95 | anyhow = "1.0.75" 96 | local-ip-address = "0.6.1" 97 | moka = { version = "0.12.1", features = ["future"] } 98 | serde_json = "1.0.108" 99 | bytes = "1.5.0" 100 | serde = "1.0.193" 101 | file-rotate = "0.8.0" 102 | openapi-derive = { path = "openapi-derive" } 103 | redis = { version = "0.27", features = [ 104 | "json", 105 | "aio", 106 | 
"tokio-comp", 107 | "connection-manager", 108 | ] } 109 | sea-orm = { version = "1.1.7", features = [ 110 | "macros", 111 | "with-json", 112 | "with-chrono", 113 | "with-rust_decimal", 114 | "with-bigdecimal", 115 | "with-uuid", 116 | "with-time", 117 | "sqlx-mysql", 118 | # "runtime-tokio-native-tls", 119 | "debug-print", 120 | "runtime-tokio-rustls", 121 | ] } 122 | syn = "2.0.48" 123 | quote = "1.0.35" 124 | proc-macro2 = "1.0.76" 125 | thiserror = "2.0.11" 126 | tokio-nsq = "0.14.0" 127 | cron = "0.15.0" 128 | tokio-cron-scheduler = "0.13.0" 129 | nanoid = "0.4.0" 130 | uuid = "*" 131 | futures = "*" 132 | serde_repr = "0.1.18" 133 | russh = "0.44.0" 134 | russh-sftp = "2.0.1" 135 | russh-keys = "0.44.0" 136 | redis-macros = "0.5.1" 137 | config = "*" 138 | chrono = { version = "0.4.38", features = ["serde"] } 139 | rust-crypto = "*" 140 | automate = { path = "automate" } 141 | openapi = { path = "openapi" } 142 | migration = { path = "migration" } 143 | leader-election = { path = "crates/leader-election" } 144 | entity = { path = "crates/entity" } 145 | service = { path = "crates/service" } 146 | utils = { path = "crates/utils" } 147 | sea-query = "0.32.2" 148 | rust-embed = "*" 149 | reqwest = { version = "*", features = ["json"] } 150 | evalexpr = "12.0.2" 151 | watchexec-supervisor = "*" 152 | sea-orm-adapter = "0.4.0" 153 | simple_crypt = "*" 154 | rustc-serialize = "0.3.25" 155 | async-trait = "0.1.81" 156 | toml = "0.8.19" 157 | shellexpand = "3.1.0" 158 | git-version = "0.3.9" 159 | rand = "0.9.0" 160 | http = "1.1.0" 161 | sql-builder = "3.1.1" 162 | mac_address = "1.1.7" 163 | nix = { version = "0.29.0", features = ["signal"] } 164 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # 第一阶段:构建前端 2 | FROM node:18 AS frontend-builder 3 | WORKDIR /app/frontend 4 | 5 | # 克隆前端代码 6 | RUN apt update && apt install -y git && rm -rf 
/var/lib/apt/lists/* 7 | RUN git clone --depth=1 https://github.com/jiawesoft/jiascheduler-ui.git . 8 | RUN npm install -g pnpm && pnpm install --no-frozen-lockfile 9 | 10 | # 编译前端 11 | RUN pnpm build 12 | 13 | # 第二阶段:构建后端 14 | FROM rust:latest AS backend-builder 15 | WORKDIR /app 16 | 17 | # 复制 Rust 依赖文件,以便利用缓存 18 | COPY Cargo.toml Cargo.lock ./ 19 | 20 | # 创建 src 目录,防止 cargo build 失败 21 | RUN mkdir src && echo "fn main() {}" > src/main.rs 22 | 23 | # 预先构建依赖,缓存编译结果 24 | RUN cargo build --release --verbose || true 25 | 26 | # 复制前端编译产物到后端的 dist 目录 27 | COPY --from=frontend-builder /app/frontend/dist /app/dist 28 | 29 | # 复制后端代码并编译 30 | COPY ./ ./ 31 | RUN cargo build --release --verbose 32 | 33 | # 第三阶段:构建最终运行环境 34 | FROM ubuntu:latest 35 | WORKDIR /app 36 | 37 | # 安装必要依赖 38 | RUN apt update && apt install -y ca-certificates 39 | 40 | # 设置时区环境变量 41 | ENV TZ=Asia/Shanghai 42 | 43 | # 安装 tzdata 包并配置时区(非交互模式) 44 | RUN DEBIAN_FRONTEND=noninteractive apt-get install -y tzdata && \ 45 | ln -fs /usr/share/zoneinfo/$TZ /etc/localtime && \ 46 | echo $TZ > /etc/timezone && \ 47 | apt-get clean && \ 48 | dpkg-reconfigure --frontend noninteractive tzdata && \ 49 | rm -rf /var/lib/apt/lists/* 50 | 51 | 52 | # 复制后端可执行文件 53 | COPY --from=backend-builder /app/target/release/jiascheduler-console /app/ 54 | COPY --from=backend-builder /app/target/release/jiascheduler-comet /app/ 55 | COPY --from=backend-builder /app/target/release/jiascheduler-agent /app/ 56 | 57 | # 设置运行时环境变量(如有需要) 58 | ENV RUST_LOG=info 59 | 60 | # 暴露必要端口 61 | EXPOSE 9090 3000 62 | 63 | # 启动命令(默认启动 jiascheduler-console) 64 | CMD ["./jiascheduler-console", "--bind-addr", "0.0.0.0:9090"] 65 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | This is free and unencumbered software released into the public domain. 
2 | 3 | Anyone is free to copy, modify, publish, use, compile, sell, or 4 | distribute this software, either in source code form or as a compiled 5 | binary, for any purpose, commercial or non-commercial, and by any 6 | means. 7 | 8 | In jurisdictions that recognize copyright laws, the author or authors 9 | of this software dedicate any and all copyright interest in the 10 | software to the public domain. We make this dedication for the benefit 11 | of the public at large and to the detriment of our heirs and 12 | successors. We intend this dedication to be an overt act of 13 | relinquishment in perpetuity of all present and future rights to this 14 | software under copyright law. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 17 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 18 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 19 | IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 | OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 | ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 | OTHER DEALINGS IN THE SOFTWARE. 23 | 24 | For more information, please refer to 25 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Jiascheduler 2 | 3 | **English** · [简体中文](./README.zh-CN.md) · [Wiki](https://github.com/jiawesoft/jiascheduler/wiki/Install) 4 | 5 | An open-source, high-performance, scalable task scheduler written in Rust, supporting dynamic configuration. It can push user scripts to tens of thousands of instances simultaneously and collect execution results in real time. 6 | 7 | Jiascheduler does not require script execution nodes to be on the same network. It incorporates an ingenious network penetration model, allowing a single console to manage nodes across different subnets. 
For example, you can use https://jiascheduler.iwannay.cn to push scripts for execution on Tencent Cloud, Alibaba Cloud, and Amazon Cloud simultaneously, or even deploy scripts on your home computer. 8 | 9 | To facilitate node management, Jiascheduler also provides a powerful web SSH terminal, supporting multi-session operations, split-screen, file uploads, downloads, and more. 10 | 11 | ## Architecture 12 | 13 | ![Architecture](./assets/jiascheduler-arch.png) 14 | 15 | ## Quick start 16 | 17 | ### [💖 Jiascheduler download click here 💖 ](https://github.com/jiawesoft/jiascheduler/releases) 18 | 19 | [https://jiascheduler.iwannay.cn](https://jiascheduler.iwannay.cn) 20 | 21 | guest account:guest Password:guest 22 | 23 | In addition to using the test server provided in the demo address, you can also deploy your own Agent. Once successfully deployed, the Agent will automatically connect to the jiascheduler online console. Through the console, you can check the Agent's status, execute scripts, view execution results, and initiate SSH connections. 24 | 25 | ```bash 26 | # Only use job scheduling capability 27 | ./jiascheduler-agent --comet-addr ws://115.159.194.153:3000 --assign-username guest --assign-password guest 28 | 29 | # Utilize job scheduling and webssh capabilities 30 | ./jiascheduler-agent --comet-addr ws://115.159.194.153:3000 --assign-username guest --assign-password guest --ssh-user your_ssh_user --ssh-port 22 --ssh-password your_ssh_user_password --namespace home 31 | ``` 32 | 33 | If you need to log off the node, simply exit the agent 34 | 35 | ### Single-Instance Deployment 36 | 37 | Jiascheduler consists of four executable programs: 38 | 39 | 1. jiascheduler-console: The console service, which provides the web console interface. 40 | 41 | 2. jiascheduler-comet: The connection layer service, which offers a unified access layer for agents to connect. 42 | 43 | 3. jiascheduler-agent: The local agent program, responsible for executing tasks. 44 | 45 | 4. 
jiascheduler: A bundled version of the above three services, designed for simple and quick deployment on a single node. 46 | It’s important to note that the bundled jiascheduler service also supports connections from different agents. 47 | Even if you deploy the bundled version of jiascheduler, you can still deploy additional comet and agent instances. 48 | 49 | For single-instance deployment, you only need to execute the following: 50 | 51 | ```bash 52 | // Access localhost:9090 via a browser to complete the initial setup. 53 | // After the initial setup, the configuration file will be loaded, and there is no need to pass `--bind-addr` for subsequent restarts. 54 | // The default path for the generated configuration file is $HOME/.jiascheduler/console.toml. 55 | ./jiascheduler --bind-addr 0.0.0.0:9090 56 | ``` 57 | 58 | ### Docker Deployment 59 | 60 | Create a `.env` file in the same directory as `docker-compose.yml` with the following content: 61 | 62 | ```shell 63 | WORKCONF=/data/jiascheduler 64 | WORKDATA=/data/jiascheduler 65 | ``` 66 | 67 | The `console.toml` file has a default path of /root/.jiascheduler/console.toml in the container. If this configuration file does not exist, accessing the console page will redirect you to the initialization setup page. 68 | 69 | If the `console.toml` file exists, accessing the console page will directly take you to the login page. Below is a reference configuration. Save the following content as `console.toml` and place it in the `$WORKCONF/.jiascheduler` directory. 
70 | 71 | ```yml 72 | debug = false 73 | bind_addr = "0.0.0.0:9090" 74 | api_url = "" 75 | redis_url = "redis://default:3DGiuazc7wkAppV3@redis" 76 | comet_secret = "rYzBYE+cXbtdMg==" 77 | database_url = "mysql://root:kytHmeBR4Vg@mysql:3306/jiascheduler" 78 | 79 | [encrypt] 80 | private_key = "QGr0LLnFFt7mBFrfol2gy" 81 | 82 | [admin] 83 | username = "admin" 84 | password = "qTQhiMiLCb" 85 | ``` 86 | 87 | After executing docker compose up -d, access 0.0.0.0:9090 to enter the console interface. 88 | 89 | Below is a reference Docker configuration: 90 | 91 | [docker-compose.yml](docker-compose.yml) 92 | 93 | ## Screenshot 94 | 95 | 96 | 97 | 98 | 99 | 100 | 101 | 102 | 103 | 104 | 105 | 106 | 107 | 108 | 109 | 110 | 111 |
<td><img src="./assets/job-edit.png" alt="Jiascheduler job edit" /></td><td><img src="./assets/run-list.png" alt="Jiascheduler run list" /></td>
<td><img src="./assets/scheduler-history.png" alt="Jiascheduler scheduler history" /></td><td><img src="./assets/scheduler-dashboard.png" alt="Jiascheduler scheduler dashboard" /></td>
<td><img src="./assets/server.png" alt="Jiascheduler server" /></td><td><img src="./assets/webssh.png" alt="Jiascheduler webssh" /></td>
112 | 113 | ## Help video 114 | 115 | https://www.bilibili.com/video/BV19wzKYVEHL 116 | 117 | ## Sponsorship 118 | 119 | **wechat:** cg1472580369 120 | 121 | 122 | -------------------------------------------------------------------------------- /README.zh-CN.md: -------------------------------------------------------------------------------- 1 | # jiascheduler 2 | 3 | **简体中文** · [English](./README.md) · [Wiki](https://github.com/jiawesoft/jiascheduler/wiki/Install) 4 | 5 | 一款用 rust 编写的开源高性能,可扩展,支持动态配置的任务调度器,能够同时推送用户脚本到数以万计的实例运行,并实时收集执行的结果。 6 | 7 | jiascheduler 执行脚本的节点不需要都在同一个网络,其内部设计了一个精巧的网络穿透模型可以用一个控制台管理不同子网的节点;例如,你可以在 https://jiascheduler.iwannay.cn 同时往腾讯云, 阿里云,亚马逊云推送脚本执行,当然你可以往家里的电脑部署脚本执行。 8 | 9 | 为了方便对节点进行管理,jiascheduler 同时提供了一个功能强大的 webssh 终端,支持多会话操作,分屏,上传,下载等。 10 | 11 | Github 地址:https://github.com/jiawesoft/jiascheduler 12 | 13 | ## 架构图 14 | 15 | ![架构图](./assets/jiascheduler-arch.png) 16 | 17 | ## 快速开始 18 | 19 | ### [💖 jiascheduler 下载点击这里 💖 ](https://github.com/jiawesoft/jiascheduler/releases) 20 | 21 | [https://jiascheduler.iwannay.cn](https://jiascheduler.iwannay.cn) 22 | 访客账号:guest 密码:guest 23 | 24 | 除了使用演示地址中的测试服务器,你也可以自己部署 Agent,部署成功的 Agent 将自动接入 jiascheduler 在线控制台,你可以通过控制台查看 Agent 的状态,执行脚本,查看执行结果,发起 ssh 连接。 25 | 26 | ### 接入在线控制台 27 | 28 | 以下演示了如何将自己本地实例接入 jiaschduler 在线控制台 29 | 30 | ```bash 31 | # 仅使用作业调度能力 32 | ./jiascheduler-agent --comet-addr ws://115.159.194.153:3000 --assign-username guest --assign-password guest 33 | 34 | # 使用作业调度能力和webssh能力 35 | ./jiascheduler-agent --comet-addr ws://115.159.194.153:3000 --assign-username guest --assign-password guest --ssh-user your_ssh_user --ssh-port 22 --ssh-password your_ssh_user_password --namespace home 36 | ``` 37 | 38 | 如果你需要下线节点,只需要退出 Agent 即可 39 | 40 | ### 单实例部署 41 | 42 | jiascheduler 一共有四个执行程序,分别是 43 | 44 | 1. jiascheduler-console: 控制台服务,用于提供 web 控制台服务 45 | 2. jiascheduler-comet: 连接层服务,用于提供一个统一的接入层供 agent 连接 46 | 3. jiascheduler-agent: 本地 agent 层序,用于执行作业 47 | 4. 
jiascheduler: 以上三个服务的合并打包版本,用于单节点简易快速部署。 48 | 需要注意的是,jiascheduler 打包服务也支持不同的 agent 接入。 49 | 部署了 jiascheduler 合并版本, 你依旧可以再部署多个 comet 和 agent 50 | 51 | 单实例部署时仅需要执行以下内容 52 | 53 | ```bash 54 | // 通过浏览器访问 localhost:9090, 完成初始化安装 55 | // 初始化安装后会加载配置文件再次启动无需传入--bind-addr 56 | // 生成的配置文件路径默认为$HOME/.jiascheduler/console.toml 57 | ./jiascheduler --bind-addr 0.0.0.0:9090 58 | 59 | ``` 60 | 61 | ### docker 部署 62 | 63 | 在 docker-compose.yml 同目录下创建.env 文件,内容如下 64 | 65 | ```shell 66 | WORKCONF=/data/jiascheduler 67 | WORKDATA=/data/jiascheduler 68 | ``` 69 | 70 | `console.toml` 在容器中默认路径为`/root/.jiascheduler/console.toml`,如果没有该配置文件,则访问 console 页面时会进入初始化安装页面 71 | 72 | 如果存在 `console.toml` 文件,访问 console 页面则直接跳到登录页面,参考配置如下,将以下内容保存为 `console.toml`,放`$WORKCONF/.jiascheduler` 目录下 73 | 74 | ```yml 75 | debug = false 76 | bind_addr = "0.0.0.0:9090" 77 | api_url = "" 78 | redis_url = "redis://default:3DGiuazc7wkAppV3@redis" 79 | comet_secret = "rYzBYE+cXbtdMg==" 80 | database_url = "mysql://root:kytHmeBR4Vg@mysql:3306/jiascheduler" 81 | 82 | [encrypt] 83 | private_key = "QGr0LLnFFt7mBFrfol2gy" 84 | 85 | [admin] 86 | username = "admin" 87 | password = "qTQhiMiLCb" 88 | ``` 89 | 90 | 执行 docker compose up -d 后访问 0.0.0.0:9090 进入控制台界面 91 | 92 | docker 参考配置如下 93 | 94 | [docker-compose.yml](docker-compose.yml) 95 | 96 | ## 软件截图 97 | 98 | 99 | 100 | 101 | 102 | 103 | 104 | 105 | 106 | 107 | 108 | 109 | 110 | 111 | 112 | 113 | 114 |
<td><img src="./assets/job-edit.png" alt="Jiascheduler job edit" /></td><td><img src="./assets/run-list.png" alt="Jiascheduler run list" /></td>
<td><img src="./assets/scheduler-history.png" alt="Jiascheduler scheduler history" /></td><td><img src="./assets/scheduler-dashboard.png" alt="Jiascheduler scheduler dashboard" /></td>
<td><img src="./assets/server.png" alt="Jiascheduler server" /></td><td><img src="./assets/webssh.png" alt="Jiascheduler webssh" /></td>
115 | 116 | ## 帮助视频 117 | 118 | https://www.bilibili.com/video/BV19wzKYVEHL 119 | 120 | ## 赞助 121 | 122 | **wechat:** cg1472580369 123 | 124 | 125 | -------------------------------------------------------------------------------- /assets/good.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiawesoft/jiascheduler/3545713e12a4fa9b3224493a15793117a9436823/assets/good.jpg -------------------------------------------------------------------------------- /assets/jiascheduler-arch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiawesoft/jiascheduler/3545713e12a4fa9b3224493a15793117a9436823/assets/jiascheduler-arch.png -------------------------------------------------------------------------------- /assets/job-edit.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiawesoft/jiascheduler/3545713e12a4fa9b3224493a15793117a9436823/assets/job-edit.png -------------------------------------------------------------------------------- /assets/qrcode-qq-group.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiawesoft/jiascheduler/3545713e12a4fa9b3224493a15793117a9436823/assets/qrcode-qq-group.jpg -------------------------------------------------------------------------------- /assets/run-list.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiawesoft/jiascheduler/3545713e12a4fa9b3224493a15793117a9436823/assets/run-list.png -------------------------------------------------------------------------------- /assets/scheduler-dashboard.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/jiawesoft/jiascheduler/3545713e12a4fa9b3224493a15793117a9436823/assets/scheduler-dashboard.png -------------------------------------------------------------------------------- /assets/scheduler-history.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiawesoft/jiascheduler/3545713e12a4fa9b3224493a15793117a9436823/assets/scheduler-history.png -------------------------------------------------------------------------------- /assets/server.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiawesoft/jiascheduler/3545713e12a4fa9b3224493a15793117a9436823/assets/server.png -------------------------------------------------------------------------------- /assets/webssh.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiawesoft/jiascheduler/3545713e12a4fa9b3224493a15793117a9436823/assets/webssh.png -------------------------------------------------------------------------------- /automate/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "automate" 3 | edition = "2024" 4 | publish = false 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | poem.workspace = true 10 | clap.workspace = true 11 | futures-util.workspace = true 12 | tracing.workspace = true 13 | tracing-subscriber.workspace = true 14 | tokio-tungstenite.workspace = true 15 | url.workspace = true 16 | anyhow.workspace = true 17 | local-ip-address.workspace = true 18 | moka.workspace = true 19 | serde_json.workspace = true 20 | bytes.workspace = true 21 | serde.workspace = true 22 | file-rotate.workspace = true 23 | tokio.workspace = true 24 | tokio-nsq.workspace = true 25 | cron.workspace = true 26 | nanoid.workspace = true 27 | 
redis.workspace = true 28 | futures.workspace = true 29 | redis-macros.workspace = true 30 | tokio-cron-scheduler.workspace = true 31 | uuid.workspace = true 32 | chrono.workspace = true 33 | reqwest.workspace = true 34 | watchexec-supervisor.workspace = true 35 | rand.workspace = true 36 | async-trait.workspace = true 37 | russh.workspace = true 38 | russh-keys.workspace = true 39 | russh-sftp.workspace = true 40 | serde_repr.workspace = true 41 | mac_address.workspace = true 42 | nix.workspace = true 43 | 44 | [target.'cfg(unix)'.dependencies] 45 | users = "0.11.0" 46 | -------------------------------------------------------------------------------- /automate/src/bridge.rs: -------------------------------------------------------------------------------- 1 | // mod bridge; 2 | pub mod client; 3 | pub mod msg; 4 | pub mod protocol; 5 | // pub mod server; 6 | 7 | use std::{collections::HashMap, sync::Arc, time::Duration}; 8 | 9 | use anyhow::{anyhow, Context, Ok, Result}; 10 | use serde_json::Value; 11 | use tokio::{ 12 | sync::{ 13 | mpsc::{self, Sender}, 14 | Mutex, 15 | }, 16 | time::timeout, 17 | }; 18 | use tracing::info; 19 | 20 | use crate::bridge::msg::Msg; 21 | 22 | use self::msg::{MsgKind, MsgReqKind, MsgState}; 23 | 24 | #[derive(Clone)] 25 | pub struct Bridge { 26 | // server: WsServer, 27 | server_clients: Arc>)>>>>, 28 | } 29 | 30 | impl Default for Bridge { 31 | fn default() -> Self { 32 | Self::new() 33 | } 34 | } 35 | 36 | impl Bridge { 37 | pub fn new() -> Self { 38 | Self { 39 | // server: WsServer::new(), 40 | server_clients: Arc::new(Mutex::new(HashMap::new())), 41 | } 42 | } 43 | 44 | pub async fn append_client( 45 | &mut self, 46 | key: impl Into, 47 | client: Sender<(Msg, Option>)>, 48 | ) { 49 | self.server_clients.lock().await.insert(key.into(), client); 50 | } 51 | 52 | pub async fn remove_client(&mut self, key: String) { 53 | self.server_clients.lock().await.remove(&key); 54 | } 55 | 56 | pub async fn send_msg(&self, key: &str, data: 
MsgReqKind) -> Result { 57 | let msg = Msg { 58 | id: 0, 59 | data: MsgKind::Request(data), 60 | }; 61 | let (tx, mut rx) = mpsc::channel::(1); 62 | 63 | match self.server_clients.lock().await.get(key) { 64 | Some(sender) => sender.send((msg, Some(tx.clone()))).await?, 65 | None => return Err(anyhow::anyhow!("not found client {}", key)), 66 | } 67 | 68 | let resp = timeout(Duration::from_secs(90), rx.recv()) 69 | .await 70 | .context("receive message timeout")? 71 | .context("failed receives the next value for the receiver.")?; 72 | 73 | return match resp { 74 | MsgState::Completed(v) => Ok(v), 75 | MsgState::Err(e) => Err(anyhow!(e)), 76 | }; 77 | } 78 | 79 | pub fn handle_msg(&mut self, msg: String) -> String { 80 | info!("handle msg {msg}"); 81 | 82 | format!("pong {msg}") 83 | } 84 | 85 | pub fn handle_msg2(&mut self, msg: Value) -> Result { 86 | info!("handle msg {msg}"); 87 | 88 | // match serde_json::from_value::(msg)? { 89 | // Msg::DispathJobMsg(msg) => todo!(), 90 | // } 91 | 92 | // format!("pong {msg}"); 93 | todo!() 94 | } 95 | 96 | // pub async fn poll(&mut self, mut handler: F) 97 | // where 98 | // F: FnMut(String) + Send + Sync + 'static, 99 | // { 100 | // let mut msg_receiver = self.server.recv().await; 101 | // match tokio::spawn(async move { 102 | // while let Some(v) = msg_receiver.recv().await { 103 | // handler(v); 104 | // } 105 | // }) 106 | // .await 107 | // { 108 | // Ok(_) => todo!(), 109 | // Err(e) => error!("{e}"), 110 | // } 111 | // } 112 | } 113 | -------------------------------------------------------------------------------- /automate/src/bridge/msg.rs: -------------------------------------------------------------------------------- 1 | use std::{collections::HashMap, time::Duration}; 2 | 3 | use anyhow::Error; 4 | use chrono::{DateTime, Local}; 5 | use serde::{Deserialize, Serialize}; 6 | use serde_json::Value; 7 | use tokio::sync::mpsc::Sender; 8 | 9 | use crate::{ 10 | comet::handler::SecretHeader, 11 | scheduler::types::{ 
12 | BaseJob, BundleOutput, JobAction, RunStatus, RuntimeAction, ScheduleStatus, ScheduleType, 13 | }, 14 | }; 15 | 16 | pub enum MsgState { 17 | Completed(Value), 18 | Err(Error), 19 | } 20 | 21 | #[derive(Clone)] 22 | pub struct TransactionMsg { 23 | pub tx: Sender, 24 | pub id: u64, 25 | } 26 | 27 | impl TransactionMsg { 28 | pub fn new(tx: Sender, id: u64) -> Self { 29 | Self { tx, id } 30 | } 31 | } 32 | 33 | #[derive(Deserialize, Serialize, PartialEq, Debug, Clone)] 34 | pub struct SftpReadDirParams { 35 | pub user: String, 36 | pub password: String, 37 | pub ip: String, 38 | pub port: u16, 39 | pub dir: Option, 40 | } 41 | 42 | #[derive(Deserialize, Serialize, PartialEq, Debug, Clone)] 43 | pub struct SftpUploadParams { 44 | pub ip: String, 45 | pub port: u16, 46 | pub user: String, 47 | pub password: String, 48 | pub filepath: String, 49 | pub data: Vec, 50 | } 51 | 52 | #[derive(Deserialize, Serialize, PartialEq, Debug, Clone)] 53 | pub struct SftpDownloadParams { 54 | pub ip: String, 55 | pub port: u16, 56 | pub user: String, 57 | pub password: String, 58 | pub filepath: String, 59 | } 60 | 61 | #[derive(Deserialize, Serialize, PartialEq, Debug, Clone)] 62 | pub struct SftpRemoveParams { 63 | pub ip: String, 64 | pub port: u16, 65 | pub user: String, 66 | pub password: String, 67 | pub remove_type: String, 68 | pub filepath: String, 69 | } 70 | 71 | #[derive(Deserialize, Serialize, PartialEq, Debug, Clone)] 72 | pub enum MsgReqKind { 73 | DispatchJobRequest(DispatchJobParams), 74 | RuntimeActionRequest(RuntimeActionParams), 75 | PullJobRequest(Value), 76 | SftpReadDirRequest(SftpReadDirParams), 77 | SftpUploadRequest(SftpUploadParams), 78 | SftpDownloadRequest(SftpDownloadParams), 79 | SftpRemoveRequest(SftpRemoveParams), 80 | Auth(AuthParams), 81 | UpdateJobRequest(UpdateJobParams), 82 | HeartbeatRequest(HeartbeatParams), 83 | } 84 | 85 | #[derive(Deserialize, Serialize, PartialEq, Debug, Clone)] 86 | pub enum MsgKind { 87 | Response(Value), 88 | 
Request(MsgReqKind), 89 | } 90 | 91 | #[derive(Serialize, Deserialize, PartialEq, Debug, Clone)] 92 | pub struct Msg { 93 | pub id: u64, 94 | pub data: MsgKind, 95 | } 96 | 97 | #[derive(Deserialize, Serialize, PartialEq, Debug, Clone)] 98 | pub struct DispatchJobParams { 99 | pub base_job: BaseJob, 100 | pub schedule_id: String, 101 | pub instance_id: Option, 102 | #[serde(default)] 103 | pub run_id: String, 104 | pub fields: Option>, 105 | pub timer_expr: Option, 106 | pub restart_interval: Option, 107 | pub is_sync: bool, 108 | pub created_user: String, 109 | pub action: JobAction, 110 | } 111 | 112 | #[derive(Deserialize, Serialize, PartialEq, Debug, Clone)] 113 | pub struct RuntimeActionParams { 114 | pub eid: String, 115 | pub fields: Option>, 116 | pub is_sync: bool, 117 | pub created_user: String, 118 | pub action: RuntimeAction, 119 | } 120 | 121 | #[derive(Deserialize, Serialize, PartialEq, Debug, Clone)] 122 | pub struct HeartbeatParams { 123 | pub namespace: String, 124 | pub mac_addr: String, 125 | pub source_ip: String, 126 | } 127 | 128 | impl HeartbeatParams { 129 | pub fn get_endpoint(&self) -> String { 130 | if self.namespace != "" { 131 | format!("{}:{}", self.namespace, self.source_ip) 132 | } else { 133 | format!("{}", self.source_ip) 134 | } 135 | } 136 | } 137 | 138 | #[derive(Deserialize, Serialize, PartialEq, Debug, Clone, Default)] 139 | pub struct UpdateJobParams { 140 | pub schedule_id: String, 141 | pub schedule_type: Option, 142 | pub base_job: BaseJob, 143 | pub instance_id: String, 144 | pub bind_ip: String, 145 | pub bind_namespace: String, 146 | pub run_status: Option, 147 | pub schedule_status: Option, 148 | pub exit_code: Option, 149 | pub exit_status: Option, 150 | pub stdout: Option, 151 | pub stderr: Option, 152 | pub created_user: String, 153 | pub bundle_output: Option>, 154 | pub run_id: String, 155 | pub start_time: Option>, 156 | pub end_time: Option>, 157 | pub prev_time: Option>, 158 | pub next_time: Option>, 159 | pub 
is_timeout: bool, 160 | } 161 | 162 | impl UpdateJobParams { 163 | pub fn bundle_output2json(bundle_output: Option>) -> Option { 164 | match bundle_output { 165 | Some(v) => Some( 166 | serde_json::to_string(&v).expect("failed convert bundle_output to json string"), 167 | ), 168 | None => None, 169 | } 170 | } 171 | } 172 | 173 | #[derive(Deserialize, Serialize, PartialEq, Debug, Clone, Default)] 174 | pub struct BundleOutputParams { 175 | pub eid: String, 176 | pub exit_code: Option, 177 | pub exit_status: Option, 178 | pub stdout: Option, 179 | pub stderr: Option, 180 | } 181 | 182 | impl BundleOutputParams { 183 | pub fn parse(value: &BundleOutput) -> Option> { 184 | match value { 185 | BundleOutput::Output(_) => None, 186 | BundleOutput::Bundle(v) => Some( 187 | v.iter() 188 | .map(|v| BundleOutputParams { 189 | eid: v.0.to_owned(), 190 | exit_code: { 191 | if v.1.status.success() { 192 | v.1.status.code() 193 | } else { 194 | v.1.status.code().or(Some(9)) 195 | } 196 | }, 197 | exit_status: Some(v.1.status.to_string()), 198 | stdout: Some(String::from_utf8_lossy(&v.1.stdout).to_string()), 199 | stderr: Some(String::from_utf8_lossy(&v.1.stderr).to_string()), 200 | }) 201 | .collect::>(), 202 | ), 203 | } 204 | } 205 | } 206 | 207 | #[derive(Deserialize, Serialize, PartialEq, Debug, Clone, Default)] 208 | pub struct AuthParams { 209 | pub agent_ip: String, 210 | pub secret: String, 211 | pub is_initialized: bool, 212 | } 213 | 214 | #[derive(Serialize, Deserialize, Debug)] 215 | pub struct AgentOnlineParams { 216 | pub agent_ip: String, 217 | pub namespace: String, 218 | pub mac_addr: String, 219 | pub is_initialized: bool, 220 | pub secret_header: SecretHeader, 221 | } 222 | 223 | #[derive(Serialize, Deserialize, Debug)] 224 | pub struct AgentOfflineParams { 225 | pub agent_ip: String, 226 | pub mac_addr: String, 227 | } 228 | -------------------------------------------------------------------------------- /automate/src/bridge/protocol.rs: 
-------------------------------------------------------------------------------- 1 | use anyhow::{anyhow, Result}; 2 | use bytes::{BufMut, BytesMut}; 3 | 4 | use super::msg::Msg; 5 | 6 | pub struct Protocol {} 7 | 8 | impl Protocol { 9 | const REQ_MARK: u8 = 0; 10 | const RESP_MARK: u8 = 1; 11 | 12 | pub fn is_response(data: &Vec) -> bool { 13 | data[0] == Self::RESP_MARK 14 | } 15 | 16 | pub fn pack_request(data: Msg) -> Vec { 17 | let mut b = BytesMut::new(); 18 | b.put_u8(Self::REQ_MARK); 19 | b.extend(serde_json::to_vec(&data).unwrap()); 20 | b.to_vec() 21 | } 22 | 23 | pub fn unpack_request(data: Vec) -> Result { 24 | if data[0] != Self::REQ_MARK { 25 | return Err(anyhow!("invalid request msg format")); 26 | } 27 | let val = &data[1..]; 28 | Ok(serde_json::from_slice::(val)?) 29 | } 30 | 31 | pub fn pack_response(data: Msg) -> Vec { 32 | let mut b = BytesMut::new(); 33 | b.put_u8(Self::RESP_MARK); 34 | let data = serde_json::to_vec(&data).unwrap(); 35 | b.extend(data); 36 | b.to_vec() 37 | } 38 | 39 | pub fn unpack_response(data: Vec) -> Result { 40 | if data[0] != Self::RESP_MARK { 41 | return Err(anyhow!("invalid response msg format")); 42 | } 43 | let val = &data[1..]; 44 | Ok(serde_json::from_slice::(val)?) 
45 | } 46 | } 47 | 48 | #[test] 49 | fn pack_request() { 50 | use crate::bridge::msg::MsgKind; 51 | use serde_json::json; 52 | let old = Msg { 53 | id: 12, 54 | data: MsgKind::Request(crate::bridge::msg::MsgReqKind::PullJobRequest( 55 | json!({"hello":"world"}), 56 | )), 57 | }; 58 | 59 | let data = Protocol::pack_request(old.clone()); 60 | 61 | match Protocol::unpack_request(data) { 62 | Ok(new) => { 63 | assert!(old == new, "a:{:?}, b:{:?} not equal", old, new,) 64 | } 65 | Err(_) => todo!(), 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /automate/src/bridge/server.rs: -------------------------------------------------------------------------------- 1 | 2 | 3 | use tokio::{ 4 | sync::{ 5 | mpsc::{self, Receiver, Sender}, 6 | }, 7 | }; 8 | 9 | use poem::{ 10 | web::{ 11 | websocket::{Message, WebSocket}, 12 | }, IntoResponse, 13 | }; 14 | 15 | use futures_util::{SinkExt, StreamExt}; 16 | use tracing::error; 17 | 18 | pub struct WsServer { 19 | ws_channel: (Sender, Option>), 20 | msg_channel: (Sender, Option>), 21 | } 22 | 23 | impl Default for WsServer { 24 | fn default() -> Self { 25 | Self::new() 26 | } 27 | } 28 | 29 | impl WsServer { 30 | pub fn new() -> Self { 31 | let (ws_sender, ws_receiver) = mpsc::channel::(10); 32 | let (msg_sender, msg_receiver) = mpsc::channel::(10); 33 | Self { 34 | ws_channel: (ws_sender, Some(ws_receiver)), 35 | msg_channel: (msg_sender, Some(msg_receiver)), 36 | } 37 | } 38 | 39 | fn msg_sender(&self) -> Sender { 40 | self.msg_channel.0.clone() 41 | } 42 | 43 | pub async fn recv(&mut self) -> Receiver { 44 | self.msg_channel.1.take().expect("invalid msg receiver") 45 | } 46 | 47 | async fn poll(&mut self, mut handler: F) 48 | where 49 | F: FnMut(String) + Send + Sync + 'static, 50 | { 51 | let mut msg_receiver = self.msg_channel.1.take().expect("invalid msg receiver"); 52 | match tokio::spawn(async move { 53 | while let Some(v) = msg_receiver.recv().await { 54 | handler(v); 55 | 
} 56 | }) 57 | .await 58 | { 59 | Ok(_) => todo!(), 60 | Err(e) => error!("{e}"), 61 | } 62 | } 63 | 64 | pub async fn serve_ws(&mut self, ws: WebSocket) -> impl IntoResponse { 65 | let msg_sender = self.msg_channel.0.clone(); 66 | 67 | let mut ws_receiver = self.ws_channel.1.take().expect("invalid receiver"); 68 | 69 | ws.on_upgrade(move |socket| async move { 70 | let (mut sink, mut stream) = socket.split(); 71 | 72 | tokio::spawn(async move { 73 | while let Some(Ok(msg)) = stream.next().await { 74 | if let Message::Text(text) = msg { 75 | match msg_sender.send(format!("read: {text}")).await { 76 | Err(e) => { 77 | error!("{e}"); 78 | break; 79 | } 80 | Ok(_) => todo!(), 81 | } 82 | } 83 | } 84 | }); 85 | 86 | tokio::spawn(async move { 87 | while let Some(msg) = ws_receiver.recv().await { 88 | if sink.send(Message::Text(msg)).await.is_err() { 89 | error!("err!"); 90 | break; 91 | } 92 | } 93 | }); 94 | }) 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /automate/src/bus.rs: -------------------------------------------------------------------------------- 1 | use std::pin::Pin; 2 | 3 | use anyhow::Result; 4 | use futures::Future; 5 | use local_ip_address::local_ip; 6 | use redis::{ 7 | from_redis_value, 8 | streams::{StreamMaxlen, StreamReadOptions, StreamReadReply}, 9 | AsyncCommands, Client, 10 | }; 11 | use redis_macros::{FromRedisValue, ToRedisArgs}; 12 | use serde::{Deserialize, Serialize}; 13 | 14 | use tracing::{debug, error, info, warn}; 15 | 16 | use crate::bridge::msg::{AgentOfflineParams, AgentOnlineParams, HeartbeatParams, UpdateJobParams}; 17 | 18 | #[derive(Debug, Serialize, Deserialize, FromRedisValue, ToRedisArgs)] 19 | pub enum Msg { 20 | UpdateJob(UpdateJobParams), 21 | Heartbeat(HeartbeatParams), 22 | AgentOnline(AgentOnlineParams), 23 | AgentOffline(AgentOfflineParams), 24 | } 25 | 26 | #[derive(Clone)] 27 | pub struct Bus { 28 | pub redis_client: Client, 29 | } 30 | 31 | impl Bus { 32 | pub 
const JOB_TOPIC: &'static str = "jiascheduler:job:event"; 33 | pub const CONSUMER_GROUP: &'static str = "jiascheduler-group"; 34 | 35 | pub fn new(redis_client: Client) -> Self { 36 | Self { redis_client } 37 | } 38 | 39 | pub async fn update_job(&self, msg: UpdateJobParams) -> Result { 40 | self.send_msg(&[("event", Msg::UpdateJob(msg))]).await 41 | } 42 | 43 | pub async fn heartbeat(&self, msg: HeartbeatParams) -> Result { 44 | self.send_msg(&[("event", Msg::Heartbeat(msg))]).await 45 | } 46 | 47 | pub async fn agent_online(&self, msg: AgentOnlineParams) -> Result { 48 | self.send_msg(&[("event", Msg::AgentOnline(msg))]).await 49 | } 50 | 51 | pub async fn agent_offline(&self, msg: AgentOfflineParams) -> Result { 52 | self.send_msg(&[("event", Msg::AgentOffline(msg))]).await 53 | } 54 | 55 | pub async fn send_msg<'a>(&self, items: &'a [(&'a str, Msg)]) -> Result { 56 | let mut conn = self.redis_client.get_multiplexed_async_connection().await?; 57 | 58 | let v: String = conn.xadd(Self::JOB_TOPIC, "*", items).await?; 59 | Ok(v) 60 | } 61 | 62 | pub async fn recv( 63 | &self, 64 | mut cb: impl Sync 65 | + Send 66 | + FnMut(String, Msg) -> Pin> + Send>>, 67 | ) -> Result { 68 | let mut conn = self.redis_client.get_multiplexed_async_connection().await?; 69 | 70 | let ret: String = conn 71 | .xgroup_create_mkstream(Self::JOB_TOPIC, Self::CONSUMER_GROUP, "$") 72 | .await 73 | .map_or_else( 74 | |e| { 75 | warn!("failed create stream group - {}", e); 76 | "".to_string() 77 | }, 78 | |v| v, 79 | ); 80 | 81 | info!("create stream group {}", ret); 82 | 83 | let opts = StreamReadOptions::default() 84 | .group(Self::CONSUMER_GROUP, local_ip()?.to_string()) 85 | .block(100) 86 | .count(100); 87 | 88 | loop { 89 | let ret: StreamReadReply = conn 90 | .xread_options(&[Self::JOB_TOPIC], &[">"], &opts) 91 | .await?; 92 | 93 | match conn 94 | .xtrim::<_, u64>(Self::JOB_TOPIC, StreamMaxlen::Equals(5000)) 95 | .await 96 | { 97 | Ok(n) => debug!("trim stream {} {n} entries", 
Self::JOB_TOPIC), 98 | Err(e) => error!("failed to trim stream - {e}"), 99 | }; 100 | 101 | for stream_key in ret.keys { 102 | let msg_key = stream_key.key; 103 | 104 | for stream_id in stream_key.ids { 105 | for (k, v) in stream_id.map { 106 | let ret = match from_redis_value::(&v) { 107 | Ok(msg) => cb(k, msg).await, 108 | Err(e) => { 109 | error!("failed to parse redis val - {e}"); 110 | Ok(()) 111 | } 112 | }; 113 | 114 | if let Err(e) = ret { 115 | error!("failed to handle msg - {e}"); 116 | } 117 | 118 | let _: i32 = conn 119 | .xack( 120 | msg_key.clone(), 121 | Self::CONSUMER_GROUP, 122 | &[stream_id.id.clone()], 123 | ) 124 | .await 125 | .map_or_else( 126 | |v| { 127 | error!("faile to exec xack - {}", v); 128 | 0 129 | }, 130 | |v| v, 131 | ); 132 | } 133 | } 134 | } 135 | } 136 | } 137 | } 138 | 139 | #[tokio::test] 140 | async fn test_bus() { 141 | let redis_client = 142 | redis::Client::open("redis://:wang@127.0.0.1").expect("failed connect to redis"); 143 | let bus = Bus::new(redis_client); 144 | bus.send_msg(&[( 145 | "event", 146 | Msg::UpdateJob(UpdateJobParams { 147 | exit_code: Some(1), 148 | ..Default::default() 149 | }), 150 | )]) 151 | .await 152 | .unwrap(); 153 | 154 | bus.send_msg(&[( 155 | "event", 156 | Msg::UpdateJob(UpdateJobParams { 157 | exit_code: Some(2), 158 | ..Default::default() 159 | }), 160 | )]) 161 | .await 162 | .unwrap(); 163 | 164 | bus.recv(|key, val| { 165 | Box::pin(async move { 166 | println!("key:{key} val:{}", serde_json::to_string(&val).unwrap()); 167 | Ok(()) 168 | }) 169 | }) 170 | .await 171 | .unwrap(); 172 | } 173 | -------------------------------------------------------------------------------- /automate/src/comet/logic.rs: -------------------------------------------------------------------------------- 1 | use std::net::IpAddr; 2 | 3 | use crate::{ 4 | bridge::msg::{ 5 | AgentOfflineParams, AgentOnlineParams, HeartbeatParams, MsgReqKind, UpdateJobParams, 6 | }, 7 | bus::Bus, 8 | get_endpoint, LinkPair, 9 | 
}; 10 | use anyhow::{Ok, Result}; 11 | use local_ip_address::local_ip; 12 | use redis::{AsyncCommands, FromRedisValue, RedisResult}; 13 | 14 | use serde_json::{json, Value}; 15 | 16 | use super::types::{self}; 17 | 18 | #[derive(Clone)] 19 | pub struct Logic { 20 | pub redis_client: redis::Client, 21 | local_ip: IpAddr, 22 | bus: Bus, 23 | } 24 | 25 | impl Logic { 26 | pub fn new(redis: redis::Client) -> Self { 27 | Self { 28 | local_ip: local_ip().expect("failed get local ip"), 29 | redis_client: redis.clone(), 30 | bus: Bus::new(redis), 31 | } 32 | } 33 | 34 | pub fn get_agent_key(&self, ip: impl Into, mac_addr: impl Into) -> String { 35 | get_endpoint(ip, mac_addr) 36 | } 37 | 38 | async fn set_link_pair>( 39 | &self, 40 | namespace: T, 41 | ip: T, 42 | mac_addr: T, 43 | port: u16, 44 | ) -> Result<()> { 45 | let mut conn = self.get_async_connection().await?; 46 | let key = self.get_agent_key(ip.into(), mac_addr.into()); 47 | let ret = conn 48 | .set_ex( 49 | key, 50 | types::LinkPair { 51 | comet_addr: format!("{}:{}", self.local_ip.to_string(), port), 52 | namespace: namespace.into(), 53 | }, 54 | 90, 55 | ) 56 | .await?; 57 | Ok(ret) 58 | } 59 | 60 | pub async fn get_link_pair>( 61 | &self, 62 | agent_ip: T, 63 | mac_addr: T, 64 | ) -> Result<(String, types::LinkPair)> { 65 | let (agent_ip, mac_addr) = (agent_ip.into(), mac_addr.into()); 66 | let mut conn = self.redis_client.get_multiplexed_async_connection().await?; 67 | let key = self.get_agent_key(agent_ip.clone(), mac_addr.clone()); 68 | let val = conn.get(key.clone()).await?; 69 | 70 | if val == redis::Value::Nil { 71 | anyhow::bail!("Agent {agent_ip}:{mac_addr} not registered, please deploy first"); 72 | } 73 | 74 | Ok((key.clone(), LinkPair::from_redis_value(&val)?)) 75 | } 76 | 77 | pub async fn get_async_connection(&self) -> RedisResult { 78 | self.redis_client.get_multiplexed_async_connection().await 79 | } 80 | 81 | pub async fn dispath(&self, req: types::DispatchJobRequest) -> Result<(String, 
MsgReqKind)> { 82 | let pair = self.get_link_pair(&req.agent_ip, &req.mac_addr).await?; 83 | Ok((pair.0, MsgReqKind::DispatchJobRequest(req.dispatch_params))) 84 | } 85 | 86 | pub async fn sfpt_read_dir( 87 | &self, 88 | req: types::SftpReadDirRequest, 89 | ) -> Result<(String, MsgReqKind)> { 90 | let key = self.get_agent_key(&req.agent_ip, &req.mac_addr); 91 | let msg = MsgReqKind::SftpReadDirRequest(req.params); 92 | Ok((key, msg)) 93 | } 94 | 95 | pub async fn sftp_upload(&self, req: types::SftpUploadRequest) -> Result<(String, MsgReqKind)> { 96 | let key = self.get_agent_key(&req.agent_ip, &req.mac_addr); 97 | let msg = MsgReqKind::SftpUploadRequest(req.params); 98 | Ok((key, msg)) 99 | } 100 | 101 | pub async fn sftp_download( 102 | &self, 103 | req: types::SftpDownloadRequest, 104 | ) -> Result<(String, MsgReqKind)> { 105 | let key = self.get_agent_key(&req.agent_ip, &req.mac_addr); 106 | let msg = MsgReqKind::SftpDownloadRequest(req.params); 107 | Ok((key, msg)) 108 | } 109 | 110 | pub async fn sftp_remove(&self, req: types::SftpRemoveRequest) -> Result<(String, MsgReqKind)> { 111 | let key = self.get_agent_key(&req.agent_ip, &req.mac_addr); 112 | let msg = MsgReqKind::SftpRemoveRequest(req.params); 113 | Ok((key, msg)) 114 | } 115 | 116 | pub async fn runtime_action( 117 | &self, 118 | req: types::RuntimeActionRequest, 119 | ) -> Result<(String, MsgReqKind)> { 120 | let pair = self.get_link_pair(&req.agent_ip, &req.mac_addr).await?; 121 | Ok((pair.0, MsgReqKind::RuntimeActionRequest(req.action_params))) 122 | } 123 | 124 | pub async fn update_job(&self, req: UpdateJobParams) -> Result { 125 | self.bus.update_job(req).await?; 126 | Ok(json!(null)) 127 | } 128 | 129 | pub async fn agent_online(&self, req: AgentOnlineParams) -> Result { 130 | self.bus.agent_online(req).await?; 131 | Ok(json!(null)) 132 | } 133 | 134 | pub async fn agent_offline(&self, req: AgentOfflineParams) -> Result { 135 | self.bus.agent_offline(req).await?; 136 | Ok(json!(null)) 137 | } 
/// Build a JSON API response wrapped in `poem::web::Json` and `return` it
/// from the surrounding handler function.
///
/// Arms:
/// - `return_response!()` — success (code 20000) with `null` data
/// - `return_response!(data)` — success with a payload
/// - `return_response!(data, msg)` — success with payload and custom message
/// - `return_response!(code: c, msg)` — non-success code with message, no data
/// - `return_response!(json: value)` — return `value` verbatim
///
/// ```ignore
/// // return success
/// return_response!()
///
/// // return success with data
/// return_response!("ok")
/// ```
#[macro_export]
macro_rules! return_response {
    () => {
        return poem::web::Json(serde_json::json!({
            "code":20000,
            "data":null,
            "msg":"success",
        }))
    };
    ($data:expr) => {
        return poem::web::Json(serde_json::json!({
            "code":20000,
            "data":Some($data),
            "msg":"success",
        }))
    };
    ($data:expr,$msg:expr) => {
        return poem::web::Json(serde_json::json!({
            "code":20000,
            "data":Some($data),
            "msg":$msg,
        }))
    };
    (code: $code:expr, $msg:expr) => {
        return poem::web::Json(serde_json::json!({
            "code":$code,
            "data": null,
            "msg":$msg,
        }))
    };
    (json: $val:expr) => {
        return poem::web::Json($val)
    };
}
pub agent_ip: String, 13 | pub mac_addr: String, 14 | pub dispatch_params: DispatchJobParams, 15 | } 16 | 17 | #[derive(Serialize, Deserialize, Debug)] 18 | pub struct RuntimeActionRequest { 19 | pub agent_ip: String, 20 | pub mac_addr: String, 21 | pub action_params: RuntimeActionParams, 22 | } 23 | 24 | #[derive(Serialize, Deserialize, Debug)] 25 | pub struct SftpReadDirRequest { 26 | pub agent_ip: String, 27 | pub mac_addr: String, 28 | pub namespace: String, 29 | pub params: SftpReadDirParams, 30 | } 31 | 32 | #[derive(Serialize, Deserialize, Debug)] 33 | pub struct SftpUploadRequest { 34 | pub agent_ip: String, 35 | pub mac_addr: String, 36 | pub namespace: String, 37 | pub params: SftpUploadParams, 38 | } 39 | 40 | #[derive(Serialize, Deserialize, Debug)] 41 | pub struct SftpRemoveRequest { 42 | pub agent_ip: String, 43 | pub mac_addr: String, 44 | pub namespace: String, 45 | pub params: SftpRemoveParams, 46 | } 47 | 48 | #[derive(Serialize, Deserialize, Debug)] 49 | pub struct SftpDownloadRequest { 50 | pub agent_ip: String, 51 | pub mac_addr: String, 52 | pub namespace: String, 53 | pub params: SftpDownloadParams, 54 | } 55 | 56 | #[derive(Serialize, Clone, FromRedisValue, Deserialize, ToRedisArgs)] 57 | pub struct LinkPair { 58 | pub namespace: String, 59 | pub comet_addr: String, 60 | } 61 | impl ToString for LinkPair { 62 | fn to_string(&self) -> String { 63 | serde_json::to_string(self).unwrap() 64 | } 65 | } 66 | 67 | #[derive(Debug, Deserialize_repr, Serialize_repr)] 68 | #[repr(u8)] 69 | pub enum MsgType { 70 | Resize = 1, 71 | Data = 2, 72 | Ping = 3, 73 | } 74 | 75 | #[derive(Debug, Serialize, Deserialize)] 76 | pub struct Msg { 77 | pub r#type: MsgType, 78 | #[serde(default)] 79 | pub msg: String, 80 | #[serde(default)] 81 | pub cols: u32, 82 | #[serde(default)] 83 | pub rows: u32, 84 | } 85 | 86 | #[derive(Deserialize)] 87 | pub struct WebSshQuery { 88 | pub namespace: String, 89 | pub cols: u32, 90 | pub rows: u32, 91 | } 92 | 93 | 
/// Build the redis key under which an agent instance is registered.
///
/// Both arguments accept anything convertible into `String`
/// (`&str`, `String`, …); the key format is
/// `jiascheduler:ins:<ip>:<mac_address>`.
pub fn get_endpoint(ip: impl Into<String>, mac_address: impl Into<String>) -> String {
    format!("jiascheduler:ins:{}:{}", ip.into(), mac_address.into())
}
{ 51 | Some(ma) => Ok(ma.to_string()), 52 | None => anyhow::bail!("No MAC address found."), 53 | } 54 | } 55 | 56 | #[test] 57 | fn test_get_mac_address() { 58 | let ret = get_mac_address(); 59 | assert_eq!(ret.is_ok(), true); 60 | } 61 | 62 | /// convert DateTime to local time(String) 63 | #[macro_export] 64 | macro_rules! local_time { 65 | ($time:expr) => { 66 | $time 67 | .with_timezone(&chrono::Local) 68 | .naive_local() 69 | .to_string() 70 | }; 71 | } 72 | 73 | #[macro_export] 74 | macro_rules! run_id { 75 | () => { 76 | chrono::Local::now().format("%Y%m%d%H%M%S").to_string() 77 | }; 78 | } 79 | -------------------------------------------------------------------------------- /automate/src/scheduler.rs: -------------------------------------------------------------------------------- 1 | mod cmd; 2 | pub(self) mod executor; 3 | pub(self) mod file; 4 | pub mod scheduler; 5 | pub mod types; 6 | 7 | pub use scheduler::*; 8 | -------------------------------------------------------------------------------- /automate/src/scheduler/cmd.rs: -------------------------------------------------------------------------------- 1 | use std::{ffi::OsStr, process::Output, time::Duration}; 2 | 3 | use anyhow::{anyhow, Result}; 4 | use bytes::BufMut; 5 | 6 | use tokio::{ 7 | io::{AsyncBufReadExt, AsyncRead, AsyncWriteExt}, 8 | process::Command, 9 | sync::mpsc::{Receiver, UnboundedSender}, 10 | }; 11 | use tracing::{error, info}; 12 | 13 | async fn read_to_end( 14 | io: &mut Option, 15 | tx: UnboundedSender, 16 | ) -> std::io::Result> { 17 | let mut vec = Vec::new(); 18 | if let Some(io) = io.as_mut() { 19 | let mut reader = tokio::io::BufReader::new(io); 20 | loop { 21 | let mut line = String::new(); 22 | let n = reader.read_line(&mut line).await?; 23 | 24 | if n == 0 { 25 | break; 26 | } 27 | 28 | if let Err(e) = tx.send(line.clone()) { 29 | error!("failed send job lot - {e}"); 30 | } 31 | 32 | vec.put(line.as_bytes()); 33 | } 34 | } 35 | 36 | std::result::Result::Ok(vec) 37 | } 
38 | 39 | pub struct Cmd<'a> { 40 | inner: Command, 41 | timeout: Option, 42 | read_code_from_stdin: (bool, &'a str), 43 | } 44 | 45 | impl<'a> Cmd<'a> { 46 | pub fn new>(program: T) -> Self { 47 | Self { 48 | inner: Command::new(program), 49 | read_code_from_stdin: (false, ""), 50 | timeout: None, 51 | } 52 | } 53 | 54 | pub fn get_ref(&mut self) -> &mut Command { 55 | &mut self.inner 56 | } 57 | 58 | pub fn work_dir(&mut self, dir: &str) -> &mut Self { 59 | self.inner.current_dir(dir); 60 | self 61 | } 62 | 63 | pub fn timeout(&mut self, timeout: u64) -> &mut Self { 64 | self.timeout = Some(Duration::from_secs(timeout)); 65 | self 66 | } 67 | 68 | #[cfg(unix)] 69 | pub fn work_user(&mut self, user: &str) -> Result<&mut Self> { 70 | let u = users::get_user_by_name(user).ok_or(anyhow!("invalid system user {user}"))?; 71 | self.inner.uid(u.uid()); 72 | Ok(self) 73 | } 74 | 75 | #[cfg(windows)] 76 | pub fn work_user(&mut self, _: &str) -> Result<&mut Self> { 77 | Ok(self) 78 | } 79 | 80 | pub fn read_code_from_stdin(mut self, code: &'a str) -> Self { 81 | self.read_code_from_stdin = (true, code); 82 | self 83 | } 84 | 85 | #[cfg(windows)] 86 | pub fn killpg(_pid: u32) -> Result<()> { 87 | Ok(()) 88 | } 89 | 90 | #[cfg(unix)] 91 | pub fn killpg(pid: u32) -> Result<()> { 92 | let pid = nix::unistd::Pid::from_raw(pid as i32); 93 | nix::sys::signal::killpg(pid, nix::sys::signal::SIGKILL)?; 94 | Ok(()) 95 | } 96 | 97 | pub async fn wait_with_output( 98 | &mut self, 99 | tx: UnboundedSender, 100 | mut kill_signal_rx: Receiver<()>, 101 | ) -> Result { 102 | // kill process group See https://github.com/rust-lang/rust/issues/115241 103 | #[cfg(unix)] 104 | let mut child = self.inner.process_group(0).spawn()?; 105 | #[cfg(windows)] 106 | let mut child = self.inner.spawn()?; 107 | 108 | if self.read_code_from_stdin.0 { 109 | if let Some(mut stdin_pipe) = child.stdin.take() { 110 | stdin_pipe 111 | .write_all(self.read_code_from_stdin.1.as_bytes()) 112 | .await?; 113 | } 114 | } 
115 | 116 | let mut stdout_pipe = child.stdout.take(); 117 | let mut stderr_pipe = child.stderr.take(); 118 | 119 | let stdout_fut = read_to_end(&mut stdout_pipe, tx.clone()); 120 | let stderr_fut = read_to_end(&mut stderr_pipe, tx.clone()); 121 | 122 | let sleep = self.timeout.map_or( 123 | tokio::time::sleep(Duration::from_secs(86400 * 365 * 10)), 124 | |v| tokio::time::sleep(v), 125 | ); 126 | tokio::pin!(sleep); 127 | 128 | let pid = child.id().unwrap(); 129 | tokio::select! { 130 | _ = &mut sleep => { 131 | info!("timeout kill"); 132 | child.kill().await?; 133 | Self::killpg(pid)?; 134 | 135 | }, 136 | _ = kill_signal_rx.recv() => { 137 | info!("manual kill"); 138 | child.kill().await?; 139 | Self::killpg(pid)?; 140 | }, 141 | ret = child.wait() =>{ 142 | ret?; 143 | }, 144 | 145 | }; 146 | 147 | let (status, stdout, stderr) = 148 | futures_util::future::try_join3(child.wait(), stdout_fut, stderr_fut).await?; 149 | 150 | // Drop happens after `try_join` due to 151 | drop(stdout_pipe); 152 | drop(stderr_pipe); 153 | 154 | Ok(Output { 155 | status, 156 | stderr, 157 | stdout, 158 | }) 159 | } 160 | } 161 | -------------------------------------------------------------------------------- /automate/src/scheduler/executor.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use file_rotate::{FileRotate, compression::Compression, suffix::AppendCount}; 3 | 4 | use std::io::Write; 5 | 6 | use std::path::PathBuf; 7 | use std::sync::Arc; 8 | use std::{ 9 | collections::HashMap, 10 | process::{Output, Stdio}, 11 | }; 12 | use tokio::sync::mpsc::Receiver; 13 | 14 | use tokio::sync::{Mutex, mpsc}; 15 | use tracing::error; 16 | 17 | use crate::scheduler::cmd::Cmd; 18 | 19 | use super::types::{BaseJob, BundleOutput}; 20 | 21 | #[derive(Default)] 22 | pub struct ExecutorBuilder { 23 | pub job: BaseJob, 24 | output_dir: String, 25 | disable_log: bool, 26 | pub env: HashMap, 27 | } 28 | 29 | #[allow(unused)] 30 | impl 
ExecutorBuilder {
    /// Create a builder whose log directory defaults to `./log`.
    pub fn new() -> Self {
        Self {
            output_dir: String::from("./log"),
            ..Default::default()
        }
    }

    /// Set the job to execute.
    pub fn job(mut self, job: BaseJob) -> Self {
        self.job = job;
        self
    }

    /// Set the directory where per-job log files are written.
    pub fn output_dir(mut self, log_dir: impl Into<String>) -> Self {
        self.output_dir = log_dir.into();
        self
    }

    /// Disable writing captured command output to the rotating log file.
    pub fn disable_write_log(mut self, disable: bool) -> Self {
        self.disable_log = disable;
        self
    }

    /// Add one environment variable that will be passed to the spawned command.
    pub fn env(mut self, k: String, v: String) -> Self {
        self.env.insert(k, v);
        self
    }

    /// Consume the builder and produce the configured `Executor`.
    pub fn build(self) -> Executor {
        Executor {
            job: self.job,
            output_dir: self.output_dir,
            env: self.env,
            disable_log: self.disable_log,
        }
    }
}

/// Per-run context: carries the receiver on which a kill request for the
/// running command arrives.
pub struct Ctx {
    pub kill_signal_rx: Receiver<()>,
}

/// Runs a `BaseJob` — either a single command or a bundle of scripts —
/// optionally capturing its output into a rotating log file.
pub struct Executor {
    job: BaseJob,
    output_dir: String,
    disable_log: bool,
    // NOTE(review): restored as String -> String from the builder's `env(k, v)`
    // signature — confirm against the struct's original declaration.
    env: HashMap<String, String>,
}

impl Executor {
    pub fn builder() -> ExecutorBuilder {
        ExecutorBuilder::new()
    }

    /// Log file path for this job: `<output_dir>/<eid>.log`.
    pub fn get_log_file_path(&self) -> PathBuf {
        PathBuf::from(&self.output_dir).join(format!("{}.log", self.job.eid))
    }

    /// Run the job to completion.
    ///
    /// A plain job runs as a single command; a bundle runs its scripts
    /// sequentially, each with its own kill channel fanned out from the
    /// caller's single `ctx.kill_signal_rx`.
    pub async fn run(&self, mut ctx: Ctx) -> Result<BundleOutput> {
        // Fast path: non-bundle job is one command, reusing ctx directly.
        // (Cloning here once replaces the original's
        // `bundle_script.clone().unwrap().clone()` double clone.)
        let Some(bundle) = self.job.bundle_script.clone() else {
            let output = self
                .exec(
                    ctx,
                    self.job.cmd_name.clone(),
                    self.job.args.clone(),
                    self.job.code.clone(),
                )
                .await?;

            return Ok(BundleOutput::Output(output));
        };

        // Fan the single external kill signal out to every script's channel.
        let kill_signal_tx: Arc<Mutex<Vec<Sender<()>>>> = Arc::new(Mutex::new(vec![]));
        let kill_signal_tx_clone = kill_signal_tx.clone();
        let mut outputs = HashMap::new();

        let handler = tokio::spawn(async move {
            match ctx.kill_signal_rx.recv().await {
                Some(v) => {
                    // Borrow the senders instead of `to_vec()` — no need to
                    // clone every Sender just to call `send(&self, ..)`.
                    for s in kill_signal_tx_clone.lock().await.iter() {
                        if let Err(e) = s.send(v).await {
                            error!("failed to send kill signal {e}");
                        }
                    }
                }
                None => {
                    error!("failed to recv kill signal");
                }
            };
        });

        // Scripts run sequentially; each registers its kill sender before start.
        for v in bundle.into_iter() {
            let (tx, kill_signal_rx) = mpsc::channel::<()>(1);
            kill_signal_tx.lock().await.push(tx);
            let output = self
                .exec(
                    Ctx { kill_signal_rx },
                    v.cmd_name.clone(),
                    v.args.clone(),
                    v.code.clone(),
                )
                .await?;
            outputs.insert(v.eid, output);
        }

        // The fan-out task is only needed while scripts are running.
        handler.abort();
        Ok(BundleOutput::Bundle(outputs))
    }

    /// Spawn one command, stream its output lines to the rotating log file
    /// (unless disabled), and wait for it to exit or be killed via `ctx`.
    // NOTE(review): return type restored from the payload of
    // `BundleOutput::Output(output)` — confirm the concrete `Output` type.
    async fn exec(
        &self,
        ctx: Ctx,
        cmd_name: String,
        args: Vec<String>,
        code: String,
    ) -> Result<Output> {
        let mut cmd = Cmd::new(cmd_name);
        let mut args = args;
        if self.job.read_code_from_stdin {
            // Feed the script body through stdin instead of argv.
            cmd = cmd.read_code_from_stdin(&code);
            cmd.get_ref().stdin(Stdio::piped());
        } else {
            args.push(code.clone());
        }

        if let Some(ref work_dir) = self.job.work_dir {
            cmd.work_dir(work_dir);
        }

        if let Some(ref work_user) = self.job.work_user {
            cmd.work_user(work_user)?;
        }
        if self.job.timeout > 0 {
            cmd.timeout(self.job.timeout);
        }

        for (key, val) in self.env.iter() {
            cmd.get_ref().env(key, val);
        }

        cmd.get_ref().args(&args);

        let (tx, mut rx) = mpsc::unbounded_channel::<String>();

        // Rotating log: keep 2 rotated files of at most 1 MiB, uncompressed.
        let filepath = self.get_log_file_path();
        let mut logfile = if self.disable_log {
            None
        } else {
            Some(FileRotate::new(
                filepath,
                AppendCount::new(2),
                file_rotate::ContentLimit::Bytes(1 << 20),
                Compression::None,
                None,
            ))
        };

        // Drain captured output lines into the log file in the background.
        tokio::spawn(async move {
            while let Some(line) = rx.recv().await {
                if let Some(f) = logfile.as_mut() {
                    if let Err(e) = write!(f, "{}", line) {
                        error!("cannot write to log file - {e}");
                    }
                }
            }
        });

        cmd.get_ref().stdout(Stdio::piped());
        cmd.get_ref().stderr(Stdio::piped());

        let output = cmd.wait_with_output(tx, ctx.kill_signal_rx).await?;

        Ok(output)
    }
}

/// Manual smoke test: spawns a long-running bash command (20 s sleep) and
/// kills it after one second via the kill-signal channel.
#[tokio::test]
async fn test_command_exec() {
    use nanoid::nanoid;
    use std::time::Duration;
    use tokio::time::sleep;
    use tracing::info;
    // SAFETY: `set_var` is unsafe in edition 2024; presumably no other thread
    // is reading the environment this early in the test binary — TODO confirm.
    unsafe {
        std::env::set_var("RUST_LOG", "debug");
    }

    tracing_subscriber::fmt::init();
    let c = Executor::builder()
        .job(BaseJob {
            bundle_script: None,
            eid: nanoid!(),
            cmd_name: "bash".to_string(),
            code: "ls -alh;sleep 20;echo hello".into(),
            args: vec!["-c".to_string()],
            upload_file: None,
            read_code_from_stdin: false,
            timeout: 2,
            work_dir: None,
            work_user: None,
            max_retry: None,
            max_parallel: None,
        })
        .build();

    let (kill_signal_tx, kill_signal_rx) = mpsc::channel::<()>(1);
    tokio::spawn(async move {
        sleep(Duration::from_secs(1)).await;
        info!("start manual kill");
        kill_signal_tx.send(()).await.unwrap();
        info!("end manual kill");
    });
    let output = c.run(Ctx { kill_signal_rx }).await.unwrap();

    println!("stdout: {:?}", output.get_stdout());
    println!("stderr: {:?}", output.get_stderr());
    println!("exit_status: {:?}", output.get_exit_status());
    println!("exit_code: {:?}", output.get_exit_code())
}
-------------------------------------------------------------------------------- /automate/src/scheduler/file.rs: --------------------------------------------------------------------------------
// use crate::get_http_client;

use super::types::UploadFile;
use anyhow::Result;
use tokio::{
    fs::{create_dir_all, File},
    io::AsyncWriteExt,
};

/// Directory on the agent host where uploaded job files are staged.
const UPLOAD_DIR: &str = "/tmp/jiascheduler-agent";

/// Write an uploaded file's inline payload into the agent's staging
/// directory. No-op when there is no file or no embedded data.
pub async fn try_download_file(_host: String, file: Option<UploadFile>)
-> Result<()> { 13 | let file = match file { 14 | Some(v) => v, 15 | None => return Ok(()), 16 | }; 17 | 18 | let data = if let Some(data) = file.data { 19 | data 20 | } else { 21 | return Ok(()); 22 | }; 23 | 24 | // let client = get_http_client(); 25 | // let data = client 26 | // .get(format!("http://{}/file/get/{}", host, file.filename)) 27 | // .send() 28 | // .await? 29 | // .bytes() 30 | // .await?; 31 | 32 | create_dir_all(UPLOAD_DIR).await?; 33 | let target_file = format!("{}/{}", UPLOAD_DIR, file.filename); 34 | let mut tmp_file = File::create(target_file.clone()).await?; 35 | tmp_file.write_all(&data).await?; 36 | Ok(()) 37 | } 38 | -------------------------------------------------------------------------------- /crates/entity/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "entity" 3 | edition = "2024" 4 | publish = false 5 | 6 | [dependencies] 7 | sea-orm.workspace = true 8 | serde.workspace = true 9 | -------------------------------------------------------------------------------- /crates/entity/src/entity/agent_release_version.rs: -------------------------------------------------------------------------------- 1 | //! `SeaORM` Entity. 
Generated by sea-orm-codegen 0.12.15 2 | 3 | use sea_orm::entity::prelude::*; 4 | use serde::{Deserialize, Serialize}; 5 | 6 | #[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize, Default)] 7 | #[sea_orm(table_name = "agent_release_version")] 8 | pub struct Model { 9 | #[sea_orm(primary_key)] 10 | pub id: u64, 11 | pub name: String, 12 | pub info: String, 13 | pub url: String, 14 | #[sea_orm(unique)] 15 | pub release_version: String, 16 | pub release_scope: i8, 17 | pub release_ip: Option, 18 | pub created_user: String, 19 | pub updated_user: String, 20 | pub created_time: DateTimeLocal, 21 | pub updated_time: DateTimeLocal, 22 | } 23 | 24 | #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] 25 | pub enum Relation {} 26 | 27 | impl ActiveModelBehavior for ActiveModel {} 28 | -------------------------------------------------------------------------------- /crates/entity/src/entity/casbin_rule.rs: -------------------------------------------------------------------------------- 1 | //! `SeaORM` Entity. Generated by sea-orm-codegen 0.12.15 2 | 3 | use sea_orm::entity::prelude::*; 4 | use serde::{Deserialize, Serialize}; 5 | 6 | #[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize, Default)] 7 | #[sea_orm(table_name = "casbin_rule")] 8 | pub struct Model { 9 | #[sea_orm(primary_key)] 10 | pub id: i32, 11 | pub ptype: String, 12 | pub v0: String, 13 | pub v1: String, 14 | pub v2: String, 15 | pub v3: String, 16 | pub v4: String, 17 | pub v5: String, 18 | } 19 | 20 | #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] 21 | pub enum Relation {} 22 | 23 | impl ActiveModelBehavior for ActiveModel {} 24 | -------------------------------------------------------------------------------- /crates/entity/src/entity/executor.rs: -------------------------------------------------------------------------------- 1 | //! 
`SeaORM` Entity, @generated by sea-orm-codegen 1.1.0 2 | 3 | use sea_orm::entity::prelude::*; 4 | use serde::{Deserialize, Serialize}; 5 | 6 | #[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize, Default)] 7 | #[sea_orm(table_name = "executor")] 8 | pub struct Model { 9 | #[sea_orm(primary_key)] 10 | pub id: u64, 11 | #[sea_orm(unique)] 12 | pub name: String, 13 | pub command: String, 14 | pub platform: String, 15 | pub info: String, 16 | pub read_code_from_stdin: i8, 17 | pub created_user: String, 18 | pub updated_user: String, 19 | pub created_time: DateTimeLocal, 20 | pub updated_time: DateTimeLocal, 21 | } 22 | 23 | #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] 24 | pub enum Relation {} 25 | 26 | impl ActiveModelBehavior for ActiveModel {} 27 | -------------------------------------------------------------------------------- /crates/entity/src/entity/instance.rs: -------------------------------------------------------------------------------- 1 | //! 
`SeaORM` Entity, @generated by sea-orm-codegen 1.1.0 2 | 3 | use sea_orm::entity::prelude::*; 4 | use serde::{Deserialize, Serialize}; 5 | 6 | #[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize, Default)] 7 | #[sea_orm(table_name = "instance")] 8 | pub struct Model { 9 | #[sea_orm(primary_key)] 10 | pub id: u64, 11 | pub instance_id: String, 12 | pub ip: String, 13 | pub mac_addr: String, 14 | pub namespace: String, 15 | pub instance_group_id: u64, 16 | pub info: String, 17 | pub status: i8, 18 | pub sys_user: String, 19 | pub password: String, 20 | pub ssh_port: u16, 21 | pub created_time: DateTimeLocal, 22 | pub updated_time: DateTimeLocal, 23 | } 24 | 25 | #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] 26 | pub enum Relation {} 27 | 28 | impl ActiveModelBehavior for ActiveModel {} 29 | -------------------------------------------------------------------------------- /crates/entity/src/entity/instance_group.rs: -------------------------------------------------------------------------------- 1 | //! `SeaORM` Entity, @generated by sea-orm-codegen 1.1.0 2 | 3 | use sea_orm::entity::prelude::*; 4 | use serde::{Deserialize, Serialize}; 5 | 6 | #[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize, Default)] 7 | #[sea_orm(table_name = "instance_group")] 8 | pub struct Model { 9 | #[sea_orm(primary_key)] 10 | pub id: u64, 11 | #[sea_orm(unique)] 12 | pub name: String, 13 | pub info: String, 14 | pub created_user: String, 15 | pub created_time: DateTimeLocal, 16 | pub updated_time: DateTimeLocal, 17 | } 18 | 19 | #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] 20 | pub enum Relation {} 21 | 22 | impl ActiveModelBehavior for ActiveModel {} 23 | -------------------------------------------------------------------------------- /crates/entity/src/entity/instance_role.rs: -------------------------------------------------------------------------------- 1 | //! 
`SeaORM` Entity, @generated by sea-orm-codegen 1.1.0 2 | 3 | use sea_orm::entity::prelude::*; 4 | use serde::{Deserialize, Serialize}; 5 | 6 | #[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize, Default)] 7 | #[sea_orm(table_name = "instance_role")] 8 | pub struct Model { 9 | #[sea_orm(primary_key)] 10 | pub id: u64, 11 | pub role_id: u64, 12 | pub instance_id: String, 13 | pub instance_group_id: u64, 14 | pub created_time: DateTimeLocal, 15 | } 16 | 17 | #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] 18 | pub enum Relation {} 19 | 20 | impl ActiveModelBehavior for ActiveModel {} 21 | -------------------------------------------------------------------------------- /crates/entity/src/entity/job.rs: -------------------------------------------------------------------------------- 1 | //! `SeaORM` Entity, @generated by sea-orm-codegen 1.1.0 2 | 3 | use sea_orm::entity::prelude::*; 4 | use serde::{Deserialize, Serialize}; 5 | 6 | #[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize, Default)] 7 | #[sea_orm(table_name = "job")] 8 | pub struct Model { 9 | #[sea_orm(primary_key)] 10 | pub id: u64, 11 | pub eid: String, 12 | pub team_id: u64, 13 | pub executor_id: u64, 14 | pub job_type: String, 15 | #[sea_orm(unique)] 16 | pub name: String, 17 | #[sea_orm(column_type = "Text")] 18 | pub code: String, 19 | pub info: String, 20 | pub bundle_script: Option, 21 | pub upload_file: String, 22 | pub work_dir: String, 23 | pub work_user: String, 24 | pub timeout: u64, 25 | pub max_retry: u8, 26 | pub max_parallel: u8, 27 | pub completed_callback: Option, 28 | pub is_public: i8, 29 | pub display_on_dashboard: bool, 30 | pub created_user: String, 31 | pub updated_user: String, 32 | pub args: Option, 33 | pub created_time: DateTimeLocal, 34 | pub updated_time: DateTimeLocal, 35 | #[serde(default)] 36 | pub is_deleted: bool, 37 | pub deleted_at: Option, 38 | #[serde(default)] 39 | pub deleted_by: String, 40 | } 41 | 42 | 
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] 43 | pub enum Relation {} 44 | 45 | impl ActiveModelBehavior for ActiveModel {} 46 | -------------------------------------------------------------------------------- /crates/entity/src/entity/job_bundle_script.rs: -------------------------------------------------------------------------------- 1 | //! `SeaORM` Entity, @generated by sea-orm-codegen 1.1.0 2 | 3 | use sea_orm::entity::prelude::*; 4 | use serde::{Deserialize, Serialize}; 5 | 6 | #[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize, Default)] 7 | #[sea_orm(table_name = "job_bundle_script")] 8 | pub struct Model { 9 | #[sea_orm(primary_key)] 10 | pub id: u64, 11 | pub eid: String, 12 | pub executor_id: u64, 13 | pub team_id: u64, 14 | #[sea_orm(unique)] 15 | pub name: String, 16 | #[sea_orm(column_type = "Text")] 17 | pub code: String, 18 | pub info: String, 19 | pub created_user: String, 20 | pub updated_user: String, 21 | pub args: Option, 22 | pub created_time: DateTimeLocal, 23 | pub updated_time: DateTimeLocal, 24 | #[serde(default)] 25 | pub is_deleted: bool, 26 | pub deleted_at: Option, 27 | #[serde(default)] 28 | pub deleted_by: String, 29 | } 30 | 31 | #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] 32 | pub enum Relation {} 33 | 34 | impl ActiveModelBehavior for ActiveModel {} 35 | -------------------------------------------------------------------------------- /crates/entity/src/entity/job_exec_history.rs: -------------------------------------------------------------------------------- 1 | //! 
`SeaORM` Entity, @generated by sea-orm-codegen 1.1.0 2 | 3 | use sea_orm::entity::prelude::*; 4 | use serde::{Deserialize, Serialize}; 5 | 6 | #[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize, Default)] 7 | #[sea_orm(table_name = "job_exec_history")] 8 | pub struct Model { 9 | #[sea_orm(primary_key)] 10 | pub id: u64, 11 | pub instance_id: String, 12 | pub schedule_id: String, 13 | pub eid: String, 14 | pub job_type: String, 15 | pub bundle_script_result: Option, 16 | pub exit_status: String, 17 | pub exit_code: i32, 18 | #[sea_orm(column_type = "Text")] 19 | pub output: String, 20 | pub start_time: Option, 21 | pub end_time: Option, 22 | pub run_id: String, 23 | pub created_time: DateTimeLocal, 24 | pub updated_time: DateTimeLocal, 25 | pub created_user: String, 26 | } 27 | 28 | #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] 29 | pub enum Relation {} 30 | 31 | impl ActiveModelBehavior for ActiveModel {} 32 | -------------------------------------------------------------------------------- /crates/entity/src/entity/job_organizer.rs: -------------------------------------------------------------------------------- 1 | //! `SeaORM` Entity. 
Generated by sea-orm-codegen 0.12.15 2 | 3 | use sea_orm::entity::prelude::*; 4 | use serde::{Deserialize, Serialize}; 5 | 6 | #[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize, Default)] 7 | #[sea_orm(table_name = "job_organizer")] 8 | pub struct Model { 9 | #[sea_orm(primary_key)] 10 | pub id: u64, 11 | pub eid: String, 12 | pub name: String, 13 | pub nodes: Option, 14 | pub edges: Option, 15 | pub info: String, 16 | pub is_public: i8, 17 | pub created_user: String, 18 | pub updated_user: String, 19 | pub created_time: DateTimeLocal, 20 | pub updated_time: DateTimeLocal, 21 | } 22 | 23 | #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] 24 | pub enum Relation {} 25 | 26 | impl ActiveModelBehavior for ActiveModel {} 27 | -------------------------------------------------------------------------------- /crates/entity/src/entity/job_organizer_process.rs: -------------------------------------------------------------------------------- 1 | //! `SeaORM` Entity. 
Generated by sea-orm-codegen 0.12.15 2 | 3 | use sea_orm::entity::prelude::*; 4 | use serde::{Deserialize, Serialize}; 5 | 6 | #[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize, Default)] 7 | #[sea_orm(table_name = "job_organizer_process")] 8 | pub struct Model { 9 | #[sea_orm(primary_key)] 10 | pub id: u64, 11 | pub name: String, 12 | pub organizer_id: u64, 13 | pub organizer_version: String, 14 | pub process_id: String, 15 | pub status: String, 16 | pub current_node: String, 17 | pub created_user: String, 18 | pub updated_user: String, 19 | pub created_time: DateTimeLocal, 20 | pub updated_time: DateTimeLocal, 21 | } 22 | 23 | #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] 24 | pub enum Relation {} 25 | 26 | impl ActiveModelBehavior for ActiveModel {} 27 | -------------------------------------------------------------------------------- /crates/entity/src/entity/job_organizer_release.rs: -------------------------------------------------------------------------------- 1 | //! `SeaORM` Entity. 
Generated by sea-orm-codegen 0.12.15 2 | 3 | use sea_orm::entity::prelude::*; 4 | use serde::{Deserialize, Serialize}; 5 | 6 | #[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize, Default)] 7 | #[sea_orm(table_name = "job_organizer_release")] 8 | pub struct Model { 9 | #[sea_orm(primary_key)] 10 | pub id: u64, 11 | pub organizer_id: u64, 12 | pub version: String, 13 | pub name: String, 14 | pub info: String, 15 | pub is_public: i8, 16 | pub nodes: Option, 17 | pub edges: Option, 18 | pub created_user: String, 19 | pub created_time: DateTimeLocal, 20 | pub updated_time: DateTimeLocal, 21 | } 22 | 23 | #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] 24 | pub enum Relation {} 25 | 26 | impl ActiveModelBehavior for ActiveModel {} 27 | -------------------------------------------------------------------------------- /crates/entity/src/entity/job_organizer_release_edge.rs: -------------------------------------------------------------------------------- 1 | //! `SeaORM` Entity. 
Generated by sea-orm-codegen 0.12.15 2 | 3 | use sea_orm::entity::prelude::*; 4 | use serde::{Deserialize, Serialize}; 5 | 6 | #[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize, Default)] 7 | #[sea_orm(table_name = "job_organizer_release_edge")] 8 | pub struct Model { 9 | #[sea_orm(primary_key)] 10 | pub id: u64, 11 | pub version: String, 12 | pub edge_id: String, 13 | pub edge_type: String, 14 | pub props: Option, 15 | pub source_node_id: String, 16 | pub target_node_id: String, 17 | pub edge_val: String, 18 | pub created_time: DateTimeLocal, 19 | pub updated_time: DateTimeLocal, 20 | } 21 | 22 | #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] 23 | pub enum Relation {} 24 | 25 | impl ActiveModelBehavior for ActiveModel {} 26 | -------------------------------------------------------------------------------- /crates/entity/src/entity/job_organizer_release_node.rs: -------------------------------------------------------------------------------- 1 | //! `SeaORM` Entity. 
Generated by sea-orm-codegen 0.12.15 2 | 3 | use sea_orm::entity::prelude::*; 4 | use serde::{Deserialize, Serialize}; 5 | 6 | #[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize, Default)] 7 | #[sea_orm(table_name = "job_organizer_release_node")] 8 | pub struct Model { 9 | #[sea_orm(primary_key)] 10 | pub id: u64, 11 | pub version: String, 12 | pub node_id: String, 13 | pub name: String, 14 | pub node_type: String, 15 | pub flow_type: String, 16 | pub task_type: String, 17 | pub dispatch_data: Option, 18 | pub props: Option, 19 | #[sea_orm(column_type = "Text")] 20 | pub condition: String, 21 | pub bind_ip: Option, 22 | pub created_time: DateTimeLocal, 23 | pub updated_time: DateTimeLocal, 24 | } 25 | 26 | #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] 27 | pub enum Relation {} 28 | 29 | impl ActiveModelBehavior for ActiveModel {} 30 | -------------------------------------------------------------------------------- /crates/entity/src/entity/job_organizer_task.rs: -------------------------------------------------------------------------------- 1 | //! `SeaORM` Entity. 
Generated by sea-orm-codegen 0.12.15 2 | 3 | use sea_orm::entity::prelude::*; 4 | use serde::{Deserialize, Serialize}; 5 | 6 | #[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize, Default)] 7 | #[sea_orm(table_name = "job_organizer_task")] 8 | pub struct Model { 9 | #[sea_orm(primary_key)] 10 | pub id: u64, 11 | pub process_id: String, 12 | pub node_id: String, 13 | pub status: String, 14 | pub output: String, 15 | pub bind_total: i32, 16 | pub restart_num: i32, 17 | pub created_time: DateTimeLocal, 18 | pub updated_time: DateTimeLocal, 19 | } 20 | 21 | #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] 22 | pub enum Relation {} 23 | 24 | impl ActiveModelBehavior for ActiveModel {} 25 | -------------------------------------------------------------------------------- /crates/entity/src/entity/job_organizer_task_result.rs: -------------------------------------------------------------------------------- 1 | //! `SeaORM` Entity. Generated by sea-orm-codegen 0.12.15 2 | 3 | use sea_orm::entity::prelude::*; 4 | use serde::{Deserialize, Serialize}; 5 | 6 | #[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize, Default)] 7 | #[sea_orm(table_name = "job_organizer_task_result")] 8 | pub struct Model { 9 | #[sea_orm(primary_key)] 10 | pub id: u64, 11 | pub process_id: String, 12 | pub node_id: String, 13 | pub bind_ip: String, 14 | pub exit_code: i8, 15 | pub exit_status: String, 16 | #[sea_orm(column_type = "Text")] 17 | pub output: String, 18 | pub status: String, 19 | pub restart_num: i32, 20 | pub dispatch_result: Option, 21 | pub created_time: DateTimeLocal, 22 | pub updated_time: DateTimeLocal, 23 | } 24 | 25 | #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] 26 | pub enum Relation {} 27 | 28 | impl ActiveModelBehavior for ActiveModel {} 29 | -------------------------------------------------------------------------------- /crates/entity/src/entity/job_running_status.rs: 
-------------------------------------------------------------------------------- 1 | //! `SeaORM` Entity, @generated by sea-orm-codegen 1.1.0 2 | 3 | use sea_orm::entity::prelude::*; 4 | use serde::{Deserialize, Serialize}; 5 | 6 | #[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize, Default)] 7 | #[sea_orm(table_name = "job_running_status")] 8 | pub struct Model { 9 | #[sea_orm(primary_key)] 10 | pub id: u64, 11 | pub instance_id: String, 12 | pub schedule_type: String, 13 | pub job_type: String, 14 | pub eid: String, 15 | pub schedule_id: String, 16 | pub schedule_status: String, 17 | pub run_status: String, 18 | pub exit_status: String, 19 | pub exit_code: i32, 20 | pub dispatch_result: Option, 21 | pub start_time: Option, 22 | pub end_time: Option, 23 | pub next_time: Option, 24 | pub prev_time: Option, 25 | pub updated_user: String, 26 | pub updated_time: DateTimeLocal, 27 | #[serde(default)] 28 | pub is_deleted: bool, 29 | pub deleted_at: Option, 30 | #[serde(default)] 31 | pub deleted_by: String, 32 | } 33 | 34 | #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] 35 | pub enum Relation {} 36 | 37 | impl ActiveModelBehavior for ActiveModel {} 38 | -------------------------------------------------------------------------------- /crates/entity/src/entity/job_schedule_history.rs: -------------------------------------------------------------------------------- 1 | //! 
`SeaORM` Entity, @generated by sea-orm-codegen 1.1.0 2 | 3 | use sea_orm::entity::prelude::*; 4 | use serde::{Deserialize, Serialize}; 5 | 6 | #[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize, Default)] 7 | #[sea_orm(table_name = "job_schedule_history")] 8 | pub struct Model { 9 | #[sea_orm(primary_key)] 10 | pub id: u64, 11 | pub schedule_id: String, 12 | pub name: String, 13 | pub job_type: String, 14 | pub eid: String, 15 | pub dispatch_result: Option, 16 | pub schedule_type: String, 17 | pub action: String, 18 | pub dispatch_data: Option, 19 | pub snapshot_data: Option, 20 | pub created_user: String, 21 | pub updated_user: String, 22 | pub created_time: DateTimeLocal, 23 | pub updated_time: DateTimeLocal, 24 | #[serde(default)] 25 | pub is_deleted: bool, 26 | pub deleted_at: Option, 27 | #[serde(default)] 28 | pub deleted_by: String, 29 | } 30 | 31 | #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] 32 | pub enum Relation {} 33 | 34 | impl ActiveModelBehavior for ActiveModel {} 35 | -------------------------------------------------------------------------------- /crates/entity/src/entity/job_supervisor.rs: -------------------------------------------------------------------------------- 1 | //! 
`SeaORM` Entity, @generated by sea-orm-codegen 1.1.0 2 | 3 | use sea_orm::entity::prelude::*; 4 | use serde::{Deserialize, Serialize}; 5 | 6 | #[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize, Default)] 7 | #[sea_orm(table_name = "job_supervisor")] 8 | pub struct Model { 9 | #[sea_orm(primary_key)] 10 | pub id: u64, 11 | #[sea_orm(unique)] 12 | pub name: String, 13 | pub eid: String, 14 | pub restart_interval: u64, 15 | pub info: String, 16 | pub created_user: String, 17 | pub updated_user: String, 18 | pub created_time: DateTimeLocal, 19 | pub updated_time: DateTimeLocal, 20 | #[serde(default)] 21 | pub is_deleted: bool, 22 | pub deleted_at: Option, 23 | #[serde(default)] 24 | pub deleted_by: String, 25 | } 26 | 27 | #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] 28 | pub enum Relation {} 29 | 30 | impl ActiveModelBehavior for ActiveModel {} 31 | -------------------------------------------------------------------------------- /crates/entity/src/entity/job_timer.rs: -------------------------------------------------------------------------------- 1 | //! 
`SeaORM` Entity, @generated by sea-orm-codegen 1.1.0 2 | 3 | use sea_orm::entity::prelude::*; 4 | use serde::{Deserialize, Serialize}; 5 | 6 | #[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize, Default)] 7 | #[sea_orm(table_name = "job_timer")] 8 | pub struct Model { 9 | #[sea_orm(primary_key)] 10 | pub id: u64, 11 | #[sea_orm(unique)] 12 | pub name: String, 13 | pub eid: String, 14 | pub timer_expr: Option, 15 | pub job_type: String, 16 | pub info: String, 17 | pub created_user: String, 18 | pub updated_user: String, 19 | pub created_time: DateTimeLocal, 20 | pub updated_time: DateTimeLocal, 21 | #[serde(default)] 22 | pub is_deleted: bool, 23 | pub deleted_at: Option, 24 | #[serde(default)] 25 | pub deleted_by: String, 26 | } 27 | 28 | #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] 29 | pub enum Relation {} 30 | 31 | impl ActiveModelBehavior for ActiveModel {} 32 | -------------------------------------------------------------------------------- /crates/entity/src/entity/mod.rs: -------------------------------------------------------------------------------- 1 | //! 
`SeaORM` Entity, @generated by sea-orm-codegen 1.1.0 2 | 3 | pub mod prelude; 4 | 5 | pub mod agent_release_version; 6 | pub mod casbin_rule; 7 | pub mod executor; 8 | pub mod instance; 9 | pub mod instance_group; 10 | pub mod instance_role; 11 | pub mod job; 12 | pub mod job_bundle_script; 13 | pub mod job_exec_history; 14 | pub mod job_organizer; 15 | pub mod job_organizer_process; 16 | pub mod job_organizer_release; 17 | pub mod job_organizer_release_edge; 18 | pub mod job_organizer_release_node; 19 | pub mod job_organizer_task; 20 | pub mod job_organizer_task_result; 21 | pub mod job_running_status; 22 | pub mod job_schedule_history; 23 | pub mod job_supervisor; 24 | pub mod job_timer; 25 | pub mod role; 26 | pub mod tag; 27 | pub mod tag_resource; 28 | pub mod team; 29 | pub mod team_member; 30 | pub mod user; 31 | pub mod user_server; 32 | -------------------------------------------------------------------------------- /crates/entity/src/entity/prelude.rs: -------------------------------------------------------------------------------- 1 | //! 
`SeaORM` Entity, @generated by sea-orm-codegen 1.1.0 2 | 3 | pub use super::agent_release_version::Entity as AgentReleaseVersion; 4 | pub use super::casbin_rule::Entity as CasbinRule; 5 | pub use super::executor::Entity as Executor; 6 | pub use super::instance::Entity as Instance; 7 | pub use super::instance_group::Entity as InstanceGroup; 8 | pub use super::instance_role::Entity as InstanceRole; 9 | pub use super::job::Entity as Job; 10 | pub use super::job_bundle_script::Entity as JobBundleScript; 11 | pub use super::job_exec_history::Entity as JobExecHistory; 12 | pub use super::job_organizer::Entity as JobOrganizer; 13 | pub use super::job_organizer_process::Entity as JobOrganizerProcess; 14 | pub use super::job_organizer_release::Entity as JobOrganizerRelease; 15 | pub use super::job_organizer_release_edge::Entity as JobOrganizerReleaseEdge; 16 | pub use super::job_organizer_release_node::Entity as JobOrganizerReleaseNode; 17 | pub use super::job_organizer_task::Entity as JobOrganizerTask; 18 | pub use super::job_organizer_task_result::Entity as JobOrganizerTaskResult; 19 | pub use super::job_running_status::Entity as JobRunningStatus; 20 | pub use super::job_schedule_history::Entity as JobScheduleHistory; 21 | pub use super::job_supervisor::Entity as JobSupervisor; 22 | pub use super::job_timer::Entity as JobTimer; 23 | pub use super::role::Entity as Role; 24 | pub use super::tag::Entity as Tag; 25 | pub use super::tag_resource::Entity as TagResource; 26 | pub use super::team::Entity as Team; 27 | pub use super::team_member::Entity as TeamMember; 28 | pub use super::user::Entity as User; 29 | pub use super::user_server::Entity as UserServer; 30 | -------------------------------------------------------------------------------- /crates/entity/src/entity/role.rs: -------------------------------------------------------------------------------- 1 | //! 
`SeaORM` Entity, @generated by sea-orm-codegen 1.1.0 2 | 3 | use sea_orm::entity::prelude::*; 4 | use serde::{Deserialize, Serialize}; 5 | 6 | #[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize, Default)] 7 | #[sea_orm(table_name = "role")] 8 | pub struct Model { 9 | #[sea_orm(primary_key)] 10 | pub id: u64, 11 | #[sea_orm(unique)] 12 | pub name: String, 13 | pub info: String, 14 | pub is_admin: bool, 15 | pub created_user: String, 16 | pub created_time: DateTimeLocal, 17 | pub updated_time: DateTimeLocal, 18 | } 19 | 20 | #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] 21 | pub enum Relation {} 22 | 23 | impl ActiveModelBehavior for ActiveModel {} 24 | -------------------------------------------------------------------------------- /crates/entity/src/entity/tag.rs: -------------------------------------------------------------------------------- 1 | //! `SeaORM` Entity. Generated by sea-orm-codegen 0.12.15 2 | 3 | use sea_orm::entity::prelude::*; 4 | use serde::{Deserialize, Serialize}; 5 | 6 | #[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize, Default)] 7 | #[sea_orm(table_name = "tag")] 8 | pub struct Model { 9 | #[sea_orm(primary_key)] 10 | pub id: u64, 11 | pub tag_name: String, 12 | pub created_user: String, 13 | pub created_time: DateTimeLocal, 14 | } 15 | 16 | #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] 17 | pub enum Relation {} 18 | 19 | impl ActiveModelBehavior for ActiveModel {} 20 | -------------------------------------------------------------------------------- /crates/entity/src/entity/tag_resource.rs: -------------------------------------------------------------------------------- 1 | //! `SeaORM` Entity. 
Generated by sea-orm-codegen 0.12.15 2 | 3 | use sea_orm::entity::prelude::*; 4 | use serde::{Deserialize, Serialize}; 5 | 6 | #[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize, Default)] 7 | #[sea_orm(table_name = "tag_resource")] 8 | pub struct Model { 9 | #[sea_orm(primary_key)] 10 | pub id: u64, 11 | pub tag_id: u64, 12 | pub resource_type: String, 13 | pub resource_id: u64, 14 | pub created_user: String, 15 | pub created_time: DateTimeLocal, 16 | } 17 | 18 | #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] 19 | pub enum Relation {} 20 | 21 | impl ActiveModelBehavior for ActiveModel {} 22 | -------------------------------------------------------------------------------- /crates/entity/src/entity/team.rs: -------------------------------------------------------------------------------- 1 | //! `SeaORM` Entity, @generated by sea-orm-codegen 1.1.0 2 | 3 | use sea_orm::entity::prelude::*; 4 | use serde::{Deserialize, Serialize}; 5 | 6 | #[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize, Default)] 7 | #[sea_orm(table_name = "team")] 8 | pub struct Model { 9 | #[sea_orm(primary_key)] 10 | pub id: u64, 11 | #[sea_orm(unique)] 12 | pub name: String, 13 | pub info: String, 14 | pub created_user: String, 15 | pub updated_user: String, 16 | pub created_time: DateTimeLocal, 17 | pub updated_time: DateTimeLocal, 18 | } 19 | 20 | #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] 21 | pub enum Relation {} 22 | 23 | impl ActiveModelBehavior for ActiveModel {} 24 | -------------------------------------------------------------------------------- /crates/entity/src/entity/team_member.rs: -------------------------------------------------------------------------------- 1 | //! 
`SeaORM` Entity, @generated by sea-orm-codegen 1.1.0 2 | 3 | use sea_orm::entity::prelude::*; 4 | use serde::{Deserialize, Serialize}; 5 | 6 | #[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize, Default)] 7 | #[sea_orm(table_name = "team_member")] 8 | pub struct Model { 9 | #[sea_orm(primary_key)] 10 | pub id: u64, 11 | pub team_id: u64, 12 | pub user_id: String, 13 | pub is_admin: bool, 14 | pub created_user: String, 15 | pub created_time: DateTimeLocal, 16 | } 17 | 18 | #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] 19 | pub enum Relation {} 20 | 21 | impl ActiveModelBehavior for ActiveModel {} 22 | -------------------------------------------------------------------------------- /crates/entity/src/entity/user.rs: -------------------------------------------------------------------------------- 1 | //! `SeaORM` Entity, @generated by sea-orm-codegen 1.1.0 2 | 3 | use sea_orm::entity::prelude::*; 4 | use serde::{Deserialize, Serialize}; 5 | 6 | #[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize, Default)] 7 | #[sea_orm(table_name = "user")] 8 | pub struct Model { 9 | #[sea_orm(primary_key)] 10 | pub id: u64, 11 | #[sea_orm(unique)] 12 | pub user_id: String, 13 | #[sea_orm(unique)] 14 | pub username: String, 15 | pub nickname: String, 16 | pub is_root: bool, 17 | pub role_id: u64, 18 | pub salt: String, 19 | pub password: String, 20 | pub avatar: String, 21 | pub email: String, 22 | pub phone: String, 23 | pub gender: String, 24 | pub introduction: String, 25 | pub created_time: DateTimeLocal, 26 | pub updated_time: DateTimeLocal, 27 | } 28 | 29 | #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] 30 | pub enum Relation {} 31 | 32 | impl ActiveModelBehavior for ActiveModel {} 33 | -------------------------------------------------------------------------------- /crates/entity/src/entity/user_role.rs: -------------------------------------------------------------------------------- 1 | //! 
`SeaORM` Entity. Generated by sea-orm-codegen 0.12.15 2 | 3 | use sea_orm::entity::prelude::*; 4 | use serde::{Deserialize, Serialize}; 5 | 6 | #[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize, Default)] 7 | #[sea_orm(table_name = "user_role")] 8 | pub struct Model { 9 | #[sea_orm(primary_key)] 10 | pub id: u64, 11 | #[sea_orm(unique)] 12 | pub name: String, 13 | pub info: String, 14 | pub created_time: DateTimeLocal, 15 | pub updated_time: DateTimeLocal, 16 | } 17 | 18 | #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] 19 | pub enum Relation {} 20 | 21 | impl ActiveModelBehavior for ActiveModel {} 22 | -------------------------------------------------------------------------------- /crates/entity/src/entity/user_server.rs: -------------------------------------------------------------------------------- 1 | //! `SeaORM` Entity, @generated by sea-orm-codegen 1.1.0 2 | 3 | use sea_orm::entity::prelude::*; 4 | use serde::{Deserialize, Serialize}; 5 | 6 | #[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize, Default)] 7 | #[sea_orm(table_name = "user_server")] 8 | pub struct Model { 9 | #[sea_orm(primary_key)] 10 | pub id: u64, 11 | pub user_id: String, 12 | pub instance_id: String, 13 | pub instance_group_id: i64, 14 | pub created_time: DateTimeLocal, 15 | } 16 | 17 | #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] 18 | pub enum Relation {} 19 | 20 | impl ActiveModelBehavior for ActiveModel {} 21 | -------------------------------------------------------------------------------- /crates/entity/src/lib.rs: -------------------------------------------------------------------------------- 1 | mod entity; 2 | pub use entity::*; 3 | -------------------------------------------------------------------------------- /crates/leader-election/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "leader-election" 3 | edition = "2024" 4 | publish = 
false

[dependencies]
redis.workspace = true
nanoid.workspace = true
tokio.workspace = true
anyhow.workspace = true
--------------------------------------------------------------------------------
/crates/leader-election/src/lib.rs:
--------------------------------------------------------------------------------
use nanoid::nanoid;
use redis::{AsyncCommands, Client, RedisResult};
use tokio::time::sleep;

use std::{future::Future, pin::Pin, time::Duration};

/// Distributed leader election backed by a single Redis key used as a TTL
/// lease: whoever manages to SET the key owns leadership and must keep
/// renewing the lease before it expires.
pub struct LeaderElection {
    redis_client: Client,
    /// Redis key acting as the lock / lease.
    key: String,
    /// Random identity of this candidate (nanoid), stored as the key value.
    id: String,
    /// Lease TTL in seconds.
    ttl: i64,
    /// How often a leader renews its lease (half the TTL, at least 1s).
    check_interval: Duration,
}

impl LeaderElection {
    /// Build a candidate for the election on `key` with a lease of `ttl` seconds.
    pub fn new(client: Client, key: &str, ttl: i64) -> RedisResult<Self> {
        Ok(Self {
            redis_client: client,
            key: key.to_string(),
            // FIX: `format!("{}", nanoid!())` was a no-op wrapper around a String.
            id: nanoid!(),
            ttl,
            // FIX: `ttl / 2` is 0 for ttl <= 1, which would busy-loop; clamp to 1s.
            check_interval: Duration::from_secs((ttl / 2).max(1) as u64),
        })
    }

    /// Try to acquire leadership, or renew it if we already hold the key.
    ///
    /// BUG FIX: the original issued `SET NX` and `EXPIRE` as two separate
    /// commands. If the process died between them, the key persisted with no
    /// TTL and no node could ever become leader again. Acquire atomically with
    /// `SET key id NX EX ttl` instead (single round trip, crash-safe).
    async fn acquire_leadership(&mut self) -> RedisResult<bool> {
        let mut conn = self.redis_client.get_multiplexed_async_connection().await?;

        // `SET ... NX EX ttl` replies "OK" on success, nil when the key exists.
        let acquired: Option<String> = redis::cmd("SET")
            .arg(&self.key)
            .arg(&self.id)
            .arg("NX")
            .arg("EX")
            .arg(self.ttl)
            .query_async(&mut conn)
            .await?;
        if acquired.is_some() {
            return Ok(true);
        }

        // Key already exists: if it carries our id we are still the leader —
        // extend the lease.
        let current_id: Option<String> = conn.get(&self.key).await?;
        if current_id.as_ref() == Some(&self.id) {
            conn.expire::<_, ()>(&self.key, self.ttl).await?;
            return Ok(true);
        }

        Ok(false)
    }

    /// Loop forever, invoking `leader_callback(true/false)` on every leadership
    /// transition. Leaders renew every `check_interval`; followers retry each
    /// second; Redis errors back off for 5s.
    pub async fn run_election<F>(&mut self, mut leader_callback: F) -> RedisResult<()>
    where
        F: Sync + Send + FnMut(bool) -> Pin<Box<dyn Future<Output = ()> + Send>>,
    {
        let mut is_leader = false;

        loop {
            match self.acquire_leadership().await {
                Ok(acquired) => {
                    // Only notify on transitions, not on every renewal.
                    if acquired != is_leader {
                        is_leader = acquired;
                        leader_callback(is_leader).await;
                    }
                    if is_leader {
                        sleep(self.check_interval).await;
                    } else {
                        sleep(Duration::from_secs(1)).await;
                    }
                }
                Err(e) => {
                    eprintln!("Leader election error: {:?}", e);
                    sleep(Duration::from_secs(5)).await;
                }
            }
        }
    }
}
--------------------------------------------------------------------------------
/crates/service/Cargo.toml:
--------------------------------------------------------------------------------
[package]
name = "service"
edition = "2024"
publish = false

[dependencies]
sea-orm.workspace = true
serde.workspace = true
entity.workspace = true
sea-query.workspace = true
automate.workspace = true
chrono.workspace = true
anyhow.workspace = true
tracing.workspace = true
futures.workspace = true
sql-builder.workspace = true
poem.workspace = true
russh.workspace = true
russh-keys.workspace = true
async-trait.workspace = true
russh-sftp.workspace = true
nanoid.workspace = true
rust-crypto.workspace = true
casbin = "*"
simple_crypt.workspace = true
redis.workspace = true
tokio.workspace = true
serde_json.workspace = true
rustc-serialize.workspace = true
reqwest.workspace = true
toml.workspace = true
shellexpand.workspace = true
config.workspace = true
utils.workspace = true
evalexpr.workspace = true
serde_repr.workspace = true
--------------------------------------------------------------------------------
/crates/service/src/config.rs:
--------------------------------------------------------------------------------
use std::{fs, path::Path};

use anyhow::Result;
use config::{Config, File};
use serde::{Deserialize, Serialize};

#[derive(Debug, Deserialize, Serialize, Clone, Default)]
pub struct Encrypt {
    pub private_key: String,
}

#[derive(Debug, Deserialize, Serialize, Clone, Default)]
pub struct Admin {
    pub username: String,
    pub password: String,
}

#[derive(Debug, Deserialize, Serialize, Clone,
Default)] 19 | pub struct Conf { 20 | /// if enable debug mode 21 | pub debug: bool, 22 | pub bind_addr: String, 23 | // api url debug 24 | pub api_url: String, 25 | pub redis_url: String, 26 | pub encrypt: Encrypt, 27 | pub comet_secret: String, 28 | pub database_url: String, 29 | pub admin: Admin, 30 | #[serde(skip)] 31 | config_file: String, 32 | } 33 | 34 | impl Conf { 35 | pub fn get_config_file(&self) -> String { 36 | self.config_file.to_owned() 37 | } 38 | } 39 | 40 | impl Conf { 41 | pub fn parse(filename: &str) -> Result { 42 | let v = Config::builder() 43 | .add_source(File::with_name("config/default").required(false)) 44 | .add_source(File::with_name(filename).required(false)) 45 | .build_cloned()? 46 | .try_deserialize()?; 47 | Ok(v) 48 | } 49 | 50 | pub fn sync2file(&self, filepath: Option) -> Result<()> { 51 | let toml = toml::to_string_pretty(self)?; 52 | let filepath = if let Some(v) = filepath { 53 | v 54 | } else { 55 | "jiascheduler-console.toml".to_string() 56 | }; 57 | let real_path = shellexpand::full(&filepath)?.to_string(); 58 | 59 | if let Some(p) = Path::new(&real_path).parent() { 60 | if !p.exists() { 61 | fs::create_dir_all(p)?; 62 | } 63 | } 64 | let ret = fs::write(&real_path, toml)?; 65 | Ok(ret) 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /crates/service/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod logic; 2 | pub mod state; 3 | use chrono::Local; 4 | pub use entity; 5 | use nanoid::nanoid; 6 | pub mod config; 7 | 8 | pub struct IdGenerator; 9 | 10 | impl IdGenerator { 11 | const JOB_PREFIX: &'static str = "j"; 12 | const JOB_BUNDLE_SCRIPT_PREFIX: &'static str = "b"; 13 | const TIMER_JOB_PREFIX: &'static str = "t"; 14 | const FLOW_JOB_PREFIX: &'static str = "f"; 15 | const SCHEDULE_ID_PREFIX: &'static str = "s"; 16 | const INSTANCE_PREFIX: &'static str = "i"; 17 | 18 | pub fn get_job_eid() -> String { 19 | 
Self::get_id(Self::JOB_PREFIX) 20 | } 21 | 22 | pub fn get_job_bundle_script_uid() -> String { 23 | Self::get_id(Self::JOB_BUNDLE_SCRIPT_PREFIX) 24 | } 25 | 26 | pub fn get_timer_uid() -> String { 27 | Self::get_id(Self::TIMER_JOB_PREFIX) 28 | } 29 | 30 | pub fn get_flow_job_uid() -> String { 31 | Self::get_id(Self::FLOW_JOB_PREFIX) 32 | } 33 | pub fn get_schedule_uid() -> String { 34 | Self::get_id(Self::SCHEDULE_ID_PREFIX) 35 | } 36 | 37 | pub fn get_instance_uid() -> String { 38 | Self::get_id(Self::INSTANCE_PREFIX) 39 | } 40 | 41 | fn get_id(prefix: &str) -> String { 42 | format!("{prefix}-{}", nanoid!(10)).into() 43 | } 44 | 45 | pub fn get_run_id() -> String { 46 | Local::now().format("%Y%m%d%H%M%S").to_string() 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /crates/service/src/logic/executor.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | entity::{self, executor, prelude::*}, 3 | state::AppContext, 4 | }; 5 | use anyhow::Result; 6 | use sea_orm::{ 7 | ActiveModelTrait, ColumnTrait, EntityTrait, PaginatorTrait, QueryFilter, QueryOrder, QueryTrait, 8 | }; 9 | 10 | pub struct ExecutorList(Vec); 11 | 12 | impl ExecutorList { 13 | pub fn get_by_id(&self, executor_id: u64) -> Option { 14 | let v = self 15 | .0 16 | .iter() 17 | .find(|&v| v.id == executor_id) 18 | .map(|v| v.to_owned()); 19 | v 20 | } 21 | } 22 | 23 | #[derive(Clone)] 24 | pub struct ExecutorLogic<'a> { 25 | ctx: &'a AppContext, 26 | } 27 | impl<'a> ExecutorLogic<'a> { 28 | pub fn new(ctx: &'a AppContext) -> Self { 29 | Self { ctx } 30 | } 31 | 32 | pub async fn get_by_id(&self, id: u32) -> Result> { 33 | let one = Executor::find_by_id(id).one(&self.ctx.db).await?; 34 | Ok(one) 35 | } 36 | 37 | pub async fn query_executor( 38 | &self, 39 | default_id: Option, 40 | name: Option, 41 | page: u64, 42 | page_size: u64, 43 | ) -> Result<(Vec, u64)> { 44 | let model = 
Executor::find().apply_if(name, |query, v| { 45 | query.filter(entity::executor::Column::Name.contains(v)) 46 | }); 47 | 48 | let total = model.clone().count(&self.ctx.db).await?; 49 | 50 | let list = model 51 | .apply_if(default_id, |query, v| { 52 | query.order_by_desc(executor::Column::Id.eq(v)) 53 | }) 54 | .order_by_asc(entity::executor::Column::Id) 55 | .paginate(&self.ctx.db, page_size) 56 | .fetch_page(page) 57 | .await?; 58 | Ok((list, total)) 59 | } 60 | 61 | pub async fn get_all_by_executor_id(&self, executor_id: Vec) -> Result { 62 | let model = Executor::find().filter(entity::executor::Column::Id.is_in(executor_id)); 63 | 64 | let list = model 65 | .order_by_asc(entity::executor::Column::Id) 66 | .all(&self.ctx.db) 67 | .await?; 68 | Ok(ExecutorList(list)) 69 | } 70 | 71 | pub async fn save_executor( 72 | &self, 73 | model: entity::executor::ActiveModel, 74 | ) -> Result { 75 | let model = model.save(&self.ctx.db).await?; 76 | Ok(model) 77 | } 78 | 79 | pub async fn delete_job(&self, id: u32) -> Result { 80 | let ret = Executor::delete_by_id(id).exec(&self.ctx.db).await?; 81 | Ok(ret.rows_affected) 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /crates/service/src/logic/job/bundle_script.rs: -------------------------------------------------------------------------------- 1 | use crate::entity::executor; 2 | use crate::entity::job; 3 | use crate::entity::job_bundle_script; 4 | use crate::entity::prelude::*; 5 | use crate::entity::team; 6 | use crate::logic::types::UserInfo; 7 | use anyhow::anyhow; 8 | use anyhow::Result; 9 | use chrono::Local; 10 | use sea_orm::ActiveValue::Set; 11 | use sea_orm::Condition; 12 | use sea_orm::JoinType; 13 | use sea_orm::QuerySelect; 14 | use sea_orm::QueryTrait; 15 | use sea_orm::{ 16 | ActiveModelTrait, ColumnTrait, EntityTrait, PaginatorTrait, QueryFilter, QueryOrder, 17 | }; 18 | use sea_query::Expr; 19 | 20 | use super::types; 21 | use super::JobLogic; 22 | 23 | 
impl<'a> JobLogic<'a> { 24 | pub async fn save_job_bundle_script( 25 | &self, 26 | active_model: job_bundle_script::ActiveModel, 27 | ) -> Result { 28 | let active_model = active_model.save(&self.ctx.db).await?; 29 | Ok(active_model) 30 | } 31 | 32 | pub async fn query_bundle_script( 33 | &self, 34 | username: Option, 35 | team_id: Option, 36 | default_eid: Option, 37 | name: Option, 38 | updated_time_range: Option<(String, String)>, 39 | page: u64, 40 | page_size: u64, 41 | ) -> Result<(Vec, u64)> { 42 | let model = JobBundleScript::find() 43 | .column_as(executor::Column::Name, "executor_name") 44 | .column_as(team::Column::Name, "team_name") 45 | .join_rev( 46 | JoinType::LeftJoin, 47 | Executor::belongs_to(JobBundleScript) 48 | .from(executor::Column::Id) 49 | .to(job_bundle_script::Column::ExecutorId) 50 | .into(), 51 | ) 52 | .join_rev( 53 | JoinType::LeftJoin, 54 | Team::belongs_to(JobBundleScript) 55 | .from(team::Column::Id) 56 | .to(job_bundle_script::Column::TeamId) 57 | .into(), 58 | ) 59 | .filter(job_bundle_script::Column::IsDeleted.eq(false)) 60 | .apply_if(username, |q, v| { 61 | q.filter(job_bundle_script::Column::CreatedUser.eq(v)) 62 | }) 63 | .apply_if(name, |query, v| { 64 | query.filter(job_bundle_script::Column::Name.contains(v)) 65 | }) 66 | .apply_if(updated_time_range, |query, v| { 67 | query.filter( 68 | job_bundle_script::Column::UpdatedTime 69 | .gt(v.0) 70 | .and(job_bundle_script::Column::UpdatedTime.lt(v.1)), 71 | ) 72 | }) 73 | .apply_if(team_id, |q, v| { 74 | q.filter(job_bundle_script::Column::TeamId.eq(v)) 75 | }); 76 | 77 | let total = model.clone().count(&self.ctx.db).await?; 78 | let list = model 79 | .apply_if(default_eid, |query, v| { 80 | query.order_by_desc(Expr::expr(job_bundle_script::Column::Eid.eq(v))) 81 | }) 82 | .order_by_desc(job_bundle_script::Column::Id) 83 | .into_model() 84 | .paginate(&self.ctx.db, page_size) 85 | .fetch_page(page) 86 | .await?; 87 | Ok((list, total)) 88 | } 89 | 90 | pub async fn 
delete_bundle_script(&self, user_info: &UserInfo, eid: String) -> Result { 91 | let cond = Condition::all().add(Expr::cust_with_values( 92 | "JSON_CONTAINS(bundle_script, ?)", 93 | vec![serde_json::json!({ "eid": eid.clone() })], 94 | )); 95 | 96 | let has = Job::find() 97 | .filter(cond) 98 | .filter(job::Column::JobType.eq("bundle")) 99 | .one(&self.ctx.db) 100 | .await?; 101 | if has.is_some() { 102 | anyhow::bail!("this bundle script is used by job"); 103 | } 104 | 105 | let ret = JobBundleScript::update_many() 106 | .set(job_bundle_script::ActiveModel { 107 | is_deleted: Set(true), 108 | deleted_at: Set(Some(Local::now())), 109 | deleted_by: Set(user_info.username.clone()), 110 | ..Default::default() 111 | }) 112 | .filter(job_bundle_script::Column::Eid.eq(eid)) 113 | .exec(&self.ctx.db) 114 | .await?; 115 | Ok(ret.rows_affected) 116 | } 117 | 118 | pub async fn get_bundle_script_by_eid( 119 | &self, 120 | eid: &str, 121 | ) -> Result> { 122 | let model = JobBundleScript::find() 123 | .filter(job_bundle_script::Column::Eid.eq(eid)) 124 | .one(&self.ctx.db) 125 | .await?; 126 | Ok(model) 127 | } 128 | 129 | pub async fn get_default_validate_team_id_by_bundle_script( 130 | &self, 131 | user_info: &UserInfo, 132 | eid: Option<&str>, 133 | default_team_id: Option, 134 | ) -> Result> { 135 | let Some(eid) = eid else { 136 | return Ok(default_team_id); 137 | }; 138 | 139 | let record = self 140 | .get_bundle_script_by_eid(eid) 141 | .await? 
142 | .ok_or(anyhow!("not found the bundle script by {eid}"))?; 143 | let team_id = if record.team_id == 0 { 144 | return Ok(default_team_id); 145 | } else { 146 | Some(record.team_id) 147 | }; 148 | let ok = self 149 | .can_write_bundle_script(user_info, team_id, None) 150 | .await?; 151 | 152 | if ok { 153 | Ok(team_id) 154 | } else { 155 | Ok(None) 156 | } 157 | } 158 | } 159 | -------------------------------------------------------------------------------- /crates/service/src/logic/job/timer.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use chrono::Local; 3 | use sea_orm::{ 4 | ActiveModelTrait, ActiveValue::Set, ColumnTrait, Condition, EntityTrait, JoinType, 5 | PaginatorTrait, QueryFilter, QueryOrder, QuerySelect, QueryTrait, 6 | }; 7 | use sea_query::Query; 8 | 9 | use super::{types::JobTimerRelatedJobModel, JobLogic}; 10 | use crate::{ 11 | entity::{executor, job, job_timer, prelude::*, tag_resource, team}, 12 | logic::types::{ResourceType, UserInfo}, 13 | }; 14 | 15 | impl<'a> JobLogic<'a> { 16 | pub async fn save_job_timer( 17 | &self, 18 | active_model: job_timer::ActiveModel, 19 | ) -> Result { 20 | Ok(active_model.save(&self.ctx.db).await?) 
21 | } 22 | 23 | pub async fn query_job_timer( 24 | &self, 25 | team_id: Option, 26 | created_user: Option<&String>, 27 | name: Option, 28 | job_type: Option, 29 | updated_time_range: Option<(String, String)>, 30 | tag_ids: Option>, 31 | page: u64, 32 | page_size: u64, 33 | ) -> Result<(Vec, u64)> { 34 | let mut select = job_timer::Entity::find() 35 | .column_as(job::Column::Id, "job_id") 36 | .column_as(job::Column::Name, "job_name") 37 | .column(job::Column::ExecutorId) 38 | .column_as(executor::Column::Name, "executor_name") 39 | .column_as(executor::Column::Platform, "executor_platform") 40 | .column_as(team::Column::Id, "team_id") 41 | .column_as(team::Column::Name, "team_name") 42 | .join_rev( 43 | JoinType::LeftJoin, 44 | Job::belongs_to(JobTimer) 45 | .from(job::Column::Eid) 46 | .to(job_timer::Column::Eid) 47 | .into(), 48 | ) 49 | .join_rev( 50 | JoinType::LeftJoin, 51 | Executor::belongs_to(Job) 52 | .from(executor::Column::Id) 53 | .to(job::Column::ExecutorId) 54 | .into(), 55 | ) 56 | .join_rev( 57 | JoinType::LeftJoin, 58 | Team::belongs_to(Job) 59 | .from(team::Column::Id) 60 | .to(job::Column::TeamId) 61 | .into(), 62 | ) 63 | .filter(job_timer::Column::IsDeleted.eq(false)) 64 | .apply_if(name, |query, v| { 65 | query.filter(job_timer::Column::Name.contains(v)) 66 | }) 67 | .apply_if(created_user, |query, v| { 68 | query.filter(job_timer::Column::CreatedUser.eq(v)) 69 | }) 70 | .apply_if(job_type, |query, v| { 71 | query.filter(job_timer::Column::JobType.eq(v)) 72 | }) 73 | .apply_if(updated_time_range, |query, v| { 74 | query.filter( 75 | job_timer::Column::UpdatedTime 76 | .gt(v.0) 77 | .and(job_timer::Column::UpdatedTime.lt(v.1)), 78 | ) 79 | }) 80 | .apply_if(team_id, |q, v| q.filter(job::Column::TeamId.eq(v))); 81 | 82 | match tag_ids { 83 | Some(v) if v.len() > 0 => { 84 | select = select.filter( 85 | Condition::any().add( 86 | job::Column::Id.in_subquery( 87 | Query::select() 88 | .column(tag_resource::Column::ResourceId) 89 | .and_where( 90 
| tag_resource::Column::ResourceType 91 | .eq(ResourceType::Job.to_string()) 92 | .and(tag_resource::Column::TagId.is_in(v)), 93 | ) 94 | .from(TagResource) 95 | .to_owned(), 96 | ), 97 | ), 98 | ); 99 | } 100 | _ => {} 101 | }; 102 | 103 | let total = select.clone().count(&self.ctx.db).await?; 104 | 105 | let list = select 106 | .order_by_desc(job_timer::Column::Id) 107 | .into_model() 108 | .paginate(&self.ctx.db, page_size) 109 | .fetch_page(page) 110 | .await?; 111 | 112 | Ok((list, total)) 113 | } 114 | 115 | pub async fn delete_job_timer(&self, user_info: &UserInfo, id: u64) -> Result { 116 | let ret = JobTimer::update_many() 117 | .set(job_timer::ActiveModel { 118 | is_deleted: Set(true), 119 | deleted_at: Set(Some(Local::now())), 120 | deleted_by: Set(user_info.username.clone()), 121 | ..Default::default() 122 | }) 123 | .filter(job_timer::Column::Id.eq(id)) 124 | .exec(&self.ctx.db) 125 | .await?; 126 | 127 | Ok(ret.rows_affected) 128 | } 129 | } 130 | -------------------------------------------------------------------------------- /crates/service/src/logic/migration.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use sea_orm::{ConnectionTrait, Statement}; 3 | 4 | use crate::state::AppContext; 5 | 6 | use super::types; 7 | 8 | mod v100; 9 | 10 | #[derive(Clone)] 11 | pub struct MigrationLogic<'a> { 12 | ctx: &'a AppContext, 13 | } 14 | impl<'a> MigrationLogic<'a> { 15 | pub fn new(ctx: &'a AppContext) -> Self { 16 | Self { ctx } 17 | } 18 | 19 | pub fn query_version( 20 | &self, 21 | name: Option, 22 | page: u64, 23 | page_size: u64, 24 | ) -> Result<(Vec, u64)> { 25 | let list: Vec = vec![types::VersionRecord { 26 | name: "v1.0.0".to_string(), 27 | info: "first verison".to_string(), 28 | }] 29 | .into_iter() 30 | .filter(|v| match &name { 31 | Some(n) => v.name.contains(n), 32 | None => true, 33 | }) 34 | .collect(); 35 | 36 | let total = list.len(); 37 | 38 | let m = 39 | (((page - 1) * 
page_size) as usize)..(((page - 1) * page_size + page_size) as usize); 40 | 41 | let retain: Vec = list 42 | .into_iter() 43 | .enumerate() 44 | .filter(|v| m.contains(&v.0)) 45 | .map(|v| v.1) 46 | .collect(); 47 | 48 | return Ok((retain, total as u64)); 49 | } 50 | 51 | fn version(&self, ver: &str) -> Result<&str> { 52 | match ver { 53 | "v1.0.0" => Ok(v100::SQL), 54 | _ => anyhow::bail!("invalid version {ver}"), 55 | } 56 | } 57 | 58 | pub async fn migrate(&self, ver: &str) -> Result { 59 | let sql = self.version(ver)?; 60 | let ret = self.ctx.db.execute_unprepared(sql).await?; 61 | Ok(ret.rows_affected()) 62 | } 63 | 64 | pub async fn get_database(&self, db: &str) -> Result> { 65 | let backend = self.ctx.db.get_database_backend(); 66 | let ret = self 67 | .ctx 68 | .db 69 | .query_one(Statement::from_string( 70 | backend, 71 | format!("show create database {db}"), 72 | )) 73 | .await? 74 | .map_or(Ok(None::<(String, String)>), |ret| { 75 | let v1 = ret.try_get_by_index::(0); 76 | v1.and_then(|v1| ret.try_get_by_index::(1).map(|v2| Some((v1, v2)))) 77 | })?; 78 | Ok(ret) 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /crates/service/src/logic/mod.rs: -------------------------------------------------------------------------------- 1 | use sea_orm::ActiveValue::{self, NotSet, Set}; 2 | 3 | pub mod executor; 4 | pub mod instance; 5 | pub mod job; 6 | pub mod migration; 7 | pub mod role; 8 | pub mod ssh; 9 | pub mod tag; 10 | pub mod team; 11 | pub mod types; 12 | pub mod user; 13 | 14 | pub fn omit_empty_active_value(val: T) -> ActiveValue 15 | where 16 | T: Default + Into, 17 | T: PartialEq, 18 | { 19 | if val != Default::default() { 20 | Set(val) 21 | } else { 22 | NotSet 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /crates/service/src/logic/tag.rs: -------------------------------------------------------------------------------- 1 | use super::types::{self, 
ResourceType}; 2 | use crate::{ 3 | entity::{instance, job, prelude::*, tag, tag_resource}, 4 | state::AppContext, 5 | }; 6 | use anyhow::{anyhow, Result}; 7 | use sea_orm::{ 8 | ActiveModelTrait, ColumnTrait, Condition, EntityTrait, JoinType, QueryFilter, QuerySelect, 9 | QueryTrait, Set, 10 | }; 11 | use sea_query::Query; 12 | 13 | #[derive(Clone)] 14 | pub struct TagLogic<'a> { 15 | ctx: &'a AppContext, 16 | } 17 | impl<'a> TagLogic<'a> { 18 | pub fn new(ctx: &'a AppContext) -> Self { 19 | Self { ctx } 20 | } 21 | 22 | pub async fn bind_tag( 23 | &self, 24 | user_info: &types::UserInfo, 25 | tag_name: &str, 26 | resource_type: ResourceType, 27 | resource_id: u64, 28 | ) -> Result { 29 | match resource_type { 30 | ResourceType::Job => { 31 | let record = Job::find() 32 | .filter(job::Column::Id.eq(resource_id)) 33 | .one(&self.ctx.db) 34 | .await?; 35 | if record.is_none() { 36 | anyhow::bail!("cannot found job by id {}", resource_id); 37 | } 38 | } 39 | ResourceType::Instance => { 40 | let record = Instance::find() 41 | .filter(instance::Column::Id.eq(resource_id)) 42 | .one(&self.ctx.db) 43 | .await?; 44 | if record.is_none() { 45 | anyhow::bail!("cannot found instance by id {}", resource_id); 46 | } 47 | } 48 | } 49 | 50 | let tag_record = Tag::find() 51 | .filter(tag::Column::TagName.eq(tag_name)) 52 | .one(&self.ctx.db) 53 | .await?; 54 | 55 | let tag_id = if tag_record.is_none() { 56 | let inserted = tag::ActiveModel { 57 | tag_name: Set(tag_name.to_string()), 58 | created_user: Set(user_info.username.clone()), 59 | ..Default::default() 60 | } 61 | .save(&self.ctx.db) 62 | .await?; 63 | 64 | inserted.id.as_ref().to_owned() 65 | } else { 66 | tag_record.unwrap().id 67 | }; 68 | 69 | match resource_type { 70 | ResourceType::Job => { 71 | Job::find() 72 | .filter(job::Column::Id.eq(resource_id)) 73 | .one(&self.ctx.db) 74 | .await? 
75 | .ok_or(anyhow!("cannot found job by id {resource_id}"))?; 76 | } 77 | ResourceType::Instance => { 78 | Instance::find() 79 | .filter(instance::Column::Id.eq(resource_id)) 80 | .one(&self.ctx.db) 81 | .await? 82 | .ok_or(anyhow!("cannot found instance by id {resource_id}"))?; 83 | } 84 | }; 85 | 86 | tag_resource::ActiveModel { 87 | tag_id: Set(tag_id), 88 | resource_type: Set(resource_type.to_string()), 89 | resource_id: Set(resource_id), 90 | created_user: Set(user_info.username.clone()), 91 | ..Default::default() 92 | } 93 | .save(&self.ctx.db) 94 | .await?; 95 | 96 | Ok(tag_id) 97 | } 98 | 99 | pub async fn unbind_tag( 100 | &self, 101 | _user_info: &types::UserInfo, 102 | tag_id: u64, 103 | resource_type: ResourceType, 104 | resource_id: Vec, 105 | ) -> Result { 106 | let ret = TagResource::delete_many() 107 | .filter(tag_resource::Column::TagId.eq(tag_id)) 108 | .filter(tag_resource::Column::ResourceType.eq(resource_type.to_string())) 109 | .filter(tag_resource::Column::ResourceId.is_in(resource_id)) 110 | .exec(&self.ctx.db) 111 | .await?; 112 | Ok(ret.rows_affected) 113 | } 114 | 115 | pub async fn count_resource( 116 | &self, 117 | _user_info: &types::UserInfo, 118 | resource_type: ResourceType, 119 | team_id: Option, 120 | username: Option, 121 | ) -> Result> { 122 | let select = TagResource::find() 123 | .select_only() 124 | .column(tag::Column::TagName) 125 | .column_as(tag::Column::Id, "tag_id") 126 | .column_as(tag::Column::Id.count(), "total") 127 | .join_rev( 128 | JoinType::LeftJoin, 129 | Tag::belongs_to(TagResource) 130 | .from(tag::Column::Id) 131 | .to(tag_resource::Column::TagId) 132 | .into(), 133 | ) 134 | .filter(tag_resource::Column::ResourceType.eq(resource_type.to_string())); 135 | 136 | let select = match resource_type { 137 | ResourceType::Job => select 138 | .join_rev( 139 | JoinType::LeftJoin, 140 | Job::belongs_to(TagResource) 141 | .from(job::Column::Id) 142 | .to(tag_resource::Column::ResourceId) 143 | .into(), 144 | ) 145 | 
.filter(job::Column::IsDeleted.eq(false)) 146 | .apply_if(team_id, |q, v| q.filter(job::Column::TeamId.eq(v))) 147 | .apply_if(username, |q, v| q.filter(job::Column::CreatedUser.eq(v))), 148 | ResourceType::Instance => select.join_rev( 149 | JoinType::LeftJoin, 150 | Instance::belongs_to(TagResource) 151 | .from(instance::Column::Id) 152 | .to(tag_resource::Column::ResourceId) 153 | .into(), 154 | ), 155 | }; 156 | 157 | let tag_count: Vec = select 158 | .group_by(tag::Column::Id) 159 | .into_model() 160 | .all(&self.ctx.db) 161 | .await?; 162 | 163 | Ok(tag_count) 164 | } 165 | 166 | pub async fn get_all_tag_bind_by_job_ids( 167 | &self, 168 | job_ids: Vec, 169 | ) -> Result> { 170 | let tags = TagResource::find() 171 | .column(tag::Column::TagName) 172 | .join_rev( 173 | JoinType::LeftJoin, 174 | Tag::belongs_to(TagResource) 175 | .from(tag::Column::Id) 176 | .to(tag_resource::Column::TagId) 177 | .into(), 178 | ) 179 | .filter(tag_resource::Column::ResourceType.eq(ResourceType::Job.to_string())) 180 | .filter(tag_resource::Column::ResourceId.is_in(job_ids)) 181 | .into_model() 182 | .all(&self.ctx.db) 183 | .await?; 184 | Ok(tags) 185 | } 186 | 187 | pub async fn get_all_tag_by_job_ids(&self, job_ids: Vec) -> Result> { 188 | let tags = Tag::find() 189 | .filter( 190 | Condition::any().add( 191 | tag::Column::Id.in_subquery( 192 | Query::select() 193 | .column(tag_resource::Column::TagId) 194 | .and_where( 195 | tag_resource::Column::ResourceType 196 | .eq(ResourceType::Job.to_string()) 197 | .and(tag_resource::Column::TagId.is_in(job_ids)), 198 | ) 199 | .from(TagResource) 200 | .to_owned(), 201 | ), 202 | ), 203 | ) 204 | .all(&self.ctx.db) 205 | .await?; 206 | Ok(tags) 207 | } 208 | } 209 | -------------------------------------------------------------------------------- /crates/service/src/logic/types.rs: -------------------------------------------------------------------------------- 1 | use std::{collections::HashMap, fmt::Display}; 2 | 3 | use 
sea_orm::{prelude::DateTimeLocal, FromQueryResult}; 4 | use serde::{Deserialize, Serialize}; 5 | 6 | #[derive(Clone, Serialize, Deserialize, Default)] 7 | pub struct UserInfo { 8 | pub username: String, // 用户名 9 | pub nickname: String, //昵称 10 | pub avatar: String, // 头像 11 | pub email: String, // 邮箱 12 | pub introduction: String, // 简介 13 | pub phone: String, 14 | pub gender: String, 15 | pub user_id: String, 16 | pub is_root: bool, 17 | pub role: String, 18 | pub role_id: u64, 19 | pub permissions: Vec, 20 | pub created_time: String, 21 | pub updated_time: String, 22 | } 23 | 24 | #[derive(Clone, Serialize, Deserialize, Default, FromQueryResult)] 25 | pub struct UserRecord { 26 | pub id: u64, 27 | pub user_id: String, 28 | pub username: String, 29 | pub nickname: String, 30 | pub is_root: bool, 31 | pub role_id: u64, 32 | pub salt: String, 33 | pub password: String, 34 | pub avatar: String, 35 | pub email: String, 36 | pub phone: String, 37 | pub gender: String, 38 | pub role: Option, 39 | pub introduction: String, 40 | pub created_time: DateTimeLocal, 41 | pub updated_time: DateTimeLocal, 42 | } 43 | 44 | #[derive(Clone, Serialize, Deserialize, Default, FromQueryResult)] 45 | pub struct UserServer { 46 | pub id: u64, 47 | pub ip: String, 48 | pub instance_id: String, 49 | pub mac_addr: String, 50 | pub info: String, 51 | pub namespace: String, 52 | pub sys_user: Option, 53 | pub ssh_port: Option, 54 | pub password: Option, 55 | pub instance_group_id: Option, 56 | pub instance_group_name: Option, 57 | pub tag_id: Option, 58 | pub status: i8, 59 | pub created_time: DateTimeLocal, 60 | pub updated_time: DateTimeLocal, 61 | } 62 | 63 | #[derive(Clone, Serialize, Deserialize, Default, FromQueryResult)] 64 | pub struct UserRoleCount { 65 | pub role_id: u64, 66 | pub total: i64, 67 | } 68 | 69 | pub struct UserRoleCountList(pub Vec); 70 | 71 | impl UserRoleCountList { 72 | pub fn get_by_role_id(&self, role_id: u64) -> Option<&UserRoleCount> { 73 | 
self.0.iter().find(|&v| v.role_id == role_id) 74 | } 75 | } 76 | 77 | #[derive(Clone, Serialize, Deserialize, Default, FromQueryResult)] 78 | pub struct InstanceRecord { 79 | pub id: u64, 80 | pub instance_id: String, 81 | pub ip: String, 82 | pub namespace: String, 83 | pub info: String, 84 | pub status: i8, 85 | pub sys_user: String, 86 | pub password: String, 87 | pub role_id: Option, 88 | pub role_name: Option, 89 | pub instance_group: Option, 90 | pub instance_group_id: u64, 91 | pub ssh_port: u16, 92 | pub created_time: DateTimeLocal, 93 | pub updated_time: DateTimeLocal, 94 | } 95 | 96 | #[derive(Clone, Serialize, Deserialize, Default)] 97 | pub struct VersionRecord { 98 | pub name: String, 99 | pub info: String, 100 | } 101 | 102 | #[derive(Clone, Serialize, Deserialize, Default)] 103 | pub struct Permission { 104 | pub name: &'static str, 105 | pub object: &'static str, 106 | pub action: &'static str, 107 | } 108 | 109 | impl Display for Permission { 110 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 111 | f.write_str(format!("{}_{}", self.object, self.action).as_str()) 112 | } 113 | } 114 | 115 | #[derive(Clone, Serialize, Deserialize, Default, FromQueryResult)] 116 | pub struct TeamMemberCount { 117 | pub team_id: u64, 118 | pub total: i64, 119 | } 120 | 121 | pub struct TeamMemberCountList(pub Vec); 122 | 123 | impl TeamMemberCountList { 124 | pub fn get_by_team_id(&self, team_id: u64) -> Option<&TeamMemberCount> { 125 | self.0.iter().find(|&v| v.team_id == team_id) 126 | } 127 | } 128 | 129 | #[derive(Clone, Serialize, Deserialize, Default, FromQueryResult)] 130 | pub struct TeamRecord { 131 | pub id: u64, 132 | pub name: String, 133 | pub info: String, 134 | pub is_admin: Option, 135 | pub created_user: String, 136 | pub updated_user: String, 137 | pub created_time: DateTimeLocal, 138 | pub updated_time: DateTimeLocal, 139 | } 140 | 141 | #[derive(Clone, Serialize, Deserialize)] 142 | pub enum ResourceType { 143 | Job, 144 | 
Instance, 145 | } 146 | 147 | impl Display for ResourceType { 148 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 149 | match self { 150 | ResourceType::Job => write!(f, "job"), 151 | ResourceType::Instance => write!(f, "instance"), 152 | } 153 | } 154 | } 155 | 156 | #[derive(Clone, Serialize, Deserialize, FromQueryResult)] 157 | pub struct TagCount { 158 | pub tag_id: u64, 159 | pub tag_name: String, 160 | pub total: i64, 161 | } 162 | 163 | #[derive(Clone, Serialize, Deserialize, FromQueryResult)] 164 | pub struct TagBind { 165 | pub tag_id: u64, 166 | pub tag_name: String, 167 | pub resource_id: u64, 168 | } 169 | 170 | #[derive(Serialize, Deserialize, Default)] 171 | pub struct CompletedCallbackOpts { 172 | #[serde(default)] 173 | pub trigger_on: CompletedCallbackTriggerType, 174 | pub header: Option>, 175 | pub url: String, 176 | pub enable: bool, 177 | } 178 | 179 | #[derive(Serialize, Deserialize, Default)] 180 | pub enum CompletedCallbackTriggerType { 181 | #[default] 182 | #[serde(rename = "all")] 183 | All, 184 | #[serde(rename = "error")] 185 | Error, 186 | } 187 | -------------------------------------------------------------------------------- /crates/utils/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "utils" 3 | edition = "2024" 4 | 5 | [dependencies] 6 | tokio.workspace = true 7 | tracing.workspace = true 8 | tracing-subscriber.workspace = true 9 | anyhow.workspace = true 10 | -------------------------------------------------------------------------------- /crates/utils/src/lib.rs: -------------------------------------------------------------------------------- 1 | use std::{future::Future, pin::Pin, sync::Arc}; 2 | 3 | use anyhow::{Result, anyhow}; 4 | use tokio::sync::RwLock; 5 | pub mod macros; 6 | 7 | pub async fn async_batch_do(data: Vec, handler: F) -> Vec> 8 | where 9 | F: 'static + Send + Sync + Clone + Fn(I) -> Pin> + Send>>, 10 | I: Send + Sync + 
'static, 11 | T: Clone + Send + Sync + 'static, 12 | { 13 | let data_len = data.len(); 14 | let locked_data = Arc::new(RwLock::new(data)); 15 | let locked_outputs = Arc::new(RwLock::new(Vec::with_capacity(data_len))); 16 | let queue_len = if data_len > 500 { 500 } else { data_len }; 17 | let mut tasks = Vec::with_capacity(queue_len); 18 | 19 | for _ in 0..queue_len { 20 | let locked_data = locked_data.clone(); 21 | let locked_outputs = locked_outputs.clone(); 22 | let handler = handler.clone(); 23 | tasks.push(tokio::spawn(async move { 24 | loop { 25 | let mut queue = locked_data.write().await; 26 | if let Some(val) = queue.pop() { 27 | drop(queue); 28 | let ret = handler(val).await; 29 | let mut outputs = locked_outputs.write().await; 30 | outputs.push(ret); 31 | } else { 32 | return; 33 | } 34 | } 35 | })); 36 | } 37 | 38 | for task in tasks { 39 | let _ = task.await; 40 | } 41 | 42 | let outputs = locked_outputs.read().await; 43 | 44 | let mut ret = Vec::new(); 45 | 46 | outputs.iter().for_each(|v| { 47 | ret.push(match v { 48 | Ok(v) => Ok(v.to_owned()), 49 | Err(e) => Err(anyhow!("{e}")), 50 | }) 51 | }); 52 | 53 | ret 54 | } 55 | 56 | #[tokio::test] 57 | async fn test_async_queue_do() { 58 | use std::time::Duration; 59 | use tokio::time::sleep; 60 | unsafe { 61 | std::env::set_var("RUST_LOG", "debug"); 62 | } 63 | 64 | tracing_subscriber::fmt::init(); 65 | let data = 1..100; 66 | 67 | #[derive(Debug, Clone)] 68 | pub struct QueueResult { 69 | _val: i32, 70 | } 71 | 72 | let ret = async_batch_do(data.clone().collect(), |v| { 73 | Box::pin(async move { 74 | sleep(Duration::from_secs(1)).await; 75 | Ok(QueueResult { _val: v }) 76 | }) 77 | }) 78 | .await; 79 | 80 | println!("result:{:?}, len: {}", ret, ret.len(),) 81 | } 82 | -------------------------------------------------------------------------------- /crates/utils/src/macros.rs: -------------------------------------------------------------------------------- 1 | /// convert DateTime to local time(String) 2 
/// convert DateTime to local time(String)
///
/// Converts any `chrono::DateTime<Tz>` expression to the local timezone and
/// renders the naive local timestamp as a `String`. Requires the caller's
/// crate to depend on `chrono`.
#[macro_export]
macro_rules! local_time {
    ($time:expr) => {
        $time
            .with_timezone(&chrono::Local)
            .naive_local()
            .to_string()
    };
}

/// Extracts the final path component of `$file` as an owned `String`.
///
/// `std::path::PathBuf` is fully qualified so callers no longer need to have
/// `PathBuf` imported in their own scope (the previous expansion referenced a
/// bare `PathBuf`, a macro-hygiene pitfall).
///
/// # Panics
/// Panics if the path has no file-name component (e.g. `/` or a path ending
/// in `..`) or if the file name is not valid UTF-8.
#[macro_export]
macro_rules! file_name {
    ($file:expr) => {
        std::path::PathBuf::from($file)
            .file_name()
            .expect("file_name!: path has no file name component")
            .to_str()
            .expect("file_name!: file name is not valid UTF-8")
            .to_string()
    };
}
45 | depends_on: 46 | - mysql 47 | - redis 48 | container_name: jiascheduler-console 49 | ports: 50 | - "9090:9090" 51 | restart: unless-stopped 52 | volumes: 53 | - $WORKCONF/.jiascheduler:/root/.jiascheduler 54 | command: ["./jiascheduler-console", "--bind-addr", "0.0.0.0:9090"] 55 | jiascheduler-comet: 56 | image: iwannay/jiascheduler 57 | depends_on: 58 | - redis 59 | container_name: jiascheduler-comet 60 | ports: 61 | - "3000:3000" 62 | command: 63 | [ 64 | "./jiascheduler-comet", 65 | "--bind", 66 | "0.0.0.0:3000", 67 | "-r", 68 | "redis://default:3DGiuazc7wkAppV3@redis:6379", 69 | "--secret", 70 | "rYzBYE+cXbtdMg==", 71 | ] 72 | restart: unless-stopped 73 | 74 | jiascheduler-agent: 75 | image: iwannay/jiascheduler 76 | depends_on: 77 | - jiascheduler-comet 78 | container_name: jiascheduler-agent 79 | command: 80 | [ 81 | "./jiascheduler-agent", 82 | "--comet-addr", 83 | "ws://jiascheduler-comet:3000", 84 | "--assign-username", 85 | "guest", 86 | "--assign-password", 87 | "guest", 88 | "--namespace", 89 | "home", 90 | ] 91 | restart: unless-stopped 92 | -------------------------------------------------------------------------------- /examples/supervisor.rs: -------------------------------------------------------------------------------- 1 | use std::{sync::Arc, time::Duration}; 2 | 3 | use tokio::{select, time::sleep}; 4 | use watchexec_supervisor::{ 5 | command::{Command, Program, SpawnOptions}, 6 | job::start_job, 7 | }; 8 | 9 | async fn supervisor_test() { 10 | let code = r#"date; 11 | echo hello world 12 | sleep 5 & 13 | echo end"#; 14 | 15 | let (job, task) = start_job(Arc::new(Command { 16 | program: Program::Exec { 17 | prog: "/usr/bin/bash".into(), 18 | args: vec!["-c".into(), code.into()], 19 | } 20 | .into(), 21 | options: SpawnOptions { 22 | grouped: true, 23 | reset_sigmask: true, 24 | }, 25 | })); 26 | 27 | job.start().await; 28 | job.set_error_handler(|v| { 29 | let e = v.get().unwrap(); 30 | println!("error: {e}"); 31 | }); 32 | 33 | let 
clone_job = job.clone(); 34 | 35 | tokio::spawn(async move { 36 | loop { 37 | select! { 38 | _v = clone_job.to_wait() => { 39 | if clone_job.is_dead() { 40 | return; 41 | } 42 | sleep(Duration::from_secs(1)).await; 43 | clone_job.start().await; 44 | } 45 | } 46 | } 47 | }); 48 | 49 | sleep(Duration::from_secs(10)).await; 50 | job.stop(); 51 | job.delete_now(); 52 | 53 | let _ = task.await.expect("failed to wait join finished"); 54 | sleep(Duration::from_secs(5)).await; 55 | } 56 | 57 | #[tokio::main] 58 | async fn main() { 59 | supervisor_test().await; 60 | } 61 | -------------------------------------------------------------------------------- /examples/ws.rs: -------------------------------------------------------------------------------- 1 | use automate::bridge::{Bridge, client::WsClient}; 2 | use futures_util::{ 3 | StreamExt, 4 | stream::{SplitSink, SplitStream}, 5 | }; 6 | use local_ip_address::local_ip; 7 | use poem::{ 8 | EndpointExt, IntoResponse, Route, Server, get, handler, 9 | listener::TcpListener, 10 | web::{ 11 | Data, Html, 12 | websocket::{Message, WebSocket, WebSocketStream}, 13 | }, 14 | }; 15 | use tracing::info; 16 | 17 | #[handler] 18 | fn index() -> Html<&'static str> { 19 | Html( 20 | r###" 21 | 22 |
23 | Name: 24 | 25 |
26 | 27 | 31 | 32 | 33 | 34 | 63 | "###, 64 | ) 65 | } 66 | 67 | #[handler] 68 | fn ws(ws: WebSocket, mut bridge: Data<&Bridge>) -> impl IntoResponse { 69 | let mut bridge = bridge.clone(); 70 | ws.on_upgrade(move |socket| async move { 71 | let (mut sink, mut stream) = socket.split(); 72 | let mut client: WsClient< 73 | SplitSink, 74 | SplitStream, 75 | > = WsClient::new(Some(bridge.clone())); 76 | client 77 | .set_namespace(String::from("default")) 78 | .set_local_ip(local_ip().expect("failed get local ip")); 79 | bridge 80 | .append_client("hello".to_string(), client.sender()) 81 | .await; 82 | client.set_rw(sink, stream); 83 | client.start_processing_to_client_msg(); 84 | client.recv(|_x| async { todo!() }).await; 85 | }) 86 | } 87 | 88 | #[tokio::main] 89 | async fn main() { 90 | if std::env::var_os("RUST_LOG").is_none() { 91 | unsafe { 92 | std::env::set_var("RUST_LOG", "comet=debug"); 93 | } 94 | } 95 | tracing_subscriber::fmt::init(); 96 | info!("start es example"); 97 | let bridge = Bridge::new(); 98 | let app = Route::new().at("/", get(index)).at("/ws", ws.data(bridge)); 99 | 100 | Server::new(TcpListener::bind("0.0.0.0:3000")) 101 | .run(app) 102 | .await 103 | .expect("failed start server"); 104 | 105 | println!("hello world"); 106 | } 107 | -------------------------------------------------------------------------------- /migration/.env: -------------------------------------------------------------------------------- 1 | DATABASE_URL=mysql://root:root@127.0.0.1:3306/jiascheduler-v1.0.0 -------------------------------------------------------------------------------- /migration/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "migration" 3 | edition = "2024" 4 | publish = false 5 | 6 | [lib] 7 | name = "migration" 8 | path = "src/lib.rs" 9 | 10 | [dependencies] 11 | async-std = { version = "1", features = ["attributes", "tokio1"] } 12 | 13 | [dependencies.sea-orm-migration] 14 | version = 
"1.0.0" 15 | features = [ 16 | # Enable at least one `ASYNC_RUNTIME` and `DATABASE_DRIVER` feature if you want to run migration via CLI. 17 | # View the list of supported features at https://www.sea-ql.org/SeaORM/docs/install-and-config/database-and-async-runtime. 18 | # e.g. 19 | "runtime-tokio-rustls", # `ASYNC_RUNTIME` feature 20 | # "sqlx-postgres", # `DATABASE_DRIVER` feature 21 | "sqlx-mysql", 22 | ] 23 | -------------------------------------------------------------------------------- /migration/README.md: -------------------------------------------------------------------------------- 1 | # Running Migrator CLI 2 | 3 | - Generate a new migration file 4 | ```sh 5 | cargo run -- generate MIGRATION_NAME 6 | ``` 7 | - Apply all pending migrations 8 | ```sh 9 | cargo run 10 | ``` 11 | ```sh 12 | cargo run -- up 13 | ``` 14 | - Apply first 10 pending migrations 15 | ```sh 16 | cargo run -- up -n 10 17 | ``` 18 | - Rollback last applied migrations 19 | ```sh 20 | cargo run -- down 21 | ``` 22 | - Rollback last 10 applied migrations 23 | ```sh 24 | cargo run -- down -n 10 25 | ``` 26 | - Drop all tables from the database, then reapply all migrations 27 | ```sh 28 | cargo run -- fresh 29 | ``` 30 | - Rollback all applied migrations, then reapply all migrations 31 | ```sh 32 | cargo run -- refresh 33 | ``` 34 | - Rollback all applied migrations 35 | ```sh 36 | cargo run -- reset 37 | ``` 38 | - Check the status of all migrations 39 | ```sh 40 | cargo run -- status 41 | ``` 42 | -------------------------------------------------------------------------------- /migration/sql/m20250412_add_job_soft_deleted/down.sql: -------------------------------------------------------------------------------- 1 | alter table job 2 | drop column is_deleted, 3 | drop column deleted_at, 4 | drop column deleted_by; 5 | 6 | alter table job_timer 7 | drop column is_deleted, 8 | drop column deleted_at, 9 | drop column deleted_by; 10 | 11 | alter table job_supervisor 12 | drop column 
is_deleted, 13 | drop column deleted_at, 14 | drop column deleted_by; 15 | 16 | alter table job_bundle_script 17 | drop column is_deleted, 18 | drop column deleted_at, 19 | drop column deleted_by; 20 | 21 | alter table job_schedule_history 22 | drop column is_deleted, 23 | drop column deleted_at, 24 | drop column deleted_by; 25 | 26 | alter table job_running_status 27 | drop column is_deleted, 28 | drop column deleted_at, 29 | drop column deleted_by; 30 | -------------------------------------------------------------------------------- /migration/sql/m20250412_add_job_soft_deleted/up.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE job 2 | ADD COLUMN is_deleted BOOLEAN NOT NULL DEFAULT FALSE, 3 | ADD COLUMN deleted_at TIMESTAMP NULL DEFAULT NULL, 4 | ADD COLUMN deleted_by varchar(50) NOT NULL DEFAULT '' COMMENT '删除人'; 5 | 6 | ALTER TABLE job_timer 7 | ADD COLUMN is_deleted BOOLEAN NOT NULL DEFAULT FALSE, 8 | ADD COLUMN deleted_at TIMESTAMP NULL DEFAULT NULL, 9 | ADD COLUMN deleted_by varchar(50) NOT NULL DEFAULT '' COMMENT '删除人'; 10 | 11 | ALTER TABLE job_supervisor 12 | ADD COLUMN is_deleted BOOLEAN NOT NULL DEFAULT FALSE, 13 | ADD COLUMN deleted_at TIMESTAMP NULL DEFAULT NULL, 14 | ADD COLUMN deleted_by varchar(50) NOT NULL DEFAULT '' COMMENT '删除人'; 15 | 16 | ALTER TABLE job_bundle_script 17 | ADD COLUMN is_deleted BOOLEAN NOT NULL DEFAULT FALSE, 18 | ADD COLUMN deleted_at TIMESTAMP NULL DEFAULT NULL, 19 | ADD COLUMN deleted_by varchar(50) NOT NULL DEFAULT '' COMMENT '删除人'; 20 | 21 | ALTER TABLE job_schedule_history 22 | ADD COLUMN is_deleted BOOLEAN NOT NULL DEFAULT FALSE, 23 | ADD COLUMN deleted_at TIMESTAMP NULL DEFAULT NULL, 24 | ADD COLUMN deleted_by varchar(50) NOT NULL DEFAULT '' COMMENT '删除人'; 25 | 26 | ALTER TABLE job_running_status 27 | ADD COLUMN is_deleted BOOLEAN NOT NULL DEFAULT FALSE, 28 | ADD COLUMN deleted_at TIMESTAMP NULL DEFAULT NULL, 29 | ADD COLUMN deleted_by varchar(50) NOT NULL DEFAULT '' 
COMMENT '删除人'; 30 | -------------------------------------------------------------------------------- /migration/sql/m20250420_modify_job_index/down.sql: -------------------------------------------------------------------------------- 1 | show 2 | create table job; 3 | -------------------------------------------------------------------------------- /migration/sql/m20250420_modify_job_index/up.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE job 2 | drop index uk_name, 3 | add index idx_name (team_id, name); 4 | 5 | ALTER TABLE job_bundle_script 6 | drop index uk_name, 7 | add index idx_name (team_id, name); 8 | 9 | ALTER TABLE job_timer 10 | drop index uk_name; 11 | 12 | ALTER TABLE job_supervisor 13 | drop index uk_name; 14 | -------------------------------------------------------------------------------- /migration/sql/v1_0_0/down.sql: -------------------------------------------------------------------------------- 1 | -- Active: 1717845081831@@127.0.0.1@3306@jiascheduler 2 | DROP TABLE IF EXISTS `user`; 3 | 4 | DROP TABLE IF EXISTS `agent_release_version`; 5 | 6 | DROP TABLE IF EXISTS `instance`; 7 | 8 | DROP TABLE IF EXISTS `instance_group`; 9 | 10 | DROP TABLE IF EXISTS `instance_role`; 11 | 12 | DROP TABLE IF EXISTS `role`; 13 | 14 | DROP TABLE IF EXISTS `user_server`; 15 | 16 | DROP TABLE IF EXISTS `job_timer`; 17 | 18 | DROP TABLE IF EXISTS `job`; 19 | 20 | DROP TABLE IF EXISTS `job_bundle_script`; 21 | 22 | DROP TABLE IF EXISTS `executor`; 23 | 24 | DROP TABLE IF EXISTS `job_exec_history`; 25 | 26 | DROP TABLE IF EXISTS `job_organizer`; 27 | 28 | DROP TABLE IF EXISTS `job_organizer_process`; 29 | 30 | DROP TABLE IF EXISTS `job_organizer_release`; 31 | 32 | DROP TABLE IF EXISTS `job_organizer_release_edge`; 33 | 34 | DROP TABLE IF EXISTS `job_organizer_release_node`; 35 | 36 | DROP TABLE IF EXISTS `job_organizer_task`; 37 | 38 | DROP TABLE IF EXISTS `job_organizer_task_result`; 39 | 40 | DROP TABLE IF 
EXISTS `job_running_status`; 41 | 42 | DROP TABLE IF EXISTS `job_schedule_history`; -------------------------------------------------------------------------------- /migration/sql/v1_1_0_001/down.sql: -------------------------------------------------------------------------------- 1 | 2 | DROP TABLE IF EXISTS `tag`; 3 | DROP TABLE IF EXISTS `tag_resource`; 4 | -------------------------------------------------------------------------------- /migration/sql/v1_1_0_001/up.sql: -------------------------------------------------------------------------------- 1 | 2 | DROP TABLE IF EXISTS `tag`; 3 | 4 | CREATE TABLE `tag` ( 5 | `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT '自增id', 6 | `tag_name` varchar(40) NOT NULL DEFAULT '' COMMENT '调度uuid', 7 | `created_user` varchar(50) NOT NULL DEFAULT '' COMMENT '创建人', 8 | `created_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', 9 | PRIMARY KEY (`id`), 10 | UNIQUE KEY `uk_tag_name` ( `tag_name`) 11 | ) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4 COMMENT = '标签'; 12 | 13 | DROP TABLE IF EXISTS `tag_resource`; 14 | CREATE TABLE `tag_resource` ( 15 | `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT '自增id', 16 | `tag_id` bigint unsigned NOT NULL DEFAULT 0 COMMENT '标签id', 17 | `resource_type` varchar(40) NOT NULL DEFAULT '' COMMENT '资源类型', 18 | `resource_id` BIGINT UNSIGNED NOT NULL DEFAULT 0 COMMENT '标签值id', 19 | `created_user` varchar(50) NOT NULL DEFAULT '' COMMENT '创建人', 20 | `created_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', 21 | PRIMARY KEY (`id`), 22 | UNIQUE KEY `uk_tag_id` (`resource_type`, `tag_id`, `resource_id`) 23 | ) ENGINE = InnoDB DEFAULT CHARSET = utf8mb4 COMMENT = '标签绑定'; 24 | 25 | -------------------------------------------------------------------------------- /migration/sql/v1_1_0_002/down.sql: -------------------------------------------------------------------------------- 1 | alter Table `job` 2 | DROP COLUMN `completed_callback`; 3 | 4 | ALTER TABLE 
`job_exec_history` DROP COLUMN `run_id`; -------------------------------------------------------------------------------- /migration/sql/v1_1_0_002/up.sql: -------------------------------------------------------------------------------- 1 | alter Table `job` 2 | ADD COLUMN `completed_callback` JSON DEFAULT NULL COMMENT '任务完成回调'; 3 | 4 | ALTER TABLE `job_exec_history` ADD COLUMN `run_id` VARCHAR(50) NOT NULL DEFAULT '' COMMENT '任务运行id'; -------------------------------------------------------------------------------- /migration/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub use sea_orm_migration::prelude::*; 2 | 3 | mod m20250412_add_job_soft_deleted; 4 | mod m20250420_modify_job_index; 5 | mod v1_0_0_create_table; 6 | mod v1_1_0_001_create_table; 7 | mod v1_1_0_002_create_table; 8 | pub struct Migrator; 9 | 10 | #[async_trait::async_trait] 11 | impl MigratorTrait for Migrator { 12 | fn migrations() -> Vec> { 13 | vec![ 14 | Box::new(v1_0_0_create_table::Migration), 15 | Box::new(v1_1_0_001_create_table::Migration), 16 | Box::new(v1_1_0_002_create_table::Migration), 17 | Box::new(m20250412_add_job_soft_deleted::Migration), 18 | Box::new(m20250420_modify_job_index::Migration), 19 | ] 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /migration/src/m20250412_add_job_soft_deleted.rs: -------------------------------------------------------------------------------- 1 | use sea_orm_migration::prelude::*; 2 | 3 | #[derive(DeriveMigrationName)] 4 | pub struct Migration; 5 | 6 | #[async_trait::async_trait] 7 | impl MigrationTrait for Migration { 8 | async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { 9 | let db = manager.get_connection(); 10 | let sql = include_str!("../sql/m20250412_add_job_soft_deleted/up.sql"); 11 | db.execute_unprepared(sql).await?; 12 | Ok(()) 13 | } 14 | 15 | async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { 16 | let 
db = manager.get_connection(); 17 | let sql = include_str!("../sql/m20250412_add_job_soft_deleted/down.sql"); 18 | db.execute_unprepared(sql).await?; 19 | Ok(()) 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /migration/src/m20250420_modify_job_index.rs: -------------------------------------------------------------------------------- 1 | use sea_orm_migration::prelude::*; 2 | 3 | #[derive(DeriveMigrationName)] 4 | pub struct Migration; 5 | 6 | #[async_trait::async_trait] 7 | impl MigrationTrait for Migration { 8 | async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { 9 | let db = manager.get_connection(); 10 | let sql = include_str!("../sql/m20250420_modify_job_index/up.sql"); 11 | db.execute_unprepared(sql).await?; 12 | Ok(()) 13 | } 14 | 15 | async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { 16 | let db = manager.get_connection(); 17 | let sql = include_str!("../sql/m20250420_modify_job_index/down.sql"); 18 | db.execute_unprepared(sql).await?; 19 | Ok(()) 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /migration/src/main.rs: -------------------------------------------------------------------------------- 1 | use sea_orm_migration::prelude::*; 2 | 3 | #[async_std::main] 4 | async fn main() { 5 | cli::run_cli(migration::Migrator).await; 6 | } 7 | -------------------------------------------------------------------------------- /migration/src/v1_0_0_create_table.rs: -------------------------------------------------------------------------------- 1 | use sea_orm_migration::prelude::*; 2 | 3 | #[derive(DeriveMigrationName)] 4 | pub struct Migration; 5 | 6 | #[async_trait::async_trait] 7 | impl MigrationTrait for Migration { 8 | async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { 9 | let db = manager.get_connection(); 10 | let sql = include_str!("../sql/v1_0_0/up.sql"); 11 | db.execute_unprepared(sql).await?; 12 | 
Ok(()) 13 | } 14 | 15 | async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { 16 | let db = manager.get_connection(); 17 | let sql = include_str!("../sql/v1_0_0/down.sql"); 18 | db.execute_unprepared(sql).await?; 19 | Ok(()) 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /migration/src/v1_1_0_001_create_table.rs: -------------------------------------------------------------------------------- 1 | use sea_orm_migration::prelude::*; 2 | 3 | #[derive(DeriveMigrationName)] 4 | pub struct Migration; 5 | 6 | #[async_trait::async_trait] 7 | impl MigrationTrait for Migration { 8 | async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { 9 | let db = manager.get_connection(); 10 | let sql = include_str!("../sql/v1_1_0_001/up.sql"); 11 | db.execute_unprepared(sql).await?; 12 | Ok(()) 13 | } 14 | 15 | async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { 16 | let db = manager.get_connection(); 17 | let sql = include_str!("../sql/v1_1_0_001/down.sql"); 18 | db.execute_unprepared(sql).await?; 19 | Ok(()) 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /migration/src/v1_1_0_002_create_table.rs: -------------------------------------------------------------------------------- 1 | use sea_orm_migration::prelude::*; 2 | 3 | #[derive(DeriveMigrationName)] 4 | pub struct Migration; 5 | 6 | #[async_trait::async_trait] 7 | impl MigrationTrait for Migration { 8 | async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { 9 | let db = manager.get_connection(); 10 | let sql = include_str!("../sql/v1_1_0_002/up.sql"); 11 | db.execute_unprepared(sql).await?; 12 | Ok(()) 13 | } 14 | 15 | async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { 16 | let db = manager.get_connection(); 17 | let sql = include_str!("../sql/v1_1_0_002/down.sql"); 18 | db.execute_unprepared(sql).await?; 19 | Ok(()) 20 | } 21 | } 22 | 
-------------------------------------------------------------------------------- /openapi-derive/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "openapi-derive" 3 | edition = "2024" 4 | publish = false 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | 9 | [lib] 10 | proc-macro = true 11 | 12 | [dependencies] 13 | poem.workspace = true 14 | poem-openapi.workspace = true 15 | tokio.workspace = true 16 | serde_json.workspace = true 17 | serde.workspace = true 18 | anyhow.workspace = true 19 | thiserror.workspace = true 20 | syn.workspace = true 21 | quote.workspace = true 22 | proc-macro2.workspace = true 23 | -------------------------------------------------------------------------------- /openapi-derive/src/lib.rs: -------------------------------------------------------------------------------- 1 | use proc_macro::TokenStream; 2 | use proc_macro2::Ident; 3 | use quote::quote; 4 | use syn::{parse_macro_input, DeriveInput}; 5 | 6 | #[proc_macro_derive(ApiStdResponse)] 7 | pub fn derive_api_std_response(input: TokenStream) -> TokenStream { 8 | let input = parse_macro_input!(input as DeriveInput); 9 | let name = input.ident; 10 | 11 | let response_result = Ident::new(&format!("{}Result", name), name.span()); 12 | let response_name = Ident::new(&format!("{}Response", name), name.span()); 13 | 14 | let v = quote! { 15 | 16 | #[derive(Object, Default)] 17 | pub struct #response_name { 18 | pub code: u32, 19 | pub status: bool, 20 | pub message: String, 21 | pub data:Option<#name> 22 | } 23 | 24 | #[derive(poem_openapi::ApiResponse)] 25 | pub enum #response_result { 26 | /// Returns when the pet is successfully created. 
27 | #[oai(status = 200)] 28 | Success(poem_openapi::payload::Json<#response_name>), 29 | #[oai(status = 500)] 30 | Error(poem_openapi::payload::Json) 31 | } 32 | 33 | }; 34 | 35 | v.into() 36 | } 37 | 38 | #[proc_macro] 39 | pub fn return_success(input: TokenStream) -> TokenStream { 40 | let mut iter = input.into_iter(); 41 | 42 | let ty = iter.nth(0).unwrap().to_string(); 43 | let name = iter.nth(1).unwrap().to_string(); 44 | 45 | let ret = format!( 46 | r##" 47 | {ty}Result::Success(poem_openapi::payload::Json( 48 | {ty}Response {{ 49 | code: 0000, 50 | status: true, 51 | data: Some({name}), 52 | message: "success".to_string(), 53 | }}, 54 | )) 55 | "## 56 | ); 57 | 58 | ret.parse().unwrap() 59 | } 60 | -------------------------------------------------------------------------------- /openapi-derive/tests/test.rs: -------------------------------------------------------------------------------- 1 | #[cfg(test)] 2 | mod tests { 3 | 4 | #[test] 5 | fn test_return_response() { 6 | struct User { 7 | #[allow(unused)] 8 | name: String, 9 | } 10 | let _user = User { 11 | name: "hello world".to_string(), 12 | }; 13 | 14 | // return_response!(User, user); 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /openapi/.env: -------------------------------------------------------------------------------- 1 | DATABASE_URL=mysql://root:root@127.0.0.1:3306/jiascheduler -------------------------------------------------------------------------------- /openapi/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "openapi" 3 | edition = "2021" 4 | 5 | 6 | [features] 7 | 8 | 9 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 10 | 11 | [dependencies] 12 | poem.workspace = true 13 | poem-openapi.workspace = true 14 | tokio.workspace = true 15 | serde_json.workspace = true 16 | serde.workspace = true 17 | anyhow.workspace = 
true 18 | thiserror.workspace = true 19 | openapi-derive.workspace = true 20 | tracing-subscriber.workspace = true 21 | sea-orm.workspace = true 22 | # diesel.workspace = true 23 | nanoid.workspace = true 24 | futures.workspace = true 25 | serde_repr.workspace = true 26 | russh.workspace = true 27 | russh-sftp.workspace = true 28 | russh-keys.workspace = true 29 | tracing.workspace = true 30 | async-trait.workspace = true 31 | config.workspace = true 32 | redis.workspace = true 33 | # redis = { version = "0.25.3", features = [ 34 | # "aio", 35 | # "tokio-comp", 36 | # "connection-manager", 37 | # ] } 38 | casbin = "*" 39 | rust-crypto.workspace = true 40 | chrono.workspace = true 41 | automate.workspace = true 42 | reqwest.workspace = true 43 | sea-query.workspace = true 44 | rust-embed.workspace = true 45 | evalexpr.workspace = true 46 | sea-orm-adapter.workspace = true 47 | simple_crypt.workspace = true 48 | rustc-serialize.workspace = true 49 | toml.workspace = true 50 | url.workspace = true 51 | shellexpand.workspace = true 52 | migration.workspace = true 53 | git-version.workspace = true 54 | http.workspace = true 55 | tokio-tungstenite.workspace = true 56 | sql-builder.workspace = true 57 | leader-election.workspace = true 58 | entity.workspace = true 59 | service.workspace = true 60 | 61 | 62 | [target.'cfg(unix)'.dependencies] 63 | termion = "*" 64 | -------------------------------------------------------------------------------- /openapi/src/api.rs: -------------------------------------------------------------------------------- 1 | pub mod executor; 2 | pub mod file; 3 | pub mod instance; 4 | pub mod job; 5 | pub mod manage; 6 | pub mod migration; 7 | pub mod role; 8 | pub mod tag; 9 | pub mod team; 10 | pub mod terminal; 11 | pub mod user; 12 | 13 | mod utils; 14 | 15 | use poem_openapi::{Tags, Validator}; 16 | use std::fmt::{self, Display, Formatter}; 17 | 18 | pub fn default_page() -> u64 { 19 | 1 20 | } 21 | 22 | pub fn default_page_size() -> u64 { 23 
| 20 24 | } 25 | 26 | pub fn default_option_page() -> Option { 27 | Some(1) 28 | } 29 | 30 | pub fn default_option_page_size() -> Option { 31 | Some(20) 32 | } 33 | 34 | #[derive(Tags)] 35 | pub enum Tag { 36 | User, 37 | Team, 38 | Job, 39 | Executor, 40 | Instance, 41 | File, 42 | Role, 43 | Admin, 44 | Migration, 45 | Tag, 46 | } 47 | 48 | pub struct OneOfValidator(Vec); 49 | 50 | impl OneOfValidator { 51 | pub fn new(v: Vec<&str>) -> Self { 52 | Self(v.into_iter().map(|v| v.to_owned()).collect()) 53 | } 54 | } 55 | 56 | impl Display for OneOfValidator { 57 | fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { 58 | f.write_str(format!("OneOfValidator: {:?}", self.0).as_str()) 59 | } 60 | } 61 | 62 | impl Validator for OneOfValidator { 63 | fn check(&self, value: &String) -> bool { 64 | self.0.contains(value) 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /openapi/src/api/executor.rs: -------------------------------------------------------------------------------- 1 | use crate::{entity::executor, local_time, logic, response::ApiStdResponse, return_ok, AppState}; 2 | use poem::{session::Session, web::Data, Result}; 3 | use poem_openapi::{param::Query, payload::Json, OpenApi}; 4 | use sea_orm::{ActiveValue::NotSet, Set}; 5 | 6 | mod types { 7 | use poem_openapi::Object; 8 | use serde::Serialize; 9 | 10 | #[derive(Object, Serialize, Default)] 11 | pub struct DeleteExecutorReq { 12 | pub id: u32, 13 | } 14 | 15 | #[derive(Object, Serialize, Default)] 16 | pub struct SaveExecutorReq { 17 | pub id: Option, 18 | #[oai(validator(min_length = 1))] 19 | pub name: String, 20 | pub command: String, 21 | pub platform: String, 22 | pub info: String, 23 | pub read_code_from_stdin: Option, 24 | } 25 | 26 | #[derive(Object, Serialize, Default)] 27 | pub struct SaveExecutorRes { 28 | pub result: u64, 29 | } 30 | 31 | #[derive(Object, Serialize, Default)] 32 | pub struct QueryExecutorResp { 33 | pub total: u64, 34 | pub list: 
Vec, 35 | } 36 | 37 | #[derive(Object, Serialize, Default)] 38 | pub struct ExecutorRecord { 39 | pub id: u64, 40 | pub name: String, 41 | pub command: String, 42 | pub platform: String, 43 | pub info: String, 44 | pub created_time: String, 45 | pub updated_time: String, 46 | } 47 | } 48 | 49 | pub struct ExecutorApi; 50 | 51 | #[OpenApi(prefix_path = "/executor", tag = super::Tag::Executor)] 52 | impl ExecutorApi { 53 | #[oai(path = "/delete", method = "post")] 54 | pub async fn delete_executor( 55 | &self, 56 | state: Data<&AppState>, 57 | Json(req): Json, 58 | ) -> Result> { 59 | let svc = state.service(); 60 | let ret = svc.executor.delete_job(req.id).await?; 61 | return_ok!(ret) 62 | } 63 | 64 | #[oai(path = "/save", method = "post")] 65 | pub async fn save_executor( 66 | &self, 67 | state: Data<&AppState>, 68 | _session: &Session, 69 | user_info: Data<&logic::types::UserInfo>, 70 | Json(req): Json, 71 | ) -> Result> { 72 | let svc = state.service(); 73 | 74 | let ret = svc 75 | .executor 76 | .save_executor(executor::ActiveModel { 77 | id: req.id.filter(|v| *v != 0).map_or(NotSet, |v| Set(v)), 78 | name: Set(req.name), 79 | command: Set(req.command), 80 | platform: Set(req.platform), 81 | info: Set(req.info), 82 | read_code_from_stdin: Set(req.read_code_from_stdin.map_or(0, |v| match v { 83 | true => 1, 84 | false => 0, 85 | })), 86 | created_user: Set(user_info.username.clone()), 87 | updated_user: Set(user_info.username.clone()), 88 | ..Default::default() 89 | }) 90 | .await?; 91 | 92 | return_ok!(types::SaveExecutorRes { 93 | result: ret.id.as_ref().to_owned() 94 | }); 95 | } 96 | 97 | #[oai(path = "/list", method = "get")] 98 | pub async fn query_executor( 99 | &self, 100 | state: Data<&AppState>, 101 | _session: &Session, 102 | 103 | Query(default_id): Query>, 104 | 105 | #[oai( 106 | default = "crate::api::default_page", 107 | validator(maximum(value = "10000")) 108 | )] 109 | Query(page): Query, 110 | Query(name): Query>, 111 | 112 | #[oai( 113 | 
default = "crate::api::default_page_size", 114 | validator(maximum(value = "10000")) 115 | )] 116 | Query(page_size): Query, 117 | _user_info: Data<&logic::types::UserInfo>, 118 | ) -> Result> { 119 | let svc = state.service(); 120 | let ret = svc 121 | .executor 122 | .query_executor(default_id, name, page - 1, page_size) 123 | .await?; 124 | 125 | let list: Vec = ret 126 | .0 127 | .into_iter() 128 | .map(|v: executor::Model| types::ExecutorRecord { 129 | id: v.id, 130 | name: v.name, 131 | command: v.command, 132 | platform: v.platform, 133 | info: v.info, 134 | created_time: local_time!(v.created_time), 135 | updated_time: local_time!(v.updated_time), 136 | }) 137 | .collect(); 138 | return_ok!(types::QueryExecutorResp { 139 | total: ret.1, 140 | list: list, 141 | }) 142 | } 143 | } 144 | -------------------------------------------------------------------------------- /openapi/src/api/migration.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Context; 2 | use migration::MigratorTrait; 3 | use nanoid::nanoid; 4 | use poem::{session::Session, web::Data, Result}; 5 | use poem_openapi::{param::Query, payload::Json, OpenApi}; 6 | use redis::Client; 7 | use sea_orm::{ConnectOptions, Database}; 8 | use tokio::sync::mpsc::Sender; 9 | use url::Url; 10 | 11 | use crate::{ 12 | api_response, 13 | logic::{self, user::UserLogic}, 14 | response::{std_into_error, ApiStdResponse}, 15 | return_err, return_ok, AppState, InstallState, 16 | }; 17 | use service::config::Conf; 18 | 19 | mod types { 20 | use poem_openapi::Object; 21 | use serde::{Deserialize, Serialize}; 22 | 23 | #[derive(Object, Serialize, Deserialize)] 24 | pub struct UpgradeVersionReq { 25 | pub version: String, 26 | } 27 | 28 | #[derive(Object, Serialize, Deserialize)] 29 | pub struct UpgradeVersionResp { 30 | pub result: u64, 31 | } 32 | 33 | #[derive(Object, Serialize, Default)] 34 | pub struct QueryVersionResp { 35 | pub total: u64, 36 | pub list: Vec, 37 | 
} 38 | 39 | #[derive(Object, Serialize, Default, Clone)] 40 | pub struct VersionRecord { 41 | pub name: String, 42 | pub info: String, 43 | } 44 | 45 | #[derive(Object, Serialize, Default)] 46 | pub struct GetDatabaseResp { 47 | pub name: String, 48 | pub sql: String, 49 | } 50 | 51 | #[derive(Object, Serialize, Default)] 52 | pub struct InstallResp { 53 | pub result: i32, 54 | } 55 | 56 | fn default_up() -> String { 57 | "up".to_string() 58 | } 59 | 60 | #[derive(Object, Serialize, Default)] 61 | pub struct InstallReq { 62 | #[oai(validator(min_length = 1, max_length = 50))] 63 | pub username: String, 64 | #[oai(validator(min_length = 1, max_length = 50))] 65 | pub password: String, 66 | pub database_url: String, 67 | pub redis_url: String, 68 | pub bind_addr: String, 69 | pub comet_secret: String, 70 | #[oai(default = "default_up")] 71 | pub migration_type: String, 72 | } 73 | 74 | #[derive(Object, Serialize, Default)] 75 | pub struct CheckVersionResp { 76 | pub config_file: Option, 77 | pub is_installed: bool, 78 | pub current_version: String, 79 | pub bind_addr: String, 80 | pub need_upgrade: bool, 81 | } 82 | } 83 | 84 | pub struct MigrationApi; 85 | 86 | #[OpenApi(prefix_path = "/migration", tag = super::Tag::Migration)] 87 | impl MigrationApi { 88 | #[oai(path = "/version/upgrade", method = "post")] 89 | pub async fn upgrade_version( 90 | &self, 91 | _user_info: Data<&logic::types::UserInfo>, 92 | _session: &Session, 93 | state: Data<&AppState>, 94 | Json(req): Json, 95 | ) -> Result> { 96 | let svc = state.service(); 97 | let ret = svc.migration.migrate(&req.version).await?; 98 | return_ok!(types::UpgradeVersionResp { result: ret }) 99 | } 100 | 101 | #[oai(path = "/version/list", method = "get")] 102 | pub async fn query_version( 103 | &self, 104 | _user_info: Data<&logic::types::UserInfo>, 105 | _session: &Session, 106 | 107 | Query(name): Query>, 108 | #[oai( 109 | default = "crate::api::default_page_size", 110 | validator(maximum(value = "10000")) 111 | 
)] 112 | Query(page_size): Query, 113 | #[oai( 114 | default = "crate::api::default_page", 115 | validator(maximum(value = "10000")) 116 | )] 117 | Query(page): Query, 118 | state: Data<&AppState>, 119 | ) -> api_response!(types::QueryVersionResp) { 120 | let svc = state.service(); 121 | let ret = svc.migration.query_version(name, page, page_size)?; 122 | let list = ret 123 | .0 124 | .into_iter() 125 | .map(|v| types::VersionRecord { 126 | name: v.name, 127 | info: v.info, 128 | }) 129 | .collect(); 130 | 131 | return_ok!(types::QueryVersionResp { total: ret.1, list }) 132 | } 133 | 134 | #[oai(path = "/database/get", method = "get")] 135 | pub async fn get_database( 136 | &self, 137 | _user_info: Data<&logic::types::UserInfo>, 138 | _session: &Session, 139 | Query(name): Query, 140 | state: Data<&AppState>, 141 | ) -> api_response!(types::GetDatabaseResp) { 142 | let svc = state.service(); 143 | let ret = svc.migration.get_database(&name).await?; 144 | 145 | match ret { 146 | Some(v) => return_ok!(types::GetDatabaseResp { 147 | name: v.0, 148 | sql: v.1, 149 | }), 150 | None => return_err!("not found"), 151 | } 152 | } 153 | 154 | #[oai(path = "/version/check", method = "get")] 155 | pub async fn check_version( 156 | &self, 157 | install_state: Data<&InstallState>, 158 | state: Data<&AppState>, 159 | ) -> Result> { 160 | let need_upgrade = if install_state.is_installed { 161 | !migration::Migrator::get_pending_migrations(&state.db) 162 | .await 163 | .map_err(std_into_error)? 
164 | .is_empty() 165 | } else { 166 | false 167 | }; 168 | 169 | return_ok!(types::CheckVersionResp { 170 | is_installed: install_state.is_installed, 171 | current_version: install_state.current_version.clone(), 172 | bind_addr: install_state.bind_addr.clone(), 173 | config_file: install_state.config_file.clone(), 174 | need_upgrade 175 | }) 176 | } 177 | 178 | #[oai(path = "/install", method = "post")] 179 | pub async fn install( 180 | &self, 181 | install_state: Data<&InstallState>, 182 | Json(req): Json, 183 | tx: Data<&Sender<()>>, 184 | ) -> Result> { 185 | // 1. connect database 186 | let database_url = Url::parse(&req.database_url).context("failed parse database url")?; 187 | 188 | let opt = ConnectOptions::new(database_url); 189 | let conn = Database::connect(opt) 190 | .await 191 | .context("failed connect database")?; 192 | 193 | // 2. connect redis 194 | let redis_url = Url::parse(&req.redis_url).context("failed parse redis url")?; 195 | Client::open(redis_url).context("failed connect to redis")?; 196 | 197 | if req.migration_type == "up" { 198 | migration::Migrator::up(&conn, None) 199 | .await 200 | .context("failed migrate database")?; 201 | } 202 | 203 | // 2. create admin user 204 | let _ = UserLogic::init_admin(&conn, &req.username, &req.password) 205 | .await 206 | .context("failed create admin user")?; 207 | 208 | // 3. 
generate config file 209 | let mut conf = Conf::default(); 210 | conf.database_url = req.database_url; 211 | conf.redis_url = req.redis_url; 212 | conf.bind_addr = req.bind_addr; 213 | conf.admin.username = req.username; 214 | conf.admin.password = req.password; 215 | conf.comet_secret = req.comet_secret; 216 | conf.encrypt.private_key = nanoid!(); 217 | conf.sync2file(install_state.config_file.clone()) 218 | .context("failed save config file")?; 219 | 220 | tx.send(()).await.context("failed send install signal")?; 221 | return_ok!(types::InstallResp { result: 0 }) 222 | } 223 | } 224 | -------------------------------------------------------------------------------- /openapi/src/api/tag.rs: -------------------------------------------------------------------------------- 1 | use poem::{web::Data, Endpoint, EndpointExt}; 2 | use poem_openapi::{ 3 | param::{Header, Query}, 4 | payload::Json, 5 | OpenApi, 6 | }; 7 | use types::{BindTagResp, UnbindTagResp}; 8 | 9 | use crate::{ 10 | api_response, 11 | logic::{self, types::ResourceType}, 12 | middleware, return_ok, 13 | state::AppState, 14 | }; 15 | 16 | pub mod types { 17 | use poem_openapi::{Enum, Object}; 18 | use serde::{Deserialize, Serialize}; 19 | 20 | #[derive(Object, Deserialize, Serialize)] 21 | pub struct BindTagReq { 22 | pub resource_id: u64, 23 | pub resource_type: ResourceType, 24 | pub tag_name: String, 25 | } 26 | 27 | #[derive(Serialize, Default, Deserialize, Enum)] 28 | pub enum ResourceType { 29 | #[default] 30 | #[oai(rename = "job")] 31 | Job, 32 | #[oai(rename = "instance")] 33 | Instance, 34 | } 35 | 36 | #[derive(Object, Deserialize, Serialize)] 37 | pub struct BindTagResp { 38 | pub result: u64, 39 | } 40 | 41 | #[derive(Object, Deserialize, Serialize)] 42 | pub struct UnbindTagReq { 43 | pub resource_id: u64, 44 | pub resource_type: ResourceType, 45 | pub tag_id: u64, 46 | } 47 | 48 | #[derive(Object, Deserialize, Serialize)] 49 | pub struct UnbindTagResp { 50 | pub result: u64, 51 | } 52 | 53 
| #[derive(Object, Deserialize, Serialize)] 54 | pub struct CountTagResp { 55 | pub list: Vec, 56 | } 57 | 58 | #[derive(Object, Deserialize, Serialize)] 59 | pub struct TagCount { 60 | pub tag_id: u64, 61 | pub tag_name: String, 62 | pub total: i64, 63 | } 64 | } 65 | 66 | fn set_middleware(ep: impl Endpoint) -> impl Endpoint { 67 | ep.with(middleware::TeamPermissionMiddleware) 68 | } 69 | 70 | pub struct TagApi; 71 | 72 | #[OpenApi(prefix_path="/tag", tag = super::Tag::Tag)] 73 | impl TagApi { 74 | #[oai(path = "/bind_tag", method = "post", transform = "set_middleware")] 75 | pub async fn bind_tag( 76 | &self, 77 | user_info: Data<&logic::types::UserInfo>, 78 | state: Data<&AppState>, 79 | Json(req): Json, 80 | ) -> api_response!(BindTagResp) { 81 | let svc = state.service(); 82 | let resource_type = match req.resource_type { 83 | types::ResourceType::Job => ResourceType::Job, 84 | types::ResourceType::Instance => ResourceType::Instance, 85 | }; 86 | 87 | let ret = svc 88 | .tag 89 | .bind_tag(&user_info, &req.tag_name, resource_type, req.resource_id) 90 | .await?; 91 | 92 | return_ok!(BindTagResp { result: ret }); 93 | } 94 | 95 | #[oai(path = "/unbind_tag", method = "post", transform = "set_middleware")] 96 | pub async fn unbind_tag( 97 | &self, 98 | user_info: Data<&logic::types::UserInfo>, 99 | state: Data<&AppState>, 100 | Json(req): Json, 101 | ) -> api_response!(UnbindTagResp) { 102 | let svc = state.service(); 103 | let resource_type = match req.resource_type { 104 | types::ResourceType::Job => ResourceType::Job, 105 | types::ResourceType::Instance => ResourceType::Instance, 106 | }; 107 | let ret = svc 108 | .tag 109 | .unbind_tag(&user_info, req.tag_id, resource_type, vec![req.resource_id]) 110 | .await?; 111 | return_ok!(UnbindTagResp { result: ret }); 112 | } 113 | 114 | #[oai(path = "/count_resource", method = "get", transform = "set_middleware")] 115 | pub async fn count_resource( 116 | &self, 117 | user_info: Data<&logic::types::UserInfo>, 118 | 
state: Data<&AppState>, 119 | Query(resource_type): Query, 120 | #[oai(name = "X-Team-Id")] Header(team_id): Header>, 121 | ) -> api_response!(types::CountTagResp) { 122 | let svc = state.service(); 123 | let resource_type = match resource_type { 124 | types::ResourceType::Job => ResourceType::Job, 125 | types::ResourceType::Instance => ResourceType::Instance, 126 | }; 127 | 128 | let search_username = 129 | if state.can_manage_job(&user_info.user_id).await? || team_id.is_some() { 130 | None 131 | } else { 132 | Some(user_info.username.clone()) 133 | }; 134 | 135 | let ret = svc 136 | .tag 137 | .count_resource(&user_info, resource_type, team_id, search_username) 138 | .await?; 139 | 140 | let list: Vec = ret 141 | .into_iter() 142 | .map(|v| types::TagCount { 143 | tag_id: v.tag_id, 144 | tag_name: v.tag_name, 145 | total: v.total, 146 | }) 147 | .collect(); 148 | 149 | let resp = types::CountTagResp { list }; 150 | 151 | return_ok!(resp); 152 | } 153 | } 154 | -------------------------------------------------------------------------------- /openapi/src/api/utils.rs: -------------------------------------------------------------------------------- 1 | // fn check_team_permission(ep:) 2 | -------------------------------------------------------------------------------- /openapi/src/error.rs: -------------------------------------------------------------------------------- 1 | use anyhow::{anyhow, Result}; 2 | use poem::{error::ResponseError, http::StatusCode, Error as PError, IntoResponse}; 3 | use poem_openapi::payload::Json; 4 | use std::{error::Error as StdError, ops::Deref}; 5 | use thiserror::Error; 6 | 7 | use crate::response::StdResponse; 8 | 9 | #[allow(unused)] 10 | #[derive(Error, Debug)] 11 | pub enum BizError { 12 | #[error("无效的请求参数 `{0}`")] 13 | InvalidReqParams(String), 14 | #[error("`{0}` 无效的JSON格式 - {1}")] 15 | InvalidJSON(&'static str, anyhow::Error), 16 | } 17 | 18 | pub struct BizErr(PError); 19 | 20 | impl From for PError { 21 | fn from(value: 
BizErr) -> Self { 22 | value.0 23 | } 24 | } 25 | 26 | impl BizErr { 27 | pub fn new>(msg: S, code: i32) -> Self { 28 | let mut e = PError::from_string(msg.into(), StatusCode::OK); 29 | e.set_data(code); 30 | BizErr(e) 31 | } 32 | 33 | pub fn with_error(mut self, err: impl StdError + Send + Sync + 'static) -> Self { 34 | self.0 35 | .set_error_message(format!("{}: {}", self.0.to_string(), err.to_string())); 36 | self 37 | } 38 | 39 | pub fn with_msg(mut self, msg: impl Into) -> Self { 40 | self.0 41 | .set_error_message(format!("{}: {}", self.0.to_string(), msg.into())); 42 | self 43 | } 44 | 45 | pub fn error(self) -> Result<()> { 46 | Err(anyhow!(self.0)) 47 | } 48 | } 49 | 50 | impl Deref for BizErr { 51 | type Target = PError; 52 | 53 | fn deref(&self) -> &Self::Target { 54 | &self.0 55 | } 56 | } 57 | 58 | macro_rules! define_biz_error { 59 | ($($(#[$docs:meta])* ($name:ident, $code:expr, $msg:expr);)*) => { 60 | $( 61 | $(#[$docs])* 62 | #[allow(non_snake_case)] 63 | #[inline] 64 | #[allow(unused)] 65 | pub fn $name() -> BizErr { 66 | // let mut e= PError::from_string($msg, StatusCode::OK); 67 | // e.set_data($code); 68 | // BizErr(e) 69 | BizErr::new($msg, $code) 70 | } 71 | )* 72 | 73 | }; 74 | 75 | 76 | } 77 | 78 | define_biz_error!( 79 | (InvalidJSON, 50003, "Invalid JSON format"); 80 | (BizError, 50000, "Internal error"); 81 | (InvalidUser, 50004, "Invalid username or passowrd"); 82 | (NoPermission, 50005, "This operation is not allowed"); 83 | ); 84 | 85 | impl ResponseError for BizError { 86 | fn status(&self) -> StatusCode { 87 | StatusCode::OK 88 | } 89 | } 90 | 91 | pub async fn custom_error(e: PError) -> impl IntoResponse { 92 | let mut code = e.status().as_u16() as i32; 93 | let mut status_code = e.status(); 94 | let mut msg = e.to_string(); 95 | if code == 500 { 96 | status_code = StatusCode::OK; 97 | code = 50000 98 | } 99 | 100 | if code == 400 { 101 | status_code = StatusCode::OK; 102 | code = 50400 103 | } 104 | 105 | if 
msg.contains("Duplicate entry") { 106 | msg = "record already exists, please do not add it again".to_string() 107 | } 108 | 109 | let code = e.data::().unwrap_or(&code).to_owned(); 110 | Json(StdResponse:: { 111 | code, 112 | data: None, 113 | msg, 114 | }) 115 | .with_status(status_code) 116 | } 117 | -------------------------------------------------------------------------------- /openapi/src/job.rs: -------------------------------------------------------------------------------- 1 | use std::{sync::Arc, time::Duration}; 2 | 3 | use anyhow::{Context, Result}; 4 | use automate::{ 5 | bridge::msg::{AgentOfflineParams, AgentOnlineParams, HeartbeatParams}, 6 | bus::{Bus, Msg}, 7 | }; 8 | 9 | use leader_election::LeaderElection; 10 | use tokio::{sync::RwLock, time::sleep}; 11 | use tracing::{error, info}; 12 | 13 | use crate::AppState; 14 | 15 | async fn heartbeat(state: AppState, msg: HeartbeatParams) -> Result<()> { 16 | state 17 | .service() 18 | .instance 19 | .set_instance_online(msg.mac_addr, msg.source_ip) 20 | .await?; 21 | 22 | Ok(()) 23 | } 24 | 25 | async fn agent_online(state: AppState, msg: AgentOnlineParams) -> Result<()> { 26 | info!("{}:{}:{} online", msg.agent_ip, msg.namespace, msg.mac_addr); 27 | let mut svc = state.service(); 28 | 29 | svc.instance 30 | .update_status( 31 | Some(msg.namespace.clone()), 32 | msg.agent_ip.clone(), 33 | msg.mac_addr.clone(), 34 | 1, 35 | msg.secret_header.assign_user, 36 | msg.secret_header.ssh_connection_params, 37 | ) 38 | .await?; 39 | 40 | if !msg.is_initialized { 41 | info!( 42 | "start initialize runnable job on {}:{}", 43 | msg.agent_ip, msg.namespace, 44 | ); 45 | svc.job 46 | .fix_running_status(&msg.agent_ip, &msg.mac_addr) 47 | .await 48 | .map_or_else(|v| error!("failed fix running_status, {v:?}"), |n| n); 49 | 50 | if let Err(e) = svc 51 | .job 52 | .dispatch_runnable_job_to_endpoint( 53 | msg.namespace.clone(), 54 | msg.agent_ip.clone(), 55 | msg.mac_addr.clone(), 56 | ) 57 | .await 58 | { 59 | error!( 
60 | "failed dispatch_runnable_job_to_endpoint, {}", 61 | e.to_string() 62 | ); 63 | } 64 | } 65 | 66 | Ok(()) 67 | } 68 | 69 | async fn agent_offline(state: AppState, msg: AgentOfflineParams) -> Result<()> { 70 | info!("{}:{} offline", msg.agent_ip, msg.mac_addr,); 71 | 72 | Ok(state 73 | .service() 74 | .instance 75 | .update_status(None, msg.agent_ip, msg.mac_addr, 0, None, None) 76 | .await?) 77 | } 78 | 79 | pub async fn instance_health_check(state: AppState) { 80 | let is_master = Arc::new(RwLock::new(false)); 81 | let state_clone = state.clone(); 82 | let is_master_clone = is_master.clone(); 83 | tokio::spawn(async move { 84 | let mut l = LeaderElection::new(state_clone.redis(), "jiascheduler:leader_election", 10) 85 | .expect("failed initialize leader election"); 86 | 87 | l.run_election(|ok| { 88 | let is_master_clone = is_master_clone.clone(); 89 | Box::pin(async move { 90 | info!("got leader election result {ok}"); 91 | let mut val = is_master_clone.write().await; 92 | *val = ok; 93 | () 94 | }) 95 | }) 96 | .await 97 | .expect("faild run leader election"); 98 | }); 99 | 100 | tokio::spawn(async move { 101 | let svc = state.service(); 102 | loop { 103 | let ok = is_master.read().await; 104 | if *ok { 105 | info!("start offline inactive instance"); 106 | let _ = svc 107 | .instance 108 | .offline_inactive_instance(60) 109 | .await 110 | .context("failed offline inactive instance") 111 | .map_err(|e| error!("{e:?}")); 112 | 113 | sleep(Duration::from_secs(30)).await; 114 | } else { 115 | sleep(Duration::from_secs(1)).await; 116 | } 117 | } 118 | }); 119 | } 120 | 121 | pub async fn start(state: AppState) -> Result<()> { 122 | let bus = Bus::new(state.redis().clone()); 123 | 124 | instance_health_check(state.clone()).await; 125 | 126 | tokio::spawn(async move { 127 | loop { 128 | let ret = bus 129 | .recv(|_key, msg| { 130 | let state = state.clone(); 131 | Box::pin(async move { 132 | match msg { 133 | Msg::UpdateJob(v) => { 134 | let _ = 
state.service().job.update_job_status(v).await?; 135 | } 136 | Msg::Heartbeat(v) => { 137 | let _ = heartbeat(state.clone(), v).await?; 138 | } 139 | Msg::AgentOnline(msg) => agent_online(state.clone(), msg).await?, 140 | Msg::AgentOffline(msg) => agent_offline(state.clone(), msg).await?, 141 | }; 142 | Ok(()) 143 | }) 144 | }) 145 | .await; 146 | if let Err(e) = ret { 147 | error!("failed to recv bus msg - {e}"); 148 | sleep(Duration::from_millis(500)).await; 149 | } 150 | info!("restart recv bus msg"); 151 | } 152 | }); 153 | Ok(()) 154 | } 155 | -------------------------------------------------------------------------------- /openapi/src/macros.rs: -------------------------------------------------------------------------------- 1 | #[macro_export] 2 | macro_rules! api_response { 3 | ($type:ty) => { 4 | poem::Result> 5 | }; 6 | } 7 | 8 | #[macro_export] 9 | macro_rules! return_ok { 10 | ($data:expr) => { 11 | return Ok(poem_openapi::payload::Json(crate::response::StdResponse { 12 | code: 20000, 13 | data: Some($data), 14 | msg: "success".to_string(), 15 | })) 16 | }; 17 | } 18 | 19 | #[macro_export] 20 | macro_rules! return_err { 21 | ($msg:expr) => {{ 22 | let mut e = poem::Error::from_string($msg, poem::http::StatusCode::OK); 23 | e.set_data(50001i32); 24 | return Err(e); 25 | }}; 26 | } 27 | 28 | /// convert DateTime to local time(String) 29 | #[macro_export] 30 | macro_rules! local_time { 31 | ($time:expr) => { 32 | $time 33 | .with_timezone(&chrono::Local) 34 | .naive_local() 35 | .to_string() 36 | }; 37 | } 38 | #[macro_export] 39 | macro_rules! time_format { 40 | ($time:expr, $format:expr) => { 41 | $time 42 | // .with_timezone(&chrono::Local) 43 | .naive_local() 44 | .format($format) 45 | .to_string() 46 | }; 47 | } 48 | 49 | #[macro_export] 50 | macro_rules! default_string { 51 | ($v:expr, $default:expr) => { 52 | $v.clone().map_or($default.to_string(), |v| v) 53 | }; 54 | } 55 | 56 | #[macro_export] 57 | macro_rules! 
default_local_time { 58 | ($time:expr) => { 59 | $time.clone().map_or("".to_string(), |v| { 60 | v.with_timezone(&chrono::Local).naive_local().to_string() 61 | }) 62 | }; 63 | } 64 | 65 | #[macro_export] 66 | macro_rules! return_err_to_wsconn { 67 | ($client:expr, $err_msg:expr) => { 68 | if let Err(e) = $client 69 | .send(poem::web::websocket::Message::Text(format!( 70 | "\r\n\x1b[31m{}", 71 | $err_msg 72 | ))) 73 | .await 74 | { 75 | error!("failed send message to ws connection - {e}"); 76 | } 77 | return; 78 | }; 79 | } 80 | -------------------------------------------------------------------------------- /openapi/src/middleware/auth.rs: -------------------------------------------------------------------------------- 1 | use crate::logic::{types, user::UserLogic}; 2 | use poem::{ 3 | session::Session, web::Json, Endpoint, IntoResponse, Middleware, Request, Response, Result, 4 | }; 5 | 6 | pub struct AuthMiddleware; 7 | 8 | impl Middleware for AuthMiddleware { 9 | type Output = AuthMiddlewareEndpoint; 10 | 11 | fn transform(&self, ep: E) -> Self::Output { 12 | AuthMiddlewareEndpoint { ep } 13 | } 14 | } 15 | 16 | pub struct AuthMiddlewareEndpoint { 17 | ep: E, 18 | } 19 | 20 | // #[async_trait::async_trait] 21 | impl Endpoint for AuthMiddlewareEndpoint 22 | where 23 | E: Endpoint, 24 | { 25 | type Output = Response; 26 | 27 | async fn call(&self, mut req: Request) -> Result { 28 | let login_resp = Json(serde_json::json! 
({ 29 | "code": 50401, 30 | "msg": "not login", 31 | })) 32 | .into_response(); 33 | 34 | let sess: &Session = req.extensions().get().expect("not init session"); 35 | 36 | if let Some(user_info) = sess.get::(UserLogic::SESS_KEY) { 37 | req.extensions_mut().insert(user_info); 38 | } else { 39 | if vec!["/user/login", "/user/logout", "/migration/version/check"] 40 | .contains(&req.uri().path()) 41 | { 42 | return self.ep.call(req).await.map(IntoResponse::into_response); 43 | } 44 | return Ok(login_resp); 45 | } 46 | self.ep.call(req).await.map(IntoResponse::into_response) 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /openapi/src/middleware/mod.rs: -------------------------------------------------------------------------------- 1 | mod auth; 2 | mod team_permission; 3 | pub use auth::AuthMiddleware; 4 | pub use team_permission::TeamPermissionMiddleware; 5 | -------------------------------------------------------------------------------- /openapi/src/middleware/team_permission.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | logic::{self}, 3 | state::AppState, 4 | }; 5 | use poem::{web::Json, Endpoint, IntoResponse, Middleware, Request, Response, Result}; 6 | 7 | pub struct TeamPermissionMiddleware; 8 | 9 | impl Middleware for TeamPermissionMiddleware { 10 | type Output = TeamPermissionMiddlewareEndpoint; 11 | 12 | fn transform(&self, ep: E) -> Self::Output { 13 | TeamPermissionMiddlewareEndpoint { ep } 14 | } 15 | } 16 | 17 | pub struct TeamPermissionMiddlewareEndpoint { 18 | ep: E, 19 | } 20 | 21 | // #[async_trait::async_trait] 22 | impl Endpoint for TeamPermissionMiddlewareEndpoint 23 | where 24 | E: Endpoint, 25 | { 26 | type Output = Response; 27 | 28 | async fn call(&self, req: Request) -> Result { 29 | let resp = Json(serde_json::json! 
({ 30 | "code": 50403, 31 | "msg": "No permission to access the team's jobs", 32 | })) 33 | .into_response(); 34 | 35 | let team_id = match req.header("X-Team-Id").map(str::parse::).transpose() { 36 | Ok(v) => v, 37 | Err(e) => { 38 | return Ok(Json(serde_json::json! ({ 39 | "code": 50000, 40 | "msg": e.to_string(), 41 | })) 42 | .into_response()) 43 | } 44 | }; 45 | 46 | if team_id.is_none() { 47 | return self.ep.call(req).await.map(IntoResponse::into_response); 48 | } 49 | 50 | let user_info: &logic::types::UserInfo = 51 | req.extensions().get().expect("not init user info"); 52 | let state: &AppState = req.extensions().get().expect("not init state"); 53 | 54 | let ok = state 55 | .service() 56 | .job 57 | .can_write_job(&user_info, team_id, None) 58 | .await?; 59 | if !ok { 60 | return Ok(resp); 61 | } 62 | self.ep.call(req).await.map(IntoResponse::into_response) 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /openapi/src/response.rs: -------------------------------------------------------------------------------- 1 | use poem::{http::StatusCode, Error}; 2 | use poem_openapi::{ 3 | payload::Json, 4 | types::{ParseFromJSON, ToJSON}, 5 | Object, 6 | }; 7 | 8 | use serde::{Deserialize, Serialize}; 9 | 10 | #[derive(Object, Serialize, Deserialize)] 11 | pub struct StdResponse { 12 | pub code: i32, 13 | pub data: Option, 14 | pub msg: String, 15 | } 16 | 17 | pub fn std_into_error(e: impl std::error::Error + Sync + Send + 'static) -> Error { 18 | let mut e = Error::new(e, StatusCode::OK); 19 | e.set_data(50001i32); 20 | e 21 | } 22 | 23 | pub fn anyhow_into_error(e: anyhow::Error) -> Error { 24 | let mut e = Error::from((StatusCode::OK, e)); 25 | e.set_data(50001i32); 26 | e 27 | } 28 | 29 | pub type ApiStdResponse = Json>; 30 | -------------------------------------------------------------------------------- /openapi/src/utils.rs: -------------------------------------------------------------------------------- 1 | 
use std::{future::Future, pin::Pin, sync::Arc}; 2 | 3 | use anyhow::{anyhow, Result}; 4 | use tokio::sync::RwLock; 5 | 6 | pub async fn async_batch_do(data: Vec, handler: F) -> Vec> 7 | where 8 | F: 'static + Send + Sync + Clone + Fn(I) -> Pin> + Send>>, 9 | I: Send + Sync + 'static, 10 | T: Clone + Send + Sync + 'static, 11 | { 12 | let data_len = data.len(); 13 | let locked_data = Arc::new(RwLock::new(data)); 14 | let locked_outputs = Arc::new(RwLock::new(Vec::with_capacity(data_len))); 15 | let queue_len = if data_len > 500 { 500 } else { data_len }; 16 | let mut tasks = Vec::with_capacity(queue_len); 17 | 18 | for _ in 0..queue_len { 19 | let locked_data = locked_data.clone(); 20 | let locked_outputs = locked_outputs.clone(); 21 | let handler = handler.clone(); 22 | tasks.push(tokio::spawn(async move { 23 | loop { 24 | let mut queue = locked_data.write().await; 25 | if let Some(val) = queue.pop() { 26 | drop(queue); 27 | let ret = handler(val).await; 28 | let mut outputs = locked_outputs.write().await; 29 | outputs.push(ret); 30 | } else { 31 | return; 32 | } 33 | } 34 | })); 35 | } 36 | 37 | for task in tasks { 38 | let _ = task.await; 39 | } 40 | 41 | let outputs = locked_outputs.read().await; 42 | 43 | let mut ret = Vec::new(); 44 | 45 | outputs.iter().for_each(|v| { 46 | ret.push(match v { 47 | Ok(v) => Ok(v.to_owned()), 48 | Err(e) => Err(anyhow!("{e}")), 49 | }) 50 | }); 51 | 52 | ret 53 | } 54 | 55 | #[tokio::test] 56 | async fn test_async_queue_do() { 57 | use std::time::Duration; 58 | use tokio::time::sleep; 59 | 60 | std::env::set_var("RUST_LOG", "debug"); 61 | tracing_subscriber::fmt::init(); 62 | let data = 1..100; 63 | 64 | #[derive(Debug, Clone)] 65 | pub struct QueueResult { 66 | _val: i32, 67 | } 68 | 69 | let ret = async_batch_do(data.clone().collect(), |v| { 70 | Box::pin(async move { 71 | sleep(Duration::from_secs(1)).await; 72 | Ok(QueueResult { _val: v }) 73 | }) 74 | }) 75 | .await; 76 | 77 | println!("result:{:?}, len: {}", ret, 
ret.len(),) 78 | } 79 | -------------------------------------------------------------------------------- /restapi/comet.http: -------------------------------------------------------------------------------- 1 | POST http://localhost:3000/dispatch 2 | Content-Type: application/json 3 | 4 | { 5 | "agent_ip":"192.168.1.36", 6 | "namespace":"default", 7 | "dispatch_params":{ 8 | "base_job":{ 9 | "juid":"test", 10 | "cmd_name":"bash", 11 | "code":"ls -alh", 12 | "args":["-c"], 13 | "read_code_from_stdin":false 14 | }, 15 | "cron_spec":"* * * * * * *", 16 | "is_sync":true, 17 | "action":"Exec" 18 | 19 | } 20 | } -------------------------------------------------------------------------------- /restapi/openapi.http: -------------------------------------------------------------------------------- 1 | @baseUrl = http://localhost:9090 2 | @contentType = application/json 3 | 4 | # @name login 5 | POST {{baseUrl}}/api/user/login HTTP/1.1 6 | Content-Type: {{contentType}} 7 | 8 | 9 | {"username":"admin","password":"admin"} 10 | 11 | ### 12 | 13 | @cookie = {{login.response.headers.Cookie}} 14 | 15 | GET {{baseUrl}}/api/instance/list 16 | Cookie: {{cookie}} 17 | 18 | ### 19 | 20 | GET {{baseUrl}}/api/instance/user-server 21 | Cookie: {{cookie}} 22 | 23 | 24 | ### 25 | 26 | POST {{baseUrl}}/api/instance/granted-user 27 | Cookie: {{cookie}} 28 | Content-Type: {{contentType}} 29 | 30 | { 31 | "instance_ids": [ 32 | 1 33 | ] 34 | } 35 | 36 | ### 37 | 38 | GET {{baseUrl}}/api/user/list 39 | Cookie: {{cookie}} 40 | 41 | ### 42 | 43 | POST {{baseUrl}}/api/user/info 44 | Cookie: {{cookie}} 45 | 46 | ### 47 | 48 | GET {{baseUrl}}/api/file/sftp/read-dir?ip=127.0.0.1&dir=/home/iwannay/Dev/log/weops-agent 49 | Cookie: {{cookie}} 50 | 51 | 52 | 53 | ### 54 | 55 | GET {{baseUrl}}/api/file/sftp/download?ip=127.0.0.1&file_path=/home/iwannay/Dev/log/weops-agent/1.log 56 | Cookie: {{cookie}} 57 | 58 | 59 | ### 60 | GET {{baseUrl}}/api/instance/user-server 61 | Cookie: {{cookie}} 62 | 63 | ### 64 | 
GET {{baseUrl}}/api/migration/version/list?page=1 65 | 66 | ### 67 | POST {{baseUrl}}/api/migration/version/upgrade 68 | Cookie: {{cookie}} 69 | Content-Type: {{contentType}} 70 | 71 | { 72 | "version":"v1.0.0" 73 | } 74 | 75 | ### 76 | GET {{baseUrl}}/api/migration/database/get?name=jiascheduler 77 | 78 | ### 79 | GET {{baseUrl}}/api/job/list?updated_time_range=2&updated_time_range=1 80 | 81 | ### 82 | 83 | POST {{baseUrl}}/api/instance/save 84 | Cookie: {{cookie}} 85 | Content-Type: {{contentType}} 86 | 87 | { 88 | "id": 0, 89 | "ip": "172.22.110.232", 90 | "namespace": "default", 91 | "instance_group_id": 0, 92 | "info": "localhost", 93 | "status": 0, 94 | "sys_user": "iwannay", 95 | "password": "qqqqqq", 96 | "ssh_port": 22 97 | } 98 | 99 | ### 100 | GET {{baseUrl}}/api/instance/list 101 | 102 | 103 | ### 104 | GET {{baseUrl}}/api/migration/conf/init 105 | 106 | ### 107 | POST {{baseUrl}}/api/job/dashboard 108 | Cookie: {{cookie}} 109 | Content-Type: {{contentType}} 110 | 111 | { 112 | "job_type":"bundle", 113 | "filter_schedule_history":[] 114 | } -------------------------------------------------------------------------------- /src/bin/agent.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use clap::Parser; 3 | 4 | use tracing::error; 5 | 6 | use automate::scheduler::{ 7 | Scheduler, 8 | types::{AssignUserOption, SshConnectionOption}, 9 | }; 10 | 11 | #[derive(Parser, Debug)] 12 | #[command( 13 | author = "iwannay <772648576@qq.com>", 14 | about = "A high-performance, scalable, dynamically configured job scheduler developed with rust", 15 | version 16 | )] 17 | struct AgentArgs { 18 | #[arg(short, long, default_value_t = String::from("0.0.0.0:3001"))] 19 | bind: String, 20 | #[arg(long, default_values_t = vec![String::from("ws://127.0.0.1:3000")])] 21 | comet_addr: Vec, 22 | /// Directory for saving job execution logs 23 | #[arg(long, default_value_t = String::from("./log"))] 24 | output_dir: 
String, 25 | #[arg(long, default_value_t = String::from("rYzBYE+cXbtdMg=="))] 26 | comet_secret: String, 27 | #[arg(short, long, default_value_t = String::from("default"))] 28 | namespace: String, 29 | /// Set the login user of the instance for SSH remote connection 30 | #[arg(long)] 31 | ssh_user: Option, 32 | /// Set the login user's password of the instance for SSH remote connection 33 | #[arg(long)] 34 | ssh_password: Option, 35 | /// Set the port of this instance for SSH remote connection 36 | #[arg(long)] 37 | ssh_port: Option, 38 | 39 | /// Assign this instance to a user and specify their username 40 | #[arg(long)] 41 | assign_username: Option, 42 | /// Assign this instance to a user and specify their password 43 | #[arg(long)] 44 | assign_password: Option, 45 | 46 | /// Set log level, eg: "trace", "debug", "info", "warn", "error" etc. 47 | #[arg(long, default_value_t = String::from("error"))] 48 | log_level: String, 49 | } 50 | 51 | #[tokio::main] 52 | async fn main() -> Result<()> { 53 | let args = AgentArgs::parse(); 54 | unsafe { 55 | std::env::set_var("RUST_LOG", args.log_level); 56 | } 57 | tracing_subscriber::fmt::init(); 58 | 59 | let mut scheduler = Scheduler::new( 60 | args.namespace, 61 | args.comet_addr, 62 | args.comet_secret, 63 | args.output_dir, 64 | SshConnectionOption::build(args.ssh_user, args.ssh_password, args.ssh_port), 65 | AssignUserOption::build(args.assign_username, args.assign_password), 66 | ); 67 | 68 | if let Err(e) = scheduler.connect_comet().await { 69 | error!("failed connect to comet - {e}"); 70 | } 71 | 72 | scheduler.run().await 73 | } 74 | -------------------------------------------------------------------------------- /src/bin/comet.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use automate::comet::{self, CometOptions}; 3 | use clap::Parser; 4 | 5 | #[derive(Parser, Debug)] 6 | #[command( 7 | author = "iwannay <772648576@qq.com>", 8 | about = "A 
high-performance, scalable, dynamically configured job scheduler developed with rust", 9 | version 10 | )] 11 | struct CometArgs { 12 | /// if enable debug mode 13 | #[arg(short, long)] 14 | debug: bool, 15 | #[arg(short, long, default_value_t = String::from("0.0.0.0:3000"))] 16 | bind: String, 17 | #[arg(short,default_value_t = String::from("redis://:wang@127.0.0.1"))] 18 | redis_url: String, 19 | #[arg(long, default_value_t = String::from("rYzBYE+cXbtdMg=="))] 20 | secret: String, 21 | 22 | /// Set log level, eg: "trace", "debug", "info", "warn", "error" etc. 23 | #[arg(long, default_value_t = String::from("error"))] 24 | log_level: String, 25 | } 26 | 27 | #[tokio::main] 28 | async fn main() -> Result<()> { 29 | let args = CometArgs::parse(); 30 | unsafe { 31 | std::env::set_var("RUST_LOG", args.log_level); 32 | } 33 | 34 | tracing_subscriber::fmt::init(); 35 | 36 | comet::run( 37 | CometOptions { 38 | redis_url: args.redis_url, 39 | bind_addr: args.bind, 40 | secret: args.secret, 41 | }, 42 | None, 43 | ) 44 | .await 45 | } 46 | -------------------------------------------------------------------------------- /src/bin/console.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use clap::Parser; 3 | use openapi::WebapiOptions; 4 | 5 | /// A high-performance, scalable, dynamically configured job scheduler developed with rust 6 | #[derive(Parser, Debug)] 7 | #[command( 8 | author = "iwannay <772648576@qq.com>", 9 | about = "A high-performance, scalable, dynamically configured job scheduler developed with rust", 10 | version 11 | )] 12 | struct WebapiArgs { 13 | /// if enable debug mode 14 | #[arg(short, long)] 15 | debug: bool, 16 | /// http server listen address, eg: "0.0.0.0:9090" 17 | #[arg(long)] 18 | bind_addr: Option, 19 | 20 | /// Set log level, eg: "trace", "debug", "info", "warn", "error" etc. 
21 | #[arg(long, default_value_t = String::from("error"))] 22 | log_level: String, 23 | 24 | /// where to read config file, 25 | /// you can temporarily overwrite the configuration file using command-line parameters 26 | #[arg(long, value_name = "FILE", default_value_t = String::from("~/.jiascheduler/console.toml"))] 27 | config: String, 28 | /// redis connect address, eg: "redis://:wang@127.0.0.1" 29 | /// can be used to override configuration items in the configuration file 30 | #[arg(long)] 31 | redis_url: Option, 32 | /// mysql connect address, eg: "mysql://root:root@localhost:3306/jiascheduler" 33 | /// can be used to override configuration items in the configuration file 34 | #[arg(long)] 35 | database_url: Option, 36 | } 37 | 38 | #[tokio::main] 39 | async fn main() -> Result<()> { 40 | let args = WebapiArgs::parse(); 41 | unsafe { 42 | std::env::set_var("RUST_LOG", args.log_level); 43 | } 44 | 45 | tracing_subscriber::fmt::init(); 46 | 47 | openapi::run( 48 | WebapiOptions { 49 | database_url: args.database_url, 50 | redis_url: args.redis_url, 51 | config_file: args.config, 52 | bind_addr: args.bind_addr, 53 | }, 54 | None, 55 | ) 56 | .await 57 | } 58 | -------------------------------------------------------------------------------- /src/bin/jiascheduler.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use anyhow::Result; 4 | use automate::{ 5 | comet::{self, CometOptions}, 6 | scheduler::{ 7 | Scheduler, 8 | types::{AssignUserOption, SshConnectionOption}, 9 | }, 10 | }; 11 | use clap::Parser; 12 | use openapi::WebapiOptions; 13 | use service::config::Conf; 14 | use tokio::sync::{Mutex, oneshot::channel}; 15 | use tracing::{error, info}; 16 | 17 | /// A high-performance, scalable, dynamically configured job scheduler developed with rust 18 | #[derive(Parser, Debug)] 19 | #[command( 20 | author = "iwannay <772648576@qq.com>", 21 | about = "A high-performance, scalable, dynamically 
configured job scheduler developed with rust", 22 | version 23 | )] 24 | struct WebapiArgs { 25 | /// if enable debug mode 26 | #[arg(short, long)] 27 | debug: bool, 28 | /// http server listen address, eg: "0.0.0.0:9090" 29 | #[arg(long)] 30 | console_bind_addr: Option, 31 | 32 | /// Set log level, eg: "info", "debug", "warn", "error" etc. 33 | #[arg(long, default_value_t = String::from("error"))] 34 | log_level: String, 35 | 36 | /// Comet server listen address, eg: "0.0.0.0:3000" 37 | #[arg(short, long, default_value_t = String::from("0.0.0.0:3000"))] 38 | comet_bind_addr: String, 39 | 40 | #[arg(short, long, default_value_t = String::from("default"))] 41 | namespace: String, 42 | /// Directory for saving job execution logs 43 | #[arg(long, default_value_t = String::from("./log"))] 44 | output_dir: String, 45 | /// Set the login user of the instance for SSH remote connection 46 | #[arg(long)] 47 | ssh_user: Option, 48 | /// Set the login user's password of the instance for SSH remote connection 49 | #[arg(long)] 50 | ssh_password: Option, 51 | /// Set the port of this instance for SSH remote connection 52 | #[arg(long)] 53 | ssh_port: Option, 54 | 55 | /// Assign this instance to a user and specify their username 56 | #[arg(long)] 57 | assign_username: Option, 58 | /// Assign this instance to a user and specify their password 59 | #[arg(long)] 60 | assign_password: Option, 61 | 62 | /// where to read config file, 63 | /// you can temporarily overwrite the configuration file using command-line parameters 64 | #[arg(long, value_name = "FILE", default_value_t = String::from("~/.jiascheduler/console.toml"))] 65 | config: String, 66 | } 67 | 68 | #[tokio::main] 69 | async fn main() -> Result<()> { 70 | let args = WebapiArgs::parse(); 71 | unsafe { 72 | std::env::set_var("RUST_LOG", args.log_level); 73 | if args.debug { 74 | std::env::set_var("RUST_LOG", "debug"); 75 | } 76 | } 77 | 78 | tracing_subscriber::fmt::init(); 79 | 80 | let (console_tx, console_rx) = 
channel::(); 81 | let (comet_tx, comet_rx) = channel::<()>(); 82 | 83 | let console_conf: Arc>> = Arc::new(Mutex::new(None)); 84 | let console_conf_clone = console_conf.clone(); 85 | let comet_bind_addr = args.comet_bind_addr.clone(); 86 | 87 | tokio::spawn(async move { 88 | let conf = console_rx.await.unwrap(); 89 | console_conf_clone.lock().await.replace(conf.clone()); 90 | info!("starting comet"); 91 | comet::run( 92 | CometOptions { 93 | redis_url: conf.redis_url, 94 | bind_addr: comet_bind_addr.clone(), 95 | secret: conf.comet_secret, 96 | }, 97 | Some(comet_tx), 98 | ) 99 | .await 100 | .expect("failed to start comet server"); 101 | }); 102 | 103 | tokio::spawn(async move { 104 | comet_rx 105 | .await 106 | .expect("failed to receive comet server signal"); 107 | let binding = console_conf.lock().await; 108 | let conf = binding.as_ref().unwrap(); 109 | let mut scheduler = Scheduler::new( 110 | args.namespace, 111 | vec![format!("ws://{}", args.comet_bind_addr)], 112 | conf.comet_secret.to_string(), 113 | args.output_dir, 114 | SshConnectionOption::build(args.ssh_user, args.ssh_password, args.ssh_port), 115 | AssignUserOption::build(args.assign_username, args.assign_password), 116 | ); 117 | info!("starting agent"); 118 | if let Err(e) = scheduler.connect_comet().await { 119 | error!("failed connect to comet - {e}"); 120 | } 121 | 122 | scheduler.run().await.expect("failed to start scheduler"); 123 | }); 124 | 125 | openapi::run( 126 | WebapiOptions { 127 | database_url: None, 128 | redis_url: None, 129 | config_file: args.config, 130 | bind_addr: args.console_bind_addr, 131 | }, 132 | Some(console_tx), 133 | ) 134 | .await 135 | } 136 | --------------------------------------------------------------------------------