├── src
├── UI.hs
└── OddJobs
│ ├── Job
│ └── Query.hs
│ ├── Migrations.hs
│ ├── Endpoints.hs
│ ├── Cli.hs
│ ├── ConfigBuilder.hs
│ ├── Types.hs
│ ├── Web.hs
│ └── Job.hs
├── Setup.hs
├── .gitignore
├── assets
├── odd-jobs-color-logo.png
├── css
│ ├── logo-slider.css
│ ├── styles.css
│ └── Navigation-Clean1.css
├── js
│ ├── custom.js
│ └── logo-slider.js
└── bootstrap
│ └── css
│ ├── bootstrap-reboot.min.css
│ └── bootstrap-reboot.css
├── test
└── CliParser.hs
├── .gitattributes
├── .hlint.yaml
├── shell.nix
├── default.nix
├── run-ghcid.sh
├── .github
└── workflows
│ ├── hlint.yml
│ ├── nix.yaml
│ └── ci.yml
├── stack.yaml
├── stack-lts-18.28.yaml
├── CHANGELOG.md
├── flake.lock
├── flake.nix
├── LICENSE
├── README.md
├── CONTRIBUTING.md
├── dev
└── DevelMain.hs
├── package.yaml
├── stack-lts-18.28.yaml.lock
├── examples
└── OddJobsCliExample.lhs
└── odd-jobs.cabal
/src/UI.hs:
--------------------------------------------------------------------------------
1 | module UI where
2 |
3 |
--------------------------------------------------------------------------------
/Setup.hs:
--------------------------------------------------------------------------------
1 | import Distribution.Simple
2 | main = defaultMain
3 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | stack.yaml.lock
2 | test.log
3 | TAGS
4 | .stack-work/
5 | dist-newstyle/
--------------------------------------------------------------------------------
/assets/odd-jobs-color-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/saurabhnanda/odd-jobs/HEAD/assets/odd-jobs-color-logo.png
--------------------------------------------------------------------------------
/test/CliParser.hs:
--------------------------------------------------------------------------------
1 | module CliParser where
2 |
3 | import OddJobs.Cli
4 |
5 | main :: IO ()
6 | main = undefined
7 |
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | assets/bootstrap/** linguist-vendored
2 | assets/js/jquery.min.js linguist-vendored
3 | assets/css/Navigation-Clean1.css linguist-vendored
4 |
--------------------------------------------------------------------------------
/.hlint.yaml:
--------------------------------------------------------------------------------
1 | # Warnings currently triggered by your code
2 | - ignore: {name: "Use newtype instead of data"} # 1 hint ignored because newtypes can't have strictness annotations.
3 |
--------------------------------------------------------------------------------
/assets/css/logo-slider.css:
--------------------------------------------------------------------------------
1 | .text-center.text-h1 {
2 | font-size:6vw;
3 | margin-bottom:2vw;
4 | }
5 |
6 | .customer-logos {
7 | background:#ededed;
8 | }
9 |
10 | .margin-bot {
11 | margin-bottom:-8em;
12 | }
13 |
14 |
--------------------------------------------------------------------------------
/assets/js/custom.js:
--------------------------------------------------------------------------------
1 | function toggleError(e) {
2 | var $e = $(e).closest('.job-error');
3 | $e.toggleClass('expanded').toggleClass('collapsed');
4 | $e.find('.badge.error-expand').toggleClass('d-none');
5 | $e.find('.badge.error-collapse').toggleClass('d-none');
6 | return false;
7 | }
8 |
--------------------------------------------------------------------------------
/shell.nix:
--------------------------------------------------------------------------------
1 | (import
2 | (
3 | let lock = builtins.fromJSON (builtins.readFile ./flake.lock); in
4 | fetchTarball {
5 | url = "https://github.com/edolstra/flake-compat/archive/${lock.nodes.flake-compat.locked.rev}.tar.gz";
6 | sha256 = lock.nodes.flake-compat.locked.narHash;
7 | }
8 | )
9 | { src = ./.; }
10 | ).shellNix
11 |
--------------------------------------------------------------------------------
/default.nix:
--------------------------------------------------------------------------------
1 | (import
2 | (
3 | let lock = builtins.fromJSON (builtins.readFile ./flake.lock); in
4 | fetchTarball {
5 | url = "https://github.com/edolstra/flake-compat/archive/${lock.nodes.flake-compat.locked.rev}.tar.gz";
6 | sha256 = lock.nodes.flake-compat.locked.narHash;
7 | }
8 | )
9 | { src = ./.; }
10 | ).defaultNix
11 |
--------------------------------------------------------------------------------
/run-ghcid.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 |
3 | sudo sysctl -w kern.maxfiles=20480
4 | sudo sysctl -w kern.maxfilesperproc=30000
5 | ulimit -n 20000
6 |
7 | exec ghcid \
8 | --test 'DevelMain.update' -W --color=always \
9 | -c 'stack ghci odd-jobs:exe:devel' \
10 | --reverse-errors --no-height-limit \
11 | --restart stack.yaml \
12 | --restart package.yaml \
13 | --allow-eval \
14 | --clear
15 |
--------------------------------------------------------------------------------
/assets/js/logo-slider.js:
--------------------------------------------------------------------------------
1 | $(document).ready(function(){
2 | $('.customer-logos').slick({
3 | slidesToShow: 4,
4 | slidesToScroll: 1,
5 | autoplay: true,
6 | autoplaySpeed: 1000,
7 | arrows: false,
8 | dots: false,
9 | pauseOnHover: true,
10 | responsive: [{
11 | breakpoint: 768,
12 | settings: {
13 | slidesToShow: 3
14 | }
15 | }, {
16 | breakpoint: 520,
17 | settings: {
18 | slidesToShow: 2
19 | }
20 | }]
21 | });
22 | });
--------------------------------------------------------------------------------
/.github/workflows/hlint.yml:
--------------------------------------------------------------------------------
1 | name: hlint
2 |
3 | on:
4 | pull_request:
5 | push:
6 |
7 | jobs:
8 | build:
9 | name: hlint-3.5
10 | runs-on: ubuntu-latest
11 |
12 | steps:
13 | - uses: actions/checkout@v3
14 | with:
15 | submodules: true
16 |
17 | - uses: haskell/actions/hlint-setup@v2
18 | name: Set up HLint
19 | with:
20 | version: "3.5"
21 |
22 | - uses: haskell/actions/hlint-run@v2
23 | name: hlint
24 | with:
25 | path: ./
26 | fail-on: suggestion
--------------------------------------------------------------------------------
/stack.yaml:
--------------------------------------------------------------------------------
1 | resolver: lts-21.14
2 |
3 | packages:
4 | - .
5 | extra-deps:
6 | - aeson-2.2.0.0
7 | - timing-convenience-0.1@sha256:7ff807a9a9e5596f2b18d45c5a01aefb91d4a98f6a1008d183b5c550f68f7cb7,2092
8 | - resource-pool-0.4.0.0@sha256:9c1e448a159875e21a7e68697feee2b61a4e584720974fa465a2fa1bc0776c73,1342
9 | - integer-conversion-0.1@sha256:9f77cc7711d3100a4483f2dd1a22f4be5b59d235a556d910d0e6c5e90a967551,2208
10 | - text-iso8601-0.1@sha256:fc10d8de72fc094d0d299644f17421b9430d1c1092a1355c7f0c02d8b6edf6a7,2371
11 | - th-abstraction-0.5.0.0@sha256:4351cadf6bf0ca80c90225bae0b12f4cfdd550f30c333b050186af5a8bc19457,2236
12 | - servant-0.20.1
13 | - servant-server-0.20
14 | - postgresql-simple-0.6.5.1
15 |
16 | allow-newer: true
17 |
--------------------------------------------------------------------------------
/stack-lts-18.28.yaml:
--------------------------------------------------------------------------------
1 | resolver: lts-18.28
2 |
3 | packages:
4 | - .
5 | extra-deps:
6 | - aeson-2.2.0.0
7 | - timing-convenience-0.1@sha256:7ff807a9a9e5596f2b18d45c5a01aefb91d4a98f6a1008d183b5c550f68f7cb7,2092
8 | - resource-pool-0.4.0.0@sha256:9c1e448a159875e21a7e68697feee2b61a4e584720974fa465a2fa1bc0776c73,1342
9 | - integer-conversion-0.1@sha256:9f77cc7711d3100a4483f2dd1a22f4be5b59d235a556d910d0e6c5e90a967551,2208
10 | - text-iso8601-0.1@sha256:fc10d8de72fc094d0d299644f17421b9430d1c1092a1355c7f0c02d8b6edf6a7,2371
11 | - th-abstraction-0.5.0.0@sha256:4351cadf6bf0ca80c90225bae0b12f4cfdd550f30c333b050186af5a8bc19457,2236
12 | - servant-0.20.1
13 | - servant-server-0.20
14 | - postgresql-simple-0.6.5.1
15 | - generically-0.1.1
16 | - servant-lucid-0.9.0.6
17 | - servant-static-th-1.0.0.0
18 | - semialign-1.3
19 | - OneTuple-0.3.1
20 |
21 | allow-newer: true
22 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Changelog
2 |
3 | ## 0.2.3
4 | - Upgrade resource pool
5 | - Add ability to kill jobs
6 |
7 | ## 0.2.2
8 |
9 | - Bugfix in the `deleteJobQuery` function
10 | - committed the cabal file to the repo
11 |
12 | ## 0.2.1
13 |
14 | - build fixes
15 |
16 | ## 0.2.0
17 |
18 | (broken build)
19 |
20 | - Completed the web UI
21 | - Added default functions for structured logging in JSON
22 | - Introduced OddJobs.ConfigBuilder
23 | - Introduced JobErrHandler machinery and changed the way onJobFailure works
24 | - Made default job-timeout configurable via cfgDefaultJobTimeout
25 | - Delete jobs when they are successful
26 |
27 | ## 0.1.0
28 |
29 | - Initial release
30 | - LISTEN/NOTIFY for immediate execution of jobs
31 | - Polling for execution of jobs in the future, and for retrying failed jobs.
32 | - Utilities for rapidly building CLIs that can be deployed as background daemons
33 | - Structured logging
34 | - Graceful shutdown
35 | - Basic concurrency control
36 | - Incomplete web UI
37 | - First cut of Haddock docs
38 | - Simple CLI example
39 |
--------------------------------------------------------------------------------
/.github/workflows/nix.yaml:
--------------------------------------------------------------------------------
1 | name: "Nix"
2 | on:
3 | pull_request:
4 | push:
5 | jobs:
6 | tests:
7 | runs-on: ubuntu-latest
8 | steps:
9 | - uses: actions/checkout@v3
10 | - name: Local cache
11 | uses: actions/cache@v3
12 | with:
13 | path: /nix/store
14 | key: "${{ runner.os }}-nix-cache"
15 | - uses: cachix/install-nix-action@v20
16 | with:
17 | nix_path: nixpkgs=channel:nixos-unstable
18 | extra_nix_config: |
19 | access-tokens = github.com=${{ secrets.GITHUB_TOKEN }}
20 |
21 | # follow guide: https://nix.dev/tutorials/continuous-integration-github-actions
22 | # this uses the tokens which are revokable
23 | - uses: cachix/cachix-action@v12
24 | with:
25 | name: jappie
26 | # If you chose API tokens for write access OR if you have a private cache
27 | # authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}'
28 | signingKey: '${{ secrets.CACHIX_SIGNING_KEY }}'
29 | - run: nix build
30 | - run: nix flake check
31 | - run: nix develop -c echo OK
32 | # legacy actions
33 | - run: nix-build
34 | - run: nix-shell --run "echo OK"
35 |
--------------------------------------------------------------------------------
/flake.lock:
--------------------------------------------------------------------------------
1 | {
2 | "nodes": {
3 | "flake-compat": {
4 | "flake": false,
5 | "locked": {
6 | "lastModified": 1673956053,
7 | "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
8 | "owner": "edolstra",
9 | "repo": "flake-compat",
10 | "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
11 | "type": "github"
12 | },
13 | "original": {
14 | "owner": "edolstra",
15 | "repo": "flake-compat",
16 | "type": "github"
17 | }
18 | },
19 | "nixpkgs": {
20 | "locked": {
21 | "lastModified": 1688556768,
22 | "narHash": "sha256-mhd6g0iJGjEfOr3+6mZZOclUveeNr64OwxdbNtLc8mY=",
23 | "owner": "nixos",
24 | "repo": "nixpkgs",
25 | "rev": "27bd67e55fe09f9d68c77ff151c3e44c4f81f7de",
26 | "type": "github"
27 | },
28 | "original": {
29 | "owner": "nixos",
30 | "ref": "nixpkgs-unstable",
31 | "repo": "nixpkgs",
32 | "type": "github"
33 | }
34 | },
35 | "root": {
36 | "inputs": {
37 | "flake-compat": "flake-compat",
38 | "nixpkgs": "nixpkgs"
39 | }
40 | }
41 | },
42 | "root": "root",
43 | "version": 7
44 | }
45 |
--------------------------------------------------------------------------------
/flake.nix:
--------------------------------------------------------------------------------
1 | # I used chatgpt to generate this template and then just
2 | # modified to how I normally use these things.
3 | {
4 | description = "odd jobs";
5 |
6 | inputs = {
7 | nixpkgs.url = "github:nixos/nixpkgs/nixpkgs-unstable";
8 | flake-compat = {
9 | url = "github:edolstra/flake-compat";
10 | flake = false;
11 | };
12 | };
13 |
14 | outputs = { self, nixpkgs, flake-compat }:
15 | let
16 | pkgs = nixpkgs.legacyPackages.x86_64-linux;
17 | hpkgs = pkgs.haskellPackages.override {
18 | overrides = hnew: hold: {
19 | odd-jobs = pkgs.haskell.lib.dontCheck # TODO: need to figure out how to make it call nixosTest and set up a postgres
20 | (hnew.callCabal2nix "odd-jobs" ./. { });
21 | servant-static-th = pkgs.haskell.lib.dontCheck (pkgs.haskell.lib.markUnbroken hold.servant-static-th);
22 | resource-pool = hnew.callHackageDirect {
23 | pkg = "resource-pool";
24 | ver = "0.4.0.0";
25 | sha256 = "sha256-X3VI1LnkyB28ZumRzOFUNG1UIJiW1UH63ZW/CPqPln4=";
26 | } {};
27 | };
28 | };
29 | in
30 | {
31 | defaultPackage.x86_64-linux = hpkgs.odd-jobs;
32 | inherit pkgs;
33 | devShell.x86_64-linux = hpkgs.shellFor {
34 | packages = ps : [ ps."odd-jobs" ];
35 | withHoogle = true;
36 |
37 | buildInputs = [
38 | hpkgs.haskell-language-server
39 | pkgs.ghcid
40 | pkgs.cabal-install
41 | ];
42 | };
43 | };
44 | }
45 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright Author name here (c) 2019
2 |
3 | All rights reserved.
4 |
5 | Redistribution and use in source and binary forms, with or without
6 | modification, are permitted provided that the following conditions are met:
7 |
8 | * Redistributions of source code must retain the above copyright
9 | notice, this list of conditions and the following disclaimer.
10 |
11 | * Redistributions in binary form must reproduce the above
12 | copyright notice, this list of conditions and the following
13 | disclaimer in the documentation and/or other materials provided
14 | with the distribution.
15 |
16 | * Neither the name of Author name here nor the names of other
17 | contributors may be used to endorse or promote products derived
18 | from this software without specific prior written permission.
19 |
20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: CI
2 |
3 | on:
4 | pull_request:
5 | branches:
6 | - '*'
7 |
8 | jobs:
9 | stack:
10 | name: ${{ matrix.os }} / stack / ghc ${{ matrix.ghc }}
11 | runs-on: ${{ matrix.os }}
12 | env:
13 | ACTIONS_ALLOW_UNSECURE_COMMANDS: true
14 | strategy:
15 | matrix:
16 | os: [ubuntu-latest]
17 | stack: ["2.11.1"]
18 | ghc: ["8.8.4", "8.10.7", "9.2.8", "9.4.5"]
19 |
20 | services:
21 | postgres:
22 | image: postgres
23 | env:
24 | POSTGRES_PASSWORD: jobs_test
25 | POSTGRES_USER: jobs_test
26 | POSTGRES_DB: jobs_test
27 | options: >-
28 | --health-cmd pg_isready
29 | --health-interval 10s
30 | --health-timeout 5s
31 | --health-retries 5
32 | ports:
33 | - 5432:5432
34 |
35 | steps:
36 | - name: confirm that postgres is working
37 | run: |
38 | PGPASSWORD=jobs_test psql -U jobs_test -h localhost -c 'select 1'
39 |
40 | - uses: actions/checkout@v2
41 |
42 | - uses: actions/setup-haskell@v1.1
43 | name: Setup Haskell Stack
44 | with:
45 | enable-stack: true
46 | stack-no-global: true
47 | stack-setup-ghc: true
48 | stack-version: ${{ matrix.stack }}
49 | ghc-version: ${{ matrix.ghc }}
50 |
51 | - uses: actions/cache@v2
52 | name: Cache ~/.stack
53 | with:
54 | path: ~/.stack
55 | key: ${{ matrix.os }}-${{ matrix.ghc }}-stack
56 |
57 | - name: Build
58 | run: |
59 | stack build
60 |
61 | - name: Test
62 | run: |
63 | stack test
64 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [![CI](https://github.com/saurabhnanda/odd-jobs/workflows/CI/badge.svg)](https://github.com/saurabhnanda/odd-jobs/actions)
2 |
3 | # Introduction
4 |
5 | - [Odd Jobs home page](https://www.haskelltutorials.com/odd-jobs) - contains a description of top-level features of this library
6 | - [Getting started & implementation guide for Odd Jobs](https://www.haskelltutorials.com/odd-jobs/guide.html)
7 | - [Haskell Job Queues: An Ultimate Guide](https://www.haskelltutorials.com/odd-jobs/haskell-job-queues-ultimate-guide.html) - A detailed writeup on why we built Odd Jobs and how it compares against other similar libraries.
8 | - [Start reading Hackage documentation from `OddJobs.Job`](https://hackage.haskell.org/package/odd-jobs-0.2.2/docs/OddJobs-Job.html) (**Note:** Please ensure you're reading docs for the correct version of the library)
9 | - Open an issue on [Odd Jobs Github repo](https://github.com/saurabhnanda/odd-jobs) if you need help, or want to collaborate.
10 | - Please :star: the repository for good karma and :heart:
11 |
12 | # odd-jobs in production?
13 |
14 | If you are already using, or considering using, `odd-jobs` in production, please read [production usage-reports](https://github.com/saurabhnanda/odd-jobs/issues/44). It would be great if you could add your own usage-report to that discussion thread as well. **We need more success stories of Haskell in production!**
15 |
16 | # Contributing
17 |
18 | Please read the [contribution guidelines](./CONTRIBUTING.md)
19 |
20 | ## Development
21 |
22 | ### Prerequisites
23 |
24 | - [The Haskell Tool Stack](https://docs.haskellstack.org/en/stable/README/#how-to-install)
25 | - `libpq-dev` library (required for PostgreSQL dependency)
26 |
27 | ### Build
28 |
29 | ```bash
30 | stack build
31 | ```
32 |
33 |
34 | ### Running tests
35 |
36 | Add the users to postgresql:
37 |
38 |
39 | ```
40 | CREATE USER jobs_test WITH SUPERUSER PASSWORD 'jobs_test';
41 | CREATE DATABASE jobs_test OWNER jobs_test;
42 | ```
43 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contribution Guidelines
2 |
3 | ## Goal for odd-jobs
4 |
5 | Broadly speaking, I want to strike a good balance between features, configurability, and ease-of-use - with the last one getting a little more weightage. For example, I don't want `odd-jobs` to devolve into a mess of complicated types and custom monads, which, _theoretically_, give a lot of compile-time guarantees, but _practically_ make the library nearly-impossible to use!
6 |
7 | I'd like to see odd-jobs become a mature and production-ready job-queue.
8 |
9 | ## How to contribute?
10 |
11 | 1. For small fixes (like obvious typos, obvious omissions in docs, etc), go ahead and directly open a PR. Please don't worry if the PR is open for a few days - I generally work on things in a batch.
12 | 2. For integrating with other frameworks, like Yesod, Snap, IHP, and more, if you are aware of how to reasonably integrate with the target framework, please open the PR. We will need to decide whether to convert this repo into a mono-repo, so that all such integrations can be released in-step with core package.
13 | 3. For larger features or fixes, it's best to quickly bounce-off the idea on the issue tracker before investing time in writing code.
14 | - I already have a broad roadmap in mind for the project (search for [issues tagged with `roadmap`](https://github.com/saurabhnanda/odd-jobs/issues?q=is%3Aissue+is%3Aopen+label%3Aroadmap)). See if the feature/fix you have in mind is already on the roadmap. Participate in the relevant discussion (on the issue tracker) and let your intent to work on that issue be known (to avoid effort duplication).
15 | - If you want to work on something that is not on the roadmap, please raise an issue and start a discussion first. Try sharing the real-life use-case for the feature, and brainstorming possible solutions. Designing something that is powerful, yet simple, is harder than it sounds!
16 |
17 | ## Hacktoberfest 2020
18 |
19 |
20 |
21 | We are participating (for the first time ever) in a Hacktoberfest. I will keep some time aside throughout the month of October to participate in discussions, and review + merge PRs. If you are interested, please take a look at all the [issues tagged with `hacktoberfest` label](https://github.com/saurabhnanda/odd-jobs/issues?q=is%3Aissue+is%3Aopen+label%3Ahacktoberfest). They seem like they can be tackled within one month of picking them up.
22 |
--------------------------------------------------------------------------------
/assets/css/styles.css:
--------------------------------------------------------------------------------
1 | .currently-running {
2 | /*border-bottom:3px dotted #ddd;*/
3 | /*margin-bottom:1em;*/
4 | /*padding-bottom:1em;*/
5 | }
6 |
7 | .row.job-log:not(:last-child) {
8 | border-bottom:1px solid #eee;
9 | margin-bottom:5px;
10 | padding-bottom:5px;
11 | }
12 |
13 | td.job-type {
14 | /*text-align:right;*/
15 | width:15%;
16 | min-width:9em;
17 | }
18 |
19 | .job-run-time {
20 | font-size:90%;
21 | color:#777;
22 | display:block;
23 | }
24 |
25 | .job-completed-at {
26 | display:block;
27 | font-size:90%;
28 | color:#777;
29 | text-decoration:underline;
30 | text-decoration-style:dotted;
31 | }
32 |
33 | .job-runner-name {
34 | display:block;
35 | font-size:90%;
36 | color:#777;
37 | }
38 |
39 | .label.job-runner {
40 | float:right;
41 | }
42 |
43 | .job .job-payload {
44 | color: #777;
45 | display: inline-block;
46 | }
47 |
48 | .job-payload .key {
49 | color:#8e44ad;
50 | display:inline-block;
51 | margin-right:.2em;
52 | }
53 |
54 | .key-value-pair {
55 | /* display:inline-block; */
56 | margin-right:0.5em;
57 | }
58 |
59 | ul.nav.nav-pills.job-type-nav > li > a {
60 | /*padding:5px 8px;*/
61 | }
62 |
63 | .search-bar .search-filter {
64 | background-color:#f2f2f2;
65 | /*padding:5px 10px;*/
66 | }
67 |
68 | .search-bar .filter-name {
69 | display:inline-block;
70 | padding:5px 5px 5px 8px;
71 | font-weight:bold;
72 | border-right:1px solid #ddd;
73 | }
74 |
75 | .filter-value {
76 | padding:5px 8px 5px 5px;
77 | display:inline-block;
78 | }
79 |
80 | ul.search-bar > li.search-filter {
81 | padding:0px;
82 | margin-right: 0.5em;
83 | }
84 |
85 | .filter-value i.glyphicon {
86 | margin-left:4px;
87 | /*vertical-align:middle;*/
88 | }
89 |
90 | .search-container {
91 | display:flex;
92 | flex-direction:row;
93 | background:white;
94 | }
95 |
96 | .search-container ul.search-bar {
97 | padding:4px;
98 | flex-grow:2;
99 | margin:0px;
100 | border:1px solid #ccc;
101 | border-right-width:0px;
102 | border-radius:3px 0px 0px 3px;
103 | }
104 |
105 | .search-container button.search-button {
106 | border-top-left-radius:0px;
107 | border-bottom-left-radius:0px;
108 | }
109 |
110 | .main-nav {
111 | text-align:center;
112 | }
113 |
114 | .navbar.navbar-default.navigation-clean {
115 | box-shadow:0px 2px 5px rgba(0,0,0, 0.1);
116 | }
117 |
118 | body {
119 | background:linear-gradient(to bottom, #f9f9f9, #ffffff);
120 | }
121 |
122 | .json-ellipsis {
123 | color:#8e44ad;
124 | font-weight:bold;
125 | display:inline-block;
126 | padding:0px 2px;
127 | background:#f2f2f2;
128 | }
129 |
130 |
131 | abbr[title] {
132 | border-bottom: inherit;
133 | }
134 |
135 | .list-group-item.active-nav {
136 | border-left: 3px solid #28a745;
137 | }
138 |
139 | .job .job-type {
140 | font-weight: bold;
141 | display: inline-block;
142 | margin-right: 0.75rem;
143 | }
144 |
145 | .job .job-error.collapsed {
146 |   height: 1.5em;
147 |   overflow: hidden;
148 |   text-overflow: ellipsis;
149 |   /* was "no-wrap", which is not a valid white-space value; "nowrap" is
150 |      required for text-overflow: ellipsis to have any effect */
151 |   white-space: nowrap;
152 |   color: #777;
153 | }
152 |
153 |
--------------------------------------------------------------------------------
/src/OddJobs/Job/Query.hs:
--------------------------------------------------------------------------------
1 |
2 | -- | Split out the queries because the multiline strings conflict
3 | -- with CPP.
4 | -- see https://gitlab.haskell.org/ghc/ghc/-/issues/16520
5 | module OddJobs.Job.Query
6 | ( jobPollingSql
7 | , jobPollingWithResourceSql
8 | , killJobPollingSql
9 | , qWithResources
10 | , createJobQuery
11 | , ensureResource
12 | , registerResourceUsage
13 | , concatJobDbColumns
14 | , jobDbColumns
15 | )
16 | where
17 |
18 | import Database.PostgreSQL.Simple(Query)
19 | import Data.String
20 |
21 | -- | Ref: 'jobPoller'
22 | jobPollingSql :: Query
23 | jobPollingSql =
24 | "update ? set status = ?, locked_at = ?, locked_by = ?, attempts=attempts+1 \
25 | \ WHERE id in (select id from ? where (run_at<=? AND ((status in ?) OR (status = ? and locked_at))) \
26 | \ ORDER BY attempts ASC, run_at ASC LIMIT 1 FOR UPDATE) RETURNING id"
27 |
28 | jobPollingWithResourceSql :: Query
29 | jobPollingWithResourceSql =
30 | " UPDATE ? SET status = ?, locked_at = ?, locked_by = ?, attempts = attempts + 1 \
31 | \ WHERE id in (select id from ? where (run_at<=? AND ((status in ?) OR (status = ? and locked_at))) \
32 | \ AND ?(id) \
33 | \ ORDER BY attempts ASC, run_at ASC LIMIT 1) \
34 | \ RETURNING id"
35 |
36 | -- | Ref: 'killJobPoller'
37 | killJobPollingSql :: Query
38 | killJobPollingSql =
39 | "UPDATE ? SET locked_at = NULL, locked_by = NULL \
40 | \ WHERE id IN (SELECT id FROM ? WHERE status = ? AND locked_by = ? AND locked_at <= ? \
41 | \ ORDER BY locked_at ASC LIMIT 1 FOR UPDATE \
42 | \ ) RETURNING id"
43 |
44 | qWithResources :: Query
45 | qWithResources =
46 | "UPDATE ? SET status=?, locked_at=now(), locked_by=?, attempts=attempts+1 \
47 | \ WHERE id=? AND status in ? AND ?(id) RETURNING id"
48 |
49 |
50 | createJobQuery :: Query
51 | createJobQuery = "INSERT INTO ? (run_at, status, payload, last_error, attempts, locked_at, locked_by) VALUES (?, ?, ?, ?, ?, ?, ?) RETURNING " <> concatJobDbColumns
52 |
53 | ensureResource :: Query
54 | ensureResource = "INSERT INTO ? (id, usage_limit) VALUES (?, ?) ON CONFLICT DO NOTHING"
55 |
56 | registerResourceUsage :: Query
57 | registerResourceUsage = "INSERT INTO ? (job_id, resource_id, usage) VALUES (?, ?, ?)"
58 |
59 | -- | All 'jobDbColumns' joined together with commas. Useful for constructing SQL
60 | -- queries, eg:
61 | --
62 | -- @'query_' conn $ "SELECT " <> concatJobDbColumns <> " FROM jobs"@
63 |
64 | concatJobDbColumns :: (IsString s, Semigroup s) => s
65 | concatJobDbColumns = concatJobDbColumns_ jobDbColumns ""
66 | where
67 | concatJobDbColumns_ [] x = x
68 | concatJobDbColumns_ [col] x = x <> col
69 | concatJobDbColumns_ (col:cols) x = concatJobDbColumns_ cols (x <> col <> ", ")
70 |
71 | -- | If you are writing SQL queries where you want to return ALL columns from
72 | -- the jobs table it is __recommended__ that you do not issue a @SELECT *@ or
73 | -- @RETURNING *@. List out specific DB columns using 'jobDbColumns' and
74 | -- 'concatJobDbColumns' instead. This will insulate you from runtime errors
75 | -- caused by addition of new columns to 'cfgTableName' in future versions of
76 | -- OddJobs.
77 | jobDbColumns :: (IsString s, Semigroup s) => [s]
78 | jobDbColumns =
79 | [ "id"
80 | , "created_at"
81 | , "updated_at"
82 | , "run_at"
83 | , "status"
84 | , "payload"
85 | , "last_error"
86 | , "attempts"
87 | , "locked_at"
88 | , "locked_by"
89 | ]
90 |
--------------------------------------------------------------------------------
/dev/DevelMain.hs:
--------------------------------------------------------------------------------
1 | {-# LANGUAGE OverloadedStrings #-}
2 |
3 | -- | Running your app inside GHCi.
4 | --
5 | -- > stack ghci
6 | --
7 | -- To start your app, run:
8 | --
9 | -- > :l DevelMain
10 | -- > DevelMain.update
11 | --
12 | -- You can also call @DevelMain.shutdown@ to stop the app
13 | --
14 | -- There is more information about this approach,
15 | -- on the wiki: https://github.com/yesodweb/yesod/wiki/ghci
16 |
17 | module DevelMain where
18 |
19 | import Prelude
20 |
21 | import Control.Concurrent (MVar, ThreadId, forkIO, killThread,
22 | newEmptyMVar, putMVar, takeMVar)
23 | import Control.Exception (finally)
24 | import Control.Monad ((>=>))
25 | import Data.IORef (IORef, newIORef, readIORef,
26 | writeIORef)
27 | import Foreign.Store (Store (..), lookupStore, readStore,
28 | storeAction, withStore)
29 | import GHC.Word (Word32)
30 | import Network.Wai.Handler.Warp (defaultSettings, runSettings,
31 | setPort)
32 |
33 | -- import OddJobs.Endpoints (startApp, stopApp)
34 | -- import qualified ElmCodeGen
35 |
36 | -- | Start or restart the server.
37 | -- newStore is from foreign-store.
38 | -- A Store holds onto some data across ghci reloads
39 | update :: IO ()
40 | update = undefined
41 | -- update = do
42 | -- mtidStore <- lookupStore tidStoreNum
43 | -- case mtidStore of
44 | -- -- no server running
45 | -- Nothing -> do
46 | -- done <- storeAction doneStore newEmptyMVar
47 | -- tid <- start done
48 | -- _ <- storeAction (Store tidStoreNum) (newIORef tid)
49 | -- return ()
50 | -- -- server is already running
51 | -- Just tidStore -> restartAppInNewThread tidStore
52 | -- where
53 | -- doneStore :: Store (MVar ())
54 | -- doneStore = Store 0
55 |
56 | -- -- shut the server down with killThread and wait for the done signal
57 | -- restartAppInNewThread :: Store (IORef ThreadId) -> IO ()
58 | -- restartAppInNewThread tidStore = modifyStoredIORef tidStore $ \tid -> do
59 | -- killThread tid
60 | -- withStore doneStore takeMVar
61 | -- readStore doneStore >>= start
62 |
63 |
64 | -- -- | Start the server in a separate thread.
65 | -- start :: MVar () -- ^ Written to when the thread is killed.
66 | -- -> IO ThreadId
67 | -- start done = do
68 | -- -- (port, config, app) <- initialize
69 | -- -- ElmCodeGen.updatea
70 | -- forkIO (finally startApp
71 | -- -- Note that this implies concurrency
72 | -- -- between shutdownApp and the next app that is starting.
73 | -- -- Normally this should be fine
74 | -- (putMVar done () >> stopApp))
75 |
76 | -- | kill the server
77 | shutdown :: IO ()
78 | shutdown = do
79 | mtidStore <- lookupStore tidStoreNum
80 | case mtidStore of
81 | -- no server running
82 | Nothing -> putStrLn "no app running"
83 | Just tidStore -> do
84 | withStore tidStore $ readIORef >=> killThread
85 | putStrLn "App is shutdown"
86 |
87 | tidStoreNum :: Word32
88 | tidStoreNum = 1
89 |
90 | modifyStoredIORef :: Store (IORef a) -> (a -> IO a) -> IO ()
91 | modifyStoredIORef store f = withStore store $ \ref -> do
92 | v <- readIORef ref
93 | f v >>= writeIORef ref
94 |
95 | main :: IO ()
96 | main = update
97 |
--------------------------------------------------------------------------------
/assets/bootstrap/css/bootstrap-reboot.min.css:
--------------------------------------------------------------------------------
1 | /*!
2 | * Bootstrap Reboot v4.4.1 (https://getbootstrap.com/)
3 | * Copyright 2011-2019 The Bootstrap Authors
4 | * Copyright 2011-2019 Twitter, Inc.
5 | * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
6 | * Forked from Normalize.css, licensed MIT (https://github.com/necolas/normalize.css/blob/master/LICENSE.md)
7 | */*,::after,::before{box-sizing:border-box}html{font-family:sans-serif;line-height:1.15;-webkit-text-size-adjust:100%;-webkit-tap-highlight-color:transparent}article,aside,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}body{margin:0;font-family:-apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,"Helvetica Neue",Arial,"Noto Sans",sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";font-size:1rem;font-weight:400;line-height:1.5;color:#212529;text-align:left;background-color:#fff}[tabindex="-1"]:focus:not(:focus-visible){outline:0!important}hr{box-sizing:content-box;height:0;overflow:visible}h1,h2,h3,h4,h5,h6{margin-top:0;margin-bottom:.5rem}p{margin-top:0;margin-bottom:1rem}abbr[data-original-title],abbr[title]{text-decoration:underline;-webkit-text-decoration:underline dotted;text-decoration:underline dotted;cursor:help;border-bottom:0;-webkit-text-decoration-skip-ink:none;text-decoration-skip-ink:none}address{margin-bottom:1rem;font-style:normal;line-height:inherit}dl,ol,ul{margin-top:0;margin-bottom:1rem}ol ol,ol ul,ul ol,ul ul{margin-bottom:0}dt{font-weight:700}dd{margin-bottom:.5rem;margin-left:0}blockquote{margin:0 0 1rem}b,strong{font-weight:bolder}small{font-size:80%}sub,sup{position:relative;font-size:75%;line-height:0;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}a{color:#007bff;text-decoration:none;background-color:transparent}a:hover{color:#0056b3;text-decoration:underline}a:not([href]){color:inherit;text-decoration:none}a:not([href]):hover{color:inherit;text-decoration:none}code,kbd,pre,samp{font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace;font-size:1em}pre{margin-top:0;margin-bottom:1rem;overflow:auto}figure{margin:0 0 
1rem}img{vertical-align:middle;border-style:none}svg{overflow:hidden;vertical-align:middle}table{border-collapse:collapse}caption{padding-top:.75rem;padding-bottom:.75rem;color:#6c757d;text-align:left;caption-side:bottom}th{text-align:inherit}label{display:inline-block;margin-bottom:.5rem}button{border-radius:0}button:focus{outline:1px dotted;outline:5px auto -webkit-focus-ring-color}button,input,optgroup,select,textarea{margin:0;font-family:inherit;font-size:inherit;line-height:inherit}button,input{overflow:visible}button,select{text-transform:none}select{word-wrap:normal}[type=button],[type=reset],[type=submit],button{-webkit-appearance:button}[type=button]:not(:disabled),[type=reset]:not(:disabled),[type=submit]:not(:disabled),button:not(:disabled){cursor:pointer}[type=button]::-moz-focus-inner,[type=reset]::-moz-focus-inner,[type=submit]::-moz-focus-inner,button::-moz-focus-inner{padding:0;border-style:none}input[type=checkbox],input[type=radio]{box-sizing:border-box;padding:0}input[type=date],input[type=datetime-local],input[type=month],input[type=time]{-webkit-appearance:listbox}textarea{overflow:auto;resize:vertical}fieldset{min-width:0;padding:0;margin:0;border:0}legend{display:block;width:100%;max-width:100%;padding:0;margin-bottom:.5rem;font-size:1.5rem;line-height:inherit;color:inherit;white-space:normal}progress{vertical-align:baseline}[type=number]::-webkit-inner-spin-button,[type=number]::-webkit-outer-spin-button{height:auto}[type=search]{outline-offset:-2px;-webkit-appearance:none}[type=search]::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{font:inherit;-webkit-appearance:button}output{display:inline-block}summary{display:list-item;cursor:pointer}template{display:none}[hidden]{display:none!important}
8 | /*# sourceMappingURL=bootstrap-reboot.min.css.map */
--------------------------------------------------------------------------------
/package.yaml:
--------------------------------------------------------------------------------
1 | name: odd-jobs
2 | version: 0.2.2
3 | synopsis: A full-featured PostgreSQL-backed job queue (with an admin UI)
4 | description: |
5 | - Background jobs library for Haskell.
6 | - Extracted from production code at [Vacation Labs](https://www.vacationlabs.com).
7 | - Inspired by the Delayed Jobs library in Rails.
8 | - Queue backend is a Postgres table.
9 |   - Queue is monitored using both LISTEN/NOTIFY and polling the DB.
10 | - Comes with an in-built admin UI.
11 |   - Comes with utilities to help you build a CLI for your job-queue.
12 | - Highly configurable and monitorable
13 | homepage: https://www.haskelltutorials.com/odd-jobs
14 | bug-reports: https://github.com/saurabhnanda/odd-jobs/issues
15 | license: BSD3
16 | author: Saurabh Nanda
17 | maintainer: saurabhnanda@gmail.com
18 | copyright: 2016-2020 Saurabh Nanda
19 | category: Web
20 | extra-source-files:
21 | - README.md
22 | - CHANGELOG.md
23 | - assets/bootstrap/css/bootstrap-grid.css
24 | - assets/bootstrap/css/bootstrap-grid.min.css
25 | - assets/bootstrap/css/bootstrap-reboot.css
26 | - assets/bootstrap/css/bootstrap-reboot.min.css
27 | - assets/bootstrap/css/bootstrap.css
28 | - assets/bootstrap/css/bootstrap.min.css
29 | - assets/bootstrap/js/bootstrap.bundle.js
30 | - assets/bootstrap/js/bootstrap.bundle.min.js
31 | - assets/bootstrap/js/bootstrap.js
32 | - assets/bootstrap/js/bootstrap.min.js
33 | - assets/css/Navigation-Clean1.css
34 | - assets/css/logo-slider.css
35 | - assets/css/styles.css
36 | - assets/js/custom.js
37 | - assets/js/jquery.min.js
38 | - assets/js/logo-slider.js
39 | - assets/odd-jobs-color-logo.png
40 |
41 |
42 | ghc-options:
43 | - -Wall
44 | - -fno-warn-orphans
45 | - -fno-warn-unused-imports
46 | - -fno-warn-dodgy-exports
47 | - -Werror=missing-fields
48 | - -Werror=incomplete-patterns
49 |
50 | default-extensions:
51 | - NamedFieldPuns
52 | - LambdaCase
53 | - TemplateHaskell
54 | - ScopedTypeVariables
55 | - GeneralizedNewtypeDeriving
56 | - QuasiQuotes
57 | - OverloadedStrings
58 |
59 | dependencies:
60 | - base >= 4.7 && < 5
61 | - async ==2.2.4
62 | - hdaemonize
63 | - text
64 | - postgresql-simple
65 | - bytestring
66 | - safe
67 | - text-conversions
68 | - aeson
69 | - resource-pool
70 | - unliftio
71 | - unliftio-core
72 | - mtl
73 | - monad-logger
74 | - fast-logger
75 | - either
76 | - time
77 | - unix
78 | - monad-control
79 | - string-conv
80 | - hostname
81 | - lucid
82 | - timing-convenience
83 | - friendly-time
84 | - servant
85 | - servant-server
86 | - servant-lucid
87 | - warp
88 | - containers
89 | - unordered-containers
90 | - optparse-applicative
91 | - filepath
92 | - directory
93 | - generic-deriving
94 | - servant-static-th
95 | - wai
96 |
97 | library:
98 | source-dirs:
99 | - src
100 | exposed-modules:
101 | - OddJobs.Job
102 | - OddJobs.Migrations
103 | - OddJobs.Web
104 | - OddJobs.Endpoints
105 | - OddJobs.Cli
106 | - OddJobs.Types
107 | - OddJobs.ConfigBuilder
108 |
109 | executables:
110 | devel:
111 | source-dirs:
112 | - dev
113 | - src
114 | main: DevelMain
115 | ghc-options:
116 | - -threaded
117 | - -with-rtsopts=-N
118 | dependencies:
119 | - foreign-store
120 | - odd-jobs
121 |
122 | odd-jobs-cli-example:
123 | main: OddJobsCliExample.lhs
124 | source-dirs:
125 | - examples
126 | ghc-options:
127 | - -threaded
128 | - -with-rtsopts=-N
129 | - -main-is OddJobsCliExample
130 | dependencies:
131 | - odd-jobs
132 |
133 | tests:
134 | jobrunner:
135 | ghc-options:
136 | - -threaded
137 | - -with-rtsopts=-N
138 | main: Test
139 | source-dirs:
140 | - test
141 | - src
142 | dependencies:
143 | - tasty
144 | - tasty-discover
145 | - hedgehog
146 | - tasty-hedgehog
147 | - tasty-hunit
148 | - random
149 | - monad-control
150 | - mmorph
151 | - lifted-base
152 | - lifted-async
153 |
--------------------------------------------------------------------------------
/assets/css/Navigation-Clean1.css:
--------------------------------------------------------------------------------
1 | .navigation-clean {
2 | background:#fff;
3 | padding-top:10px;
4 | padding-bottom:10px;
5 | color:#333;
6 | border-radius:0;
7 | box-shadow:none;
8 | border:none;
9 | margin-bottom:0;
10 | }
11 |
12 | @media (max-width:767px) {
13 | .navigation-clean {
14 | padding-top:0;
15 | padding-bottom:0;
16 | }
17 | }
18 |
19 | @media (max-width:767px) {
20 | .navigation-clean .navbar-header {
21 | padding-top:10px;
22 | padding-bottom:10px;
23 | }
24 | }
25 |
26 | .navigation-clean .navbar-brand {
27 | font-weight:bold;
28 | color:inherit;
29 | }
30 |
31 | .navigation-clean .navbar-brand:hover {
32 | color:#222;
33 | }
34 |
35 | .navigation-clean.navbar-inverse .navbar-brand:hover {
36 | color:#f0f0f0;
37 | }
38 |
39 | .navigation-clean .navbar-brand img {
40 | height:100%;
41 | display:inline-block;
42 | margin-right:10px;
43 | width:auto;
44 | }
45 |
46 | .navigation-clean .navbar-toggle {
47 | border-color:#ddd;
48 | }
49 |
50 | .navigation-clean .navbar-toggle:hover, .navigation-clean .navbar-toggle:focus {
51 | background:none;
52 | }
53 |
54 | .navigation-clean.navbar-inverse .navbar-toggle {
55 | border-color:#555;
56 | }
57 |
58 | .navigation-clean .navbar-toggle .icon-bar {
59 | background-color:#888;
60 | }
61 |
62 | .navigation-clean.navbar-inverse .navbar-toggle .icon-bar {
63 | background-color:#eee;
64 | }
65 |
66 | .navigation-clean .navbar-collapse, .navigation-clean .navbar-form {
67 | border-top-color:#ddd;
68 | }
69 |
70 | .navigation-clean.navbar-inverse .navbar-collapse, .navigation-clean.navbar-inverse .navbar-form {
71 | border-top-color:#333;
72 | }
73 |
74 | .navigation-clean .navbar-nav > .active > a, .navigation-clean .navbar-nav > .open > a {
75 | background:none;
76 | box-shadow:none;
77 | }
78 |
79 | .navigation-clean.navbar-default .navbar-nav > .active > a, .navigation-clean.navbar-default .navbar-nav > .active > a:focus, .navigation-clean.navbar-default .navbar-nav > .active > a:hover {
80 | color:#8f8f8f;
81 | box-shadow:none;
82 | background:none;
83 | pointer-events:none;
84 | }
85 |
86 | .navigation-clean.navbar .navbar-nav > li > a {
87 | padding-left:18px;
88 | padding-right:18px;
89 | }
90 |
91 | .navigation-clean.navbar-default .navbar-nav > li > a {
92 | color:#465765;
93 | }
94 |
95 | .navigation-clean.navbar-default .navbar-nav > li > a:focus, .navigation-clean.navbar-default .navbar-nav > li > a:hover {
96 | color:#37434d !important;
97 | background-color:transparent;
98 | }
99 |
100 | .navigation-clean .navbar-nav > li > .dropdown-menu {
101 | margin-top:-5px;
102 | box-shadow:0 4px 8px rgba(0,0,0,.1);
103 | background-color:#fff;
104 | border-radius:2px;
105 | }
106 |
107 | @media (max-width:767px) {
108 | .navigation-clean .navbar-nav .open .dropdown-menu {
109 | box-shadow:none;
110 | }
111 | }
112 |
113 | @media (max-width:767px) {
114 | .navigation-clean .navbar-nav .open .dropdown-menu > li > a {
115 | color:#37434d;
116 | padding-top:12px;
117 | padding-bottom:12px;
118 | line-height:1;
119 | }
120 | }
121 |
122 | .navigation-clean .dropdown-menu > li > a:focus, .navigation-clean .dropdown-menu > li > a {
123 | line-height:2;
124 | font-size:14px;
125 | color:#37434d;
126 | }
127 |
128 | .navigation-clean .dropdown-menu > li > a:focus, .navigation-clean .dropdown-menu > li > a:hover {
129 | background:#eee;
130 | color:inherit;
131 | }
132 |
133 | .navigation-clean.navbar-inverse {
134 | background-color:#1f2021;
135 | color:#fff;
136 | }
137 |
138 | .navigation-clean.navbar-inverse .navbar-nav > .active > a, .navigation-clean.navbar-inverse .navbar-nav > .active > a:focus, .navigation-clean.navbar-inverse .navbar-nav > .active > a:hover {
139 | color:#8f8f8f;
140 | box-shadow:none;
141 | background:none;
142 | pointer-events:none;
143 | }
144 |
145 | .navigation-clean.navbar-inverse .navbar-nav > li > a {
146 | color:#dfe8ee;
147 | }
148 |
149 | .navigation-clean.navbar-inverse .navbar-nav > li > a:focus, .navigation-clean.navbar-inverse .navbar-nav > li > a:hover {
150 | color:#fff !important;
151 | background-color:transparent;
152 | }
153 |
154 | .navigation-clean.navbar-inverse .navbar-nav > li > .dropdown-menu {
155 | background-color:#1f2021;
156 | }
157 |
158 | .navigation-clean.navbar-inverse .dropdown-menu > li > a:focus, .navigation-clean.navbar-inverse .dropdown-menu > li > a {
159 | color:#f2f5f8;
160 | }
161 |
162 | .navigation-clean.navbar-inverse .dropdown-menu > li > a:focus, .navigation-clean.navbar-inverse .dropdown-menu > li > a:hover {
163 | background:#363739;
164 | }
165 |
166 | @media (max-width:767px) {
167 | .navigation-clean.navbar-inverse .navbar-nav .open .dropdown-menu > li > a {
168 | color:#fff;
169 | }
170 | }
171 |
172 |
--------------------------------------------------------------------------------
/stack-lts-18.28.yaml.lock:
--------------------------------------------------------------------------------
1 | # This file was autogenerated by Stack.
2 | # You should not edit this file by hand.
3 | # For more information, please see the documentation at:
4 | # https://docs.haskellstack.org/en/stable/lock_files
5 |
6 | packages:
7 | - completed:
8 | hackage: aeson-2.2.0.0@sha256:deeab33f7f2a235c9e9ea76168f27a3669ad51bbdb363e830015352a3b2874ec,6217
9 | pantry-tree:
10 | sha256: 2c65659ff9ab19047728c8cc2f20869ba7ca78a09fe03cecf826f5857f3d4c95
11 | size: 83036
12 | original:
13 | hackage: aeson-2.2.0.0
14 | - completed:
15 | hackage: timing-convenience-0.1@sha256:7ff807a9a9e5596f2b18d45c5a01aefb91d4a98f6a1008d183b5c550f68f7cb7,2092
16 | pantry-tree:
17 | sha256: fbcb00f5b8a4f7b8fd94b034b7f507ffdd7579848de782ce60ad2b661dcb2b42
18 | size: 469
19 | original:
20 | hackage: timing-convenience-0.1@sha256:7ff807a9a9e5596f2b18d45c5a01aefb91d4a98f6a1008d183b5c550f68f7cb7,2092
21 | - completed:
22 | hackage: resource-pool-0.4.0.0@sha256:9c1e448a159875e21a7e68697feee2b61a4e584720974fa465a2fa1bc0776c73,1342
23 | pantry-tree:
24 | sha256: 1008fbb9b6bd9d12925989f2708af612daa3a8ebf3a56763016231461d9e314f
25 | size: 406
26 | original:
27 | hackage: resource-pool-0.4.0.0@sha256:9c1e448a159875e21a7e68697feee2b61a4e584720974fa465a2fa1bc0776c73,1342
28 | - completed:
29 | hackage: integer-conversion-0.1@sha256:9f77cc7711d3100a4483f2dd1a22f4be5b59d235a556d910d0e6c5e90a967551,2208
30 | pantry-tree:
31 | sha256: 17063e15091711b54f9a4a9ccb032f3c2ed68bb4ba097243333469ad8462b748
32 | size: 509
33 | original:
34 | hackage: integer-conversion-0.1@sha256:9f77cc7711d3100a4483f2dd1a22f4be5b59d235a556d910d0e6c5e90a967551,2208
35 | - completed:
36 | hackage: text-iso8601-0.1@sha256:fc10d8de72fc094d0d299644f17421b9430d1c1092a1355c7f0c02d8b6edf6a7,2371
37 | pantry-tree:
38 | sha256: f72aa62f5f4d1155b3d3f35e8227548a27cee83fb5d0742564b5ef26597f1431
39 | size: 427
40 | original:
41 | hackage: text-iso8601-0.1@sha256:fc10d8de72fc094d0d299644f17421b9430d1c1092a1355c7f0c02d8b6edf6a7,2371
42 | - completed:
43 | hackage: th-abstraction-0.5.0.0@sha256:4351cadf6bf0ca80c90225bae0b12f4cfdd550f30c333b050186af5a8bc19457,2236
44 | pantry-tree:
45 | sha256: f1e547789ee75cf1d880c2dd6498254e9a37de052842b573b43c4b4a1ebcfd6d
46 | size: 670
47 | original:
48 | hackage: th-abstraction-0.5.0.0@sha256:4351cadf6bf0ca80c90225bae0b12f4cfdd550f30c333b050186af5a8bc19457,2236
49 | - completed:
50 | hackage: servant-0.20.1@sha256:ec6b11132f3448a0f7daa6b3d0daf2634062017df89855c6b9d8268944c72243,5436
51 | pantry-tree:
52 | sha256: 1d165bc7ebf50280ba39191112eba0368bb9af55b7345bc17318fdcf803efdba
53 | size: 2873
54 | original:
55 | hackage: servant-0.20.1
56 | - completed:
57 | hackage: servant-server-0.20@sha256:1bb179f028b0b2b28222f04bd589f9fbc6f855194e73a7cbc692e097f3b30574,5739
58 | pantry-tree:
59 | sha256: 8ae235fc825018111ecfeb74fe63753010ed4585a1fc391422b8083ce3ba3ae8
60 | size: 2615
61 | original:
62 | hackage: servant-server-0.20
63 | - completed:
64 | hackage: postgresql-simple-0.6.5.1@sha256:527939755637c1ea3fdad2819a7725e63d0a01931678e631549367c9faedc620,5280
65 | pantry-tree:
66 | sha256: b945116ff80da1f0638914d900459a8eb64b675c238d7ab87b81f16cf01d75c1
67 | size: 4197
68 | original:
69 | hackage: postgresql-simple-0.6.5.1
70 | - completed:
71 | hackage: generically-0.1.1@sha256:378ec049bc2853b8011df116647fbd34bb9f00edce9840e4957f98abc097597c,1169
72 | pantry-tree:
73 | sha256: 9f30503d1fe709f3849c5dd8b9751697a8db4d66105d7ba9c3b98bf4e36bb232
74 | size: 233
75 | original:
76 | hackage: generically-0.1.1
77 | - completed:
78 | hackage: servant-lucid-0.9.0.6@sha256:4a557ab3c535f9e267b0f614375aa2d9db070997ac566d5ffde8d8cb47a51376,1804
79 | pantry-tree:
80 | sha256: d1c55089c3d69efb3ebf0ddd913eb2e3582e58d849d78116cc0b417a049b0a9f
81 | size: 392
82 | original:
83 | hackage: servant-lucid-0.9.0.6
84 | - completed:
85 | hackage: servant-static-th-1.0.0.0@sha256:7dc46a426d8c78ee0386729159e8bbc18ff98e0dd700a2c723c87901b204fa6c,3883
86 | pantry-tree:
87 | sha256: 13269fcc1c871ee1548d31010f0278484318bffbd9b8e0c0bfdfab4e27d6c3d5
88 | size: 1493
89 | original:
90 | hackage: servant-static-th-1.0.0.0
91 | - completed:
92 | hackage: semialign-1.3@sha256:7be9ef5ca1d6b052991f68c053aab68b9d1ab3b1938c9557ac84c97937815223,2888
93 | pantry-tree:
94 | sha256: e5daa7e0023dabb1b21a04bf084364b94e45e81b380e950b90f51294a1990b87
95 | size: 537
96 | original:
97 | hackage: semialign-1.3
98 | - completed:
99 | hackage: OneTuple-0.3.1@sha256:fc32cb744477befa450a538ea4975cc523f0a2f1585cb5a36e9936a3d18e9a3c,2276
100 | pantry-tree:
101 | sha256: a45de98364d5a128ffcc07a05f2f3dd0befee311c7e106bc86dc87548288bc6b
102 | size: 506
103 | original:
104 | hackage: OneTuple-0.3.1
105 | snapshots:
106 | - completed:
107 | sha256: 428ec8d5ce932190d3cbe266b9eb3c175cd81e984babf876b64019e2cbe4ea68
108 | size: 590100
109 | url: https://raw.githubusercontent.com/commercialhaskell/stackage-snapshots/master/lts/18/28.yaml
110 | original: lts-18.28
111 |
--------------------------------------------------------------------------------
/src/OddJobs/Migrations.hs:
--------------------------------------------------------------------------------
1 | {-# LANGUAGE RecordWildCards #-}
2 | module OddJobs.Migrations
3 | ( module OddJobs.Migrations
4 | , module OddJobs.Types
5 | )
6 | where
7 |
8 | import Database.PostgreSQL.Simple as PGS
9 | import Database.PostgreSQL.Simple.Types as PGS
10 | import Data.Functor (void)
11 | import OddJobs.Types
12 |
-- | DDL for the jobs table and its supporting indexes. The '?' placeholders
-- are substituted by 'createJobTable' with the table name and index
-- identifiers. Safe to re-run (uses @IF NOT EXISTS@ throughout).
createJobTableQuery :: Query
createJobTableQuery = mconcat
  [ "CREATE TABLE IF NOT EXISTS ?"
  , "( id serial primary key"
  , ", created_at timestamp with time zone default now() not null"
  , ", updated_at timestamp with time zone default now() not null"
  , ", run_at timestamp with time zone default now() not null"
  , ", status text not null"
  , ", payload jsonb not null"
  , ", last_error jsonb null"
  , ", attempts int not null default 0"
  , ", locked_at timestamp with time zone null"
  , ", locked_by text null"
    -- The CHECK constraint ties the locking columns to the status column so
    -- that a row can never be half-locked.
  , ", constraint incorrect_locking_info CHECK ("
  , "(locked_at is null and locked_by is null and status <> 'locked') or "
  , "(locked_at is not null and locked_by is not null and (status = 'locked' or status = 'cancelled')))"
  , ");"
  , "create index if not exists ? on ?(created_at);"
  , "create index if not exists ? on ?(updated_at);"
  , "create index if not exists ? on ?(locked_at);"
  , "create index if not exists ? on ?(locked_by);"
  , "create index if not exists ? on ?(status);"
  , "create index if not exists ? on ?(run_at);"
  ]
35 |
-- | DDL that installs a plpgsql trigger function and an AFTER INSERT trigger
-- on the jobs table. The function calls @pg_notify@ with a JSON object
-- containing the new row's @id@, @run_at@ and @locked_at@. The '?'
-- placeholders (function name, channel, trigger name, table name) are
-- substituted by 'createJobTable'.
createNotificationTrigger :: Query
createNotificationTrigger = mconcat
  [ "create or replace function ?() returns trigger as $$"
  , "begin \n"
  , "  perform pg_notify('?', \n"
  , "    json_build_object('id', new.id, 'run_at', new.run_at, 'locked_at', new.locked_at)::text); \n"
  , "  return new; \n"
  , "end; \n"
  , "$$ language plpgsql;"
  , "drop trigger if exists ? on ?;"
  , "create trigger ? after insert on ? for each row execute procedure ?();"
  ]
46 |
-- | Creates the jobs table (with its indexes) and installs the insert
-- notification trigger for the given table name. The table/index creation is
-- idempotent; the trigger is dropped and re-created on every call.
createJobTable :: Connection -> TableName -> IO ()
createJobTable conn tname = do
  -- One round-trip creates the table plus all six indexes; the tuple below
  -- matches the '?' placeholders of 'createJobTableQuery' in order:
  -- table, then (index-name, table) for each indexed column.
  void $ PGS.execute conn createJobTableQuery
    ( tname
    , idxName "created_at", tname
    , idxName "updated_at", tname
    , idxName "locked_at", tname
    , idxName "locked_by", tname
    , idxName "status", tname
    , idxName "run_at", tname
    )
  void $ PGS.execute conn createNotificationTrigger
    ( fnName
    , pgEventName tname
    , trgName
    , tname
    , trgName
    , tname
    , fnName
    )
  where
    tnameTxt = getTnameTxt tname
    -- Index names are derived from the (unqualified) table name so that two
    -- jobs tables in different schemas do not collide on index identifiers.
    idxName col = PGS.Identifier $ "idx_" <> tnameTxt <> "_" <> col
    fnName = PGS.Identifier $ "notify_job_monitor_for_" <> tnameTxt
    trgName = PGS.Identifier $ "trg_notify_job_monitor_for_" <> tnameTxt
    -- NOTE(review): only the table part of a qualified identifier is used in
    -- derived names; the schema is deliberately ignored here.
    getTnameTxt (PGS.QualifiedIdentifier _ t) = t
78 |
-- | DDL for the resources table used by resource-based concurrency control:
-- one row per resource id, carrying its usage limit. The '?' placeholder is
-- the resource table's name.
createResourceTableQuery :: Query
createResourceTableQuery = mconcat
  [ "CREATE TABLE IF NOT EXISTS ?"
  , "( id text primary key"
  , ", usage_limit int not null"
  , ")"
  ]
84 |
-- | DDL for the job\/resource usage join table. Placeholders, in order: the
-- usage table, the jobs table (FK), and the resources table (FK). Rows are
-- cascaded away when either side is deleted.
createUsageTableQuery :: Query
createUsageTableQuery = mconcat
  [ "CREATE TABLE IF NOT EXISTS ?"
  , "( job_id serial not null REFERENCES ? ON DELETE CASCADE"
  , ", resource_id text not null REFERENCES ? ON DELETE CASCADE"
  , ", usage int not null"
  , ", PRIMARY KEY (job_id, resource_id)"
  , ");"
  ]
92 |
-- | DDL for a SQL function that sums the current usage of a resource across
-- jobs in a given status. Placeholders, in order: the function's name, the
-- jobs table, the usage table, and the status literal to filter on.
createUsageFunction :: Query
createUsageFunction = mconcat
  [ "CREATE OR REPLACE FUNCTION ?(resourceId text) RETURNS int as $$"
  , " SELECT coalesce(sum(usage), 0) FROM ? AS j INNER JOIN ? AS jr ON j.id = jr.job_id "
  , " WHERE jr.resource_id = $1 AND j.status = ? "
  , " $$ LANGUAGE SQL;"
  ]
98 |
-- | DDL for a SQL function that decides whether a job may run: for every
-- resource the job touches, current usage plus the job's own usage must stay
-- within the resource's limit (vacuously true for jobs with no resources).
-- Placeholders, in order: this function's name, the usage-summing function,
-- the usage table, and the resources table.
createCheckResourceFunction :: Query
createCheckResourceFunction = mconcat
  [ "CREATE OR REPLACE FUNCTION ?(jobId int) RETURNS bool as $$"
  , " SELECT coalesce(bool_and(?(resource.id) + job_resource.usage <= resource.usage_limit), true) FROM "
  , " ? AS job_resource INNER JOIN ? AS resource ON job_resource.resource_id = resource.id "
  , " WHERE job_resource.job_id = $1"
  , " $$ LANGUAGE SQL;"
  ]
105 |
-- | Sets up everything needed for resource-based concurrency control: the
-- resources table, the job\/resource usage table, the usage-summing SQL
-- function (counting only 'Locked' jobs), and the resource-check function.
createResourceTables
  :: Connection
  -> TableName -- ^ Name of the jobs table
  -> ResourceCfg
  -> IO ()
createResourceTables conn jobTableName ResourceCfg{..} = do
  let getTnameTxt (PGS.QualifiedIdentifier _ t) = t
      -- Derive the usage function's name from the usage table so that
      -- multiple resource configurations can coexist in one database.
      usageFnName =
        PGS.Identifier $
          "calculate_usage_for_resource_from_" <> getTnameTxt resCfgUsageTable
  void $ PGS.execute conn createResourceTableQuery (PGS.Only resCfgResourceTable)
  void $ PGS.execute conn createUsageTableQuery
    ( resCfgUsageTable
    , jobTableName
    , resCfgResourceTable
    )
  void $ PGS.execute conn createUsageFunction
    ( usageFnName
    , jobTableName
    , resCfgUsageTable
    , Locked
    )
  void $ PGS.execute conn createCheckResourceFunction
    ( resCfgCheckResourceFunction
    , usageFnName
    , resCfgUsageTable
    , resCfgResourceTable
    )
133 |
--------------------------------------------------------------------------------
/assets/bootstrap/css/bootstrap-reboot.css:
--------------------------------------------------------------------------------
1 | /*!
2 | * Bootstrap Reboot v4.4.1 (https://getbootstrap.com/)
3 | * Copyright 2011-2019 The Bootstrap Authors
4 | * Copyright 2011-2019 Twitter, Inc.
5 | * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
6 | * Forked from Normalize.css, licensed MIT (https://github.com/necolas/normalize.css/blob/master/LICENSE.md)
7 | */
8 | *,
9 | *::before,
10 | *::after {
11 | box-sizing: border-box;
12 | }
13 |
14 | html {
15 | font-family: sans-serif;
16 | line-height: 1.15;
17 | -webkit-text-size-adjust: 100%;
18 | -webkit-tap-highlight-color: rgba(0, 0, 0, 0);
19 | }
20 |
21 | article, aside, figcaption, figure, footer, header, hgroup, main, nav, section {
22 | display: block;
23 | }
24 |
25 | body {
26 | margin: 0;
27 | font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, "Noto Sans", sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji";
28 | font-size: 1rem;
29 | font-weight: 400;
30 | line-height: 1.5;
31 | color: #212529;
32 | text-align: left;
33 | background-color: #fff;
34 | }
35 |
36 | [tabindex="-1"]:focus:not(:focus-visible) {
37 | outline: 0 !important;
38 | }
39 |
40 | hr {
41 | box-sizing: content-box;
42 | height: 0;
43 | overflow: visible;
44 | }
45 |
46 | h1, h2, h3, h4, h5, h6 {
47 | margin-top: 0;
48 | margin-bottom: 0.5rem;
49 | }
50 |
51 | p {
52 | margin-top: 0;
53 | margin-bottom: 1rem;
54 | }
55 |
56 | abbr[title],
57 | abbr[data-original-title] {
58 | text-decoration: underline;
59 | -webkit-text-decoration: underline dotted;
60 | text-decoration: underline dotted;
61 | cursor: help;
62 | border-bottom: 0;
63 | -webkit-text-decoration-skip-ink: none;
64 | text-decoration-skip-ink: none;
65 | }
66 |
67 | address {
68 | margin-bottom: 1rem;
69 | font-style: normal;
70 | line-height: inherit;
71 | }
72 |
73 | ol,
74 | ul,
75 | dl {
76 | margin-top: 0;
77 | margin-bottom: 1rem;
78 | }
79 |
80 | ol ol,
81 | ul ul,
82 | ol ul,
83 | ul ol {
84 | margin-bottom: 0;
85 | }
86 |
87 | dt {
88 | font-weight: 700;
89 | }
90 |
91 | dd {
92 | margin-bottom: .5rem;
93 | margin-left: 0;
94 | }
95 |
96 | blockquote {
97 | margin: 0 0 1rem;
98 | }
99 |
100 | b,
101 | strong {
102 | font-weight: bolder;
103 | }
104 |
105 | small {
106 | font-size: 80%;
107 | }
108 |
109 | sub,
110 | sup {
111 | position: relative;
112 | font-size: 75%;
113 | line-height: 0;
114 | vertical-align: baseline;
115 | }
116 |
117 | sub {
118 | bottom: -.25em;
119 | }
120 |
121 | sup {
122 | top: -.5em;
123 | }
124 |
125 | a {
126 | color: #007bff;
127 | text-decoration: none;
128 | background-color: transparent;
129 | }
130 |
131 | a:hover {
132 | color: #0056b3;
133 | text-decoration: underline;
134 | }
135 |
136 | a:not([href]) {
137 | color: inherit;
138 | text-decoration: none;
139 | }
140 |
141 | a:not([href]):hover {
142 | color: inherit;
143 | text-decoration: none;
144 | }
145 |
146 | pre,
147 | code,
148 | kbd,
149 | samp {
150 | font-family: SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace;
151 | font-size: 1em;
152 | }
153 |
154 | pre {
155 | margin-top: 0;
156 | margin-bottom: 1rem;
157 | overflow: auto;
158 | }
159 |
160 | figure {
161 | margin: 0 0 1rem;
162 | }
163 |
164 | img {
165 | vertical-align: middle;
166 | border-style: none;
167 | }
168 |
169 | svg {
170 | overflow: hidden;
171 | vertical-align: middle;
172 | }
173 |
174 | table {
175 | border-collapse: collapse;
176 | }
177 |
178 | caption {
179 | padding-top: 0.75rem;
180 | padding-bottom: 0.75rem;
181 | color: #6c757d;
182 | text-align: left;
183 | caption-side: bottom;
184 | }
185 |
186 | th {
187 | text-align: inherit;
188 | }
189 |
190 | label {
191 | display: inline-block;
192 | margin-bottom: 0.5rem;
193 | }
194 |
195 | button {
196 | border-radius: 0;
197 | }
198 |
199 | button:focus {
200 | outline: 1px dotted;
201 | outline: 5px auto -webkit-focus-ring-color;
202 | }
203 |
204 | input,
205 | button,
206 | select,
207 | optgroup,
208 | textarea {
209 | margin: 0;
210 | font-family: inherit;
211 | font-size: inherit;
212 | line-height: inherit;
213 | }
214 |
215 | button,
216 | input {
217 | overflow: visible;
218 | }
219 |
220 | button,
221 | select {
222 | text-transform: none;
223 | }
224 |
225 | select {
226 | word-wrap: normal;
227 | }
228 |
229 | button,
230 | [type="button"],
231 | [type="reset"],
232 | [type="submit"] {
233 | -webkit-appearance: button;
234 | }
235 |
236 | button:not(:disabled),
237 | [type="button"]:not(:disabled),
238 | [type="reset"]:not(:disabled),
239 | [type="submit"]:not(:disabled) {
240 | cursor: pointer;
241 | }
242 |
243 | button::-moz-focus-inner,
244 | [type="button"]::-moz-focus-inner,
245 | [type="reset"]::-moz-focus-inner,
246 | [type="submit"]::-moz-focus-inner {
247 | padding: 0;
248 | border-style: none;
249 | }
250 |
251 | input[type="radio"],
252 | input[type="checkbox"] {
253 | box-sizing: border-box;
254 | padding: 0;
255 | }
256 |
257 | input[type="date"],
258 | input[type="time"],
259 | input[type="datetime-local"],
260 | input[type="month"] {
261 | -webkit-appearance: listbox;
262 | }
263 |
264 | textarea {
265 | overflow: auto;
266 | resize: vertical;
267 | }
268 |
269 | fieldset {
270 | min-width: 0;
271 | padding: 0;
272 | margin: 0;
273 | border: 0;
274 | }
275 |
276 | legend {
277 | display: block;
278 | width: 100%;
279 | max-width: 100%;
280 | padding: 0;
281 | margin-bottom: .5rem;
282 | font-size: 1.5rem;
283 | line-height: inherit;
284 | color: inherit;
285 | white-space: normal;
286 | }
287 |
288 | progress {
289 | vertical-align: baseline;
290 | }
291 |
292 | [type="number"]::-webkit-inner-spin-button,
293 | [type="number"]::-webkit-outer-spin-button {
294 | height: auto;
295 | }
296 |
297 | [type="search"] {
298 | outline-offset: -2px;
299 | -webkit-appearance: none;
300 | }
301 |
302 | [type="search"]::-webkit-search-decoration {
303 | -webkit-appearance: none;
304 | }
305 |
306 | ::-webkit-file-upload-button {
307 | font: inherit;
308 | -webkit-appearance: button;
309 | }
310 |
311 | output {
312 | display: inline-block;
313 | }
314 |
315 | summary {
316 | display: list-item;
317 | cursor: pointer;
318 | }
319 |
320 | template {
321 | display: none;
322 | }
323 |
324 | [hidden] {
325 | display: none !important;
326 | }
327 | /*# sourceMappingURL=bootstrap-reboot.css.map */
--------------------------------------------------------------------------------
/examples/OddJobsCliExample.lhs:
--------------------------------------------------------------------------------
1 | === 1. Create a table to store jobs
2 |
3 | In this example, our jobs table will be called `jobs_test`
4 |
5 |
6 | ```
7 | ghci> import Database.PostgreSQL.Simple (connectPostgreSQL)
8 | ghci> import OddJobs.Migrations
9 | ghci> conn <- connectPostgreSQL "dbname=jobs_test user=jobs_test password=jobs_test host=localhost"
10 | ghci> createJobTable conn "jobs_test"
11 | ```
12 |
13 |
14 | === 2. Create a module for your job-runner
15 |
16 | Ideally, this module should be compiled into a separate executable and should depend on your application's library module. If you do not wish to deploy odd-jobs as an independent executable, you may embed it within your main application's executable as well. This is described in [deployment](#deployment).
17 |
18 | \begin{code}
19 | {-# LANGUAGE DeriveAnyClass #-}
20 | {-# LANGUAGE DeriveGeneric #-}
21 | {-# LANGUAGE OverloadedStrings #-}
22 | {-# LANGUAGE LambdaCase #-}
23 | {-# LANGUAGE RecordWildCards #-}
24 |
25 | module OddJobsCliExample where
26 |
27 | import OddJobs.Job (Job(..), ConcurrencyControl(..), Config(..), throwParsePayload, startJobRunner, LogLevel(..), LogEvent(..))
28 | import OddJobs.ConfigBuilder (mkConfig, withConnectionPool, defaultTimedLogger, defaultLogStr, defaultJobType, mkUIConfig)
29 | import OddJobs.Cli (runCli, defaultWebUI, CliType(..))
30 |
31 | -- Note: It is not necessary to use fast-logger. You can use any logging library
32 | -- that can give you a logging function in the IO monad.
33 | import System.Log.FastLogger(withTimedFastLogger, LogType'(..), defaultBufSize, newTimedFastLogger)
34 | import System.Log.FastLogger.Date (newTimeCache, simpleTimeFormat)
35 | import Database.PostgreSQL.Simple as PGS
36 | import Data.Pool
37 | import Data.Text (Text)
38 | import Data.Aeson as Aeson
39 | import GHC.Generics
40 |
41 | -- This example is using these functions to introduce an artificial delay of a
42 | -- few seconds in one of the jobs. Otherwise it is not really needed.
43 | import OddJobs.Types (delaySeconds, Seconds(..))
44 | \end{code}
45 |
46 | === 3. Set-up a Haskell type to represent your job-payload
47 |
48 | - Ideally, this data-type should be defined _inside_ your application's code and the module containing this type-definition should be part of the `exposed-modules` stanza.
49 | - To work with all the default settings provided by 'OddJobs.ConfigBuilder' this data-type should have a **"tagged" JSON serialisation,** i.e.:
50 |
51 | ```json
52 |     {"tag": "SendWelcomeEmail", "contents": 10}
53 | ```
54 |
55 | In case your JSON payload does not conform to this structure, please look at [customising the job-payload's structure](#custom-payload-structure).
56 |
57 | - In this example, we are _blindly_ deriving `ToJSON` and `FromJSON` instances because the default behaviour of Aeson is to generate a tagged JSON as-per the example given above.
58 |
59 | \begin{code}
-- | Example job-payload type: one constructor per kind of background job.
-- The 'Generic'-derived Aeson instances give it the tagged JSON encoding
-- (e.g. @{"tag": "SendWelcomeEmail", "contents": 10}@) that the defaults in
-- 'OddJobs.ConfigBuilder' expect.
data MyJob
  = SendWelcomeEmail Int
  | SendPasswordResetEmail Text
  | SetupSampleData Int
  deriving (Eq, Show, Generic, ToJSON, FromJSON)
65 | \end{code}
66 |
67 | === 4. Write the core job-runner function
68 |
69 | In this example, the core job-runner function is in the `IO` monad. In all probability, your application's code will be in a custom monad, and not IO. Please refer to TODO on how to work with custom monads.
70 |
71 | \begin{code}
-- | The core job-runner: decodes the job's payload into a 'MyJob' value via
-- 'throwParsePayload' and dispatches on the constructor.
myJobRunner :: Job -> IO ()
myJobRunner job = do
  payload <- throwParsePayload job
  case payload of
    SendWelcomeEmail userId -> do
      -- The artificial 60s delay exists only to demonstrate graceful shutdown.
      putStrLn $ "This should call the function that actually sends the welcome email. " <>
        "\nWe are purposely waiting 60 seconds before completing this job so that graceful shutdown can be demonstrated."
      delaySeconds (Seconds 60)
      putStrLn $ "SendWelcomeEmail to user: " <> show userId <> " complete (60 second wait is now over...)"
    SendPasswordResetEmail _tkn ->
      putStrLn "This should call the function that actually sends the password-reset email"
    SetupSampleData _userId -> do
      -- Deliberately raises an error so the example shows a failing job.
      _ <- Prelude.error "User onboarding is incomplete"
      putStrLn "This should call the function that actually sets up sample data in a newly registered user's account"
85 | \end{code}
86 |
87 | === 5. Write the main function using `OddJobs.Cli`
88 |
\begin{code}
-- | Entry point: wires up both the job-runner and the web UI via
-- 'OddJobs.Cli.runCli'. Each callback allocates its own DB pool and
-- timed file-logger before handing control to odd-jobs.
main :: IO ()
main = runCli CliBoth
  { cliStartJobRunner = \cfgOverrideFn ->
      withConnectionPool (Left "dbname=jobs_test user=jobs_test password=jobs_test host=localhost") $ \dbPool -> do
        tcache <- newTimeCache simpleTimeFormat
        withTimedFastLogger tcache (LogFileNoRotate "oddjobs.log" defaultBufSize) $ \logger -> do
          let jobLogger = defaultTimedLogger logger (defaultLogStr defaultJobType)
          startJobRunner $
            mkConfig jobLogger "jobs" dbPool (MaxConcurrentJobs 50) myJobRunner cfgOverrideFn
  , cliStartWebUI = \uiStartArgs cfgOverrideFn ->
      withConnectionPool (Left "dbname=jobs_test user=jobs_test password=jobs_test host=localhost") $ \dbPool -> do
        tcache <- newTimeCache simpleTimeFormat
        withTimedFastLogger tcache (LogFileNoRotate "oddjobs-web.log" defaultBufSize) $ \logger -> do
          let jobLogger = defaultTimedLogger logger (defaultLogStr defaultJobType)
          defaultWebUI uiStartArgs $
            mkUIConfig jobLogger "jobs" dbPool cfgOverrideFn
  }
\end{code}
109 |
110 | === 6. Compile and start the Odd Jobs runner
111 |
112 |
113 | ```
114 | $ stack install :exe:odd-jobs-cli
115 | $ odd-jobs-cli start --daemonize --web-ui-basic-auth=oddjobs --web-ui-basic-password=awesome
116 | ```
117 |
118 |
119 |
120 | === 7. Enqueue some jobs from within your application's code
121 |
122 |
123 | ```
124 | ghci> import OddJobs.Job (createJob)
125 | ghci> import Database.PostgreSQL.Simple
126 | ghci> conn <- connectPostgreSQL "dbname=jobs_test user=jobs_test password=jobs_test host=localhost"
127 | ghci> createJob conn $ SendWelcomeEmail 10
128 | ghci> createJob conn $ SetupSampleData 10
129 | ```
130 |
131 |
132 | === 8. Check-out the awesome web UI
133 |
134 | Visit [http://localhost:7777](http://localhost:7777) (`username=oddjobs` / `password=awesome` as configured earlier).
135 |
136 | === 9. Check-out the log file to see what Odd Jobs is doing
137 |
138 |
139 | ```
140 | $ tail -f oddjobs.log
141 | ```
142 |
143 |
144 | === 10. Finally, shutdown Odd Jobs _gracefully_
145 |
146 | Please read [graceful shutdown](#graceful-shutdown) to know more.
147 |
148 |
149 | ```
150 | $ odd-jobs-cli stop --timeout 65
151 | ```
152 |
153 |
154 |
--------------------------------------------------------------------------------
/src/OddJobs/Endpoints.hs:
--------------------------------------------------------------------------------
1 | {-# LANGUAGE TypeOperators, DeriveGeneric, NamedFieldPuns, DataKinds, StandaloneDeriving, FlexibleContexts, RecordWildCards, RankNTypes #-}
2 |
3 |
4 | -- | TODO: Rename this to OddJobs.Servant
5 |
6 | module OddJobs.Endpoints where
7 |
8 | import OddJobs.Web as Web hiding (Routes(..))
9 | import qualified OddJobs.Web as Web
10 | import OddJobs.Job as Job
11 | import OddJobs.Types
12 | import GHC.Generics
13 |
14 | import Servant
15 | import Servant.API.Generic
16 | import Servant.Server.Generic
17 |
18 | import Servant.HTML.Lucid
19 | import Lucid
20 | import Lucid.Html5
21 | import Lucid.Base
22 | import qualified Data.Text as T
23 | import Data.Text (Text)
24 | import Network.Wai.Handler.Warp (run)
25 | import Servant.Server.StaticFiles (serveDirectoryFileServer)
26 | import UnliftIO hiding (Handler)
27 | import Database.PostgreSQL.Simple as PGS
28 | import Data.Pool as Pool
29 | import Control.Monad.Reader
30 | import Data.String.Conv (toS)
31 | import Control.Monad.Except
32 | import Data.Time as Time
33 | import Data.Aeson as Aeson
34 | import qualified Data.HashMap.Strict as HM
35 | import GHC.Exts (toList)
36 | import Data.Maybe (fromMaybe, mapMaybe)
37 | import Control.Applicative ((<|>))
38 | -- import qualified OddJobs.Links as Links
39 | import Data.List ((\\))
40 | import qualified System.Log.FastLogger as FLogger
41 | import qualified System.Log.FastLogger.Date as FLogger
42 | import Control.Monad.Logger as MLogger
43 | import qualified Data.ByteString.Lazy as BSL
44 | import qualified Data.List as DL
45 | import UnliftIO.IORef
46 | import Debug.Trace
47 | import qualified OddJobs.ConfigBuilder as Builder
48 | import Servant.Static.TH (createApiAndServerDecs)
49 | import Control.Monad (void)
50 |
51 | -- startApp :: IO ()
52 | -- startApp = undefined
53 |
54 | -- stopApp :: IO ()
55 | -- stopApp = undefined
56 |
-- Generates the @StaticAssetRoutes@ API type and the @staticAssetServer@
-- handler from the contents of the top-level @assets@ directory at compile time.
$(createApiAndServerDecs "StaticAssetRoutes" "staticAssetServer" "assets")
58 |
-- | Named servant routes for the admin UI. 'rFilterResults' renders the job
-- listing (optionally filtered); the remaining routes are POST actions on a
-- single job (or on cached metadata) that redirect back to the listing.
data Routes route = Routes
  { rFilterResults :: route :- QueryParam "filters" Web.Filter :> Get '[HTML] (Html ())
  , rEnqueue :: route :- "enqueue" :> Capture "jobId" JobId :> Post '[HTML] NoContent
  , rRunNow :: route :- "run" :> Capture "jobId" JobId :> Post '[HTML] NoContent
  , rCancel :: route :- "cancel" :> Capture "jobId" JobId :> Post '[HTML] NoContent
  , rKill :: route :- "kill" :> Capture "jobId" JobId :> Post '[HTML] NoContent
  , rRefreshJobTypes :: route :- "refresh-job-types" :> Post '[HTML] NoContent
  , rRefreshJobRunners :: route :- "refresh-job-runners" :> Post '[HTML] NoContent
  } deriving (Generic)
68 |
69 |
-- | The complete admin-UI API: the dynamic 'Routes' at the root, plus the
-- embedded static files served under @/assets@.
type FinalAPI =
  ToServant Routes AsApi :<|>
  "assets" :> StaticAssetRoutes
73 |
-- | Runtime environment shared by all UI handlers.
data Env = Env
  { envRoutes :: Web.Routes
    -- ^ URL-rendering functions used when building links in the HTML
  , envJobTypesRef :: IORef [Text]
    -- ^ cached list of known job types; replaced by 'refreshJobTypes'
  , envJobRunnersRef :: IORef [JobRunnerName]
    -- ^ cached list of known job-runners; replaced by 'refreshJobRunners'
  }
79 |
-- | Build the runtime 'Env' for the admin UI: query the current set of job
-- types and job-runners once and cache them in 'IORef's. @linksFn@
-- post-processes every generated route URL (e.g. to add a mount prefix).
mkEnv :: (MonadIO m) => UIConfig -> (Text -> Text) -> m Env
mkEnv cfg@UIConfig{} linksFn = do
  jobTypes <- fetchAllJobTypes cfg
  jobRunners <- fetchAllJobRunners cfg
  typesRef <- newIORef jobTypes
  runnersRef <- newIORef jobRunners
  pure Env
    { envRoutes = routes linksFn
    , envJobTypesRef = typesRef
    , envJobRunnersRef = runnersRef
    }
89 | -- TODO: remove hard-coded port
90 | -- run 8080 $ genericServe (server cfg Env{..})
91 |
92 | -- let nt :: ReaderT Job.Config IO a -> Servant.Handler a
93 | -- nt action = (liftIO $ try $ runReaderT action jm) >>= \case
94 | -- Left (e :: SomeException) -> Servant.Handler $ ExceptT $ pure $ Left $ err500 { errBody = toS $ show e }
95 | -- Right a -> Servant.Handler $ ExceptT $ pure $ Right a
96 | -- appProxy = (Proxy :: Proxy (ToServant Routes AsApi))
97 |
98 | -- finally
99 | -- (run 8080 $ genericServe (server jm dbPool jobTypesRef jobRunnerRef))
100 | -- (cleanup >> (Pool.destroyAllResources dbPool))
101 |
-- | No-op placeholder; the web UI currently has no explicit teardown step.
stopApp :: IO ()
stopApp = return ()
104 |
105 |
-- | Assemble the full admin-UI server: the UI handlers hoisted into the
-- caller's monad @m@ via the natural transformation @nt@, plus the static
-- asset server mounted under @/assets@.
server :: forall m . (MonadIO m)
       => UIConfig
       -> Env
       -> (forall a . Handler a -> m a)
       -> ServerT FinalAPI m
server cfg env nt =
  toServant handlers :<|> staticAssetServer
  where
    handlers :: Routes (AsServerT m)
    handlers = Routes
      { rFilterResults = nt . filterResults cfg env
      , rRunNow = nt . runJobNow cfg env
      , rEnqueue = nt . enqueueJob cfg env
      , rCancel = nt . cancelJob cfg env
      , rKill = nt . killJob cfg env
      , rRefreshJobTypes = nt $ refreshJobTypes cfg env
      , rRefreshJobRunners = nt $ refreshJobRunners cfg env
      }
124 |
-- | Plain-'Handler' variant of 'server', without the natural transformation
-- and without the static-asset routes.
-- NOTE(review): appears unused within this module — confirm callers before removing.
server2 :: UIConfig
        -> Env
        -> Routes AsServer
server2 cfg env = Routes
  { rFilterResults = filterResults cfg env
  , rEnqueue = enqueueJob cfg env
  , rCancel = cancelJob cfg env
  , rKill = killJob cfg env
  , rRunNow = runJobNow cfg env
  , rRefreshJobTypes = refreshJobTypes cfg env
  , rRefreshJobRunners = refreshJobRunners cfg env
  }
137 |
138 |
-- | Re-query the known job-runner names from the DB, replace the cached list
-- in 'envJobRunnersRef', and redirect (302) back to the unfiltered listing.
refreshJobRunners :: UIConfig
                  -> Env
                  -> Handler NoContent
refreshJobRunners cfg@UIConfig{} Env{envRoutes=Web.Routes{..}, envJobRunnersRef} = do
  allJobRunners <- fetchAllJobRunners cfg
  -- atomicWriteIORef is the idiomatic form of atomicModifyIORef' (const (x, ()))
  atomicWriteIORef envJobRunnersRef allJobRunners
  throwError $ err302{errHeaders=[("Location", toS $ rFilterResults Nothing)]}
146 |
-- | Re-query the known job types from the DB, replace the cached list in
-- 'envJobTypesRef', and redirect (302) back to the unfiltered listing.
refreshJobTypes :: UIConfig
                -> Env
                -> Handler NoContent
refreshJobTypes cfg Env{envRoutes=Web.Routes{..}, envJobTypesRef} = do
  allJobTypes <- fetchAllJobTypes cfg
  -- atomicWriteIORef is the idiomatic form of atomicModifyIORef' (const (x, ()))
  atomicWriteIORef envJobTypesRef allJobTypes
  throwError $ err302{errHeaders=[("Location", toS $ rFilterResults Nothing)]}
154 |
-- | Cancel the given job in the DB (via 'cancelJobIO'), then redirect home.
cancelJob :: UIConfig
          -> Env
          -> JobId
          -> Handler NoContent
cancelJob UIConfig{uicfgDbPool, uicfgTableName} env jid = do
  _ <- liftIO $ withResource uicfgDbPool $ \conn ->
    cancelJobIO conn uicfgTableName jid
  redirectToHome env
162 |
-- | Kill the given (running) job in the DB (via 'killJobIO'), then redirect home.
killJob :: UIConfig
        -> Env
        -> JobId
        -> Handler NoContent
killJob UIConfig{uicfgDbPool, uicfgTableName} env jid = do
  _ <- liftIO $ withResource uicfgDbPool $ \conn ->
    killJobIO conn uicfgTableName jid
  redirectToHome env
170 |
-- | Schedule the given job to run immediately (via 'runJobNowIO'), then redirect home.
runJobNow :: UIConfig
          -> Env
          -> JobId
          -> Handler NoContent
runJobNow UIConfig{uicfgDbPool, uicfgTableName} env jid = do
  _ <- liftIO $ withResource uicfgDbPool $ \conn ->
    runJobNowIO conn uicfgTableName jid
  redirectToHome env
178 |
-- | Re-enqueue an existing job: clear any lock on it and schedule it to run
-- immediately, then redirect home. Both statements run on one pooled connection.
enqueueJob :: UIConfig
           -> Env
           -> JobId
           -> Handler NoContent
enqueueJob UIConfig{uicfgDbPool, uicfgTableName} env jid = do
  liftIO $ withResource uicfgDbPool $ \conn -> do
    _ <- unlockJobIO conn uicfgTableName jid
    _ <- runJobNowIO conn uicfgTableName jid
    pure ()
  redirectToHome env
188 |
-- | Redirect to the unfiltered job listing. Uses 302 (Found) rather than the
-- previous 301: these redirects are the result of mutating POST endpoints, so
-- a /permanent/ redirect is semantically wrong and may be cached by clients.
-- This also matches the 302 used by 'refreshJobTypes' / 'refreshJobRunners'.
redirectToHome :: Env -> Handler NoContent
redirectToHome Env{envRoutes=Web.Routes{..}} = do
  throwError $ err302{errHeaders=[("Location", toS $ rFilterResults Nothing)]}
192 |
193 |
-- | Render the main job-listing page: fetch jobs matching @mFilter@ (default:
-- empty filter), count currently-running ('Job.Locked') jobs, render each job
-- to HTML via the configured 'uicfgJobToHtml', and lay out side-nav + results.
filterResults :: UIConfig
              -> Env
              -> Maybe Filter
              -> Handler (Html ())
filterResults cfg@UIConfig{uicfgJobToHtml, uicfgDbPool} Env{..} mFilter = do
  let filters = fromMaybe mempty mFilter
  -- Both queries share a single pooled connection.
  (jobs, runningCount) <- liftIO $ Pool.withResource uicfgDbPool $ \conn -> (,)
    <$> filterJobs cfg conn filters
    <*> countJobs cfg conn filters{ filterStatuses = [Job.Locked] }
  t <- liftIO getCurrentTime
  -- Pair each job with its rendered HTML fragment.
  js <- liftIO (DL.zip jobs <$> uicfgJobToHtml jobs)
  allJobTypes <- readIORef envJobTypesRef
  let navHtml = Web.sideNav envRoutes allJobTypes [] t filters
      bodyHtml = Web.resultsPanel envRoutes t filters js runningCount
  pure $ Web.pageLayout envRoutes navHtml bodyHtml
209 |
-- | Build the 'Web.Routes' record of URL-rendering functions from the servant
-- API's generated links, post-processing every link with @linkFn@ (e.g. to
-- prefix a mount path).
routes :: (Text -> Text) -> Web.Routes
routes linkFn = Web.Routes
  { Web.rFilterResults = rFilterResults
  , Web.rEnqueue = rEnqueue
  , Web.rRunNow = rRunNow
  , Web.rCancel = rCancel
  , Web.rKill = rKill
  , Web.rRefreshJobTypes = rRefreshJobTypes
  , Web.rRefreshJobRunners = rRefreshJobRunners
  , Web.rStaticAsset = linkFn
  }
  where
    -- Generated link-renderers for every servant route; RecordWildCards brings
    -- the field names into scope so they can be re-packed into 'Web.Routes'.
    OddJobs.Endpoints.Routes{..} = allFieldLinks' (linkFn . toS . show . linkURI) :: OddJobs.Endpoints.Routes (AsLink Text)
223 |
224 | -- absText :: Link -> Text
225 | -- absText l = "/" <> (toS $ show $ linkURI l)
226 |
--------------------------------------------------------------------------------
/odd-jobs.cabal:
--------------------------------------------------------------------------------
1 | cabal-version: 1.12
2 |
3 | -- This file has been generated from package.yaml by hpack version 0.35.2.
4 | --
5 | -- see: https://github.com/sol/hpack
6 | --
7 | -- hash: dff1ced317b40e54688799203ec3743b119bf71f2fc98109a5e79f12c6e10fba
8 |
9 | name: odd-jobs
10 | version: 0.2.3
11 | synopsis: A full-featured PostgreSQL-backed job queue (with an admin UI)
12 | description: - Background jobs library for Haskell.
13 | - Extracted from production code at [Vacation Labs](https://www.vacationlabs.com).
14 | - Inspired by the Delayed Jobs library in Rails.
15 | - Queue backend is a Postgres table.
16 | - Queue is monitored using, both, LISTEN/NOTIFY and polling the DB.
17 | - Comes with an in-built admin UI.
- Comes with utilities to help you build a CLI for your job-queue.
19 | - Highly configurable and monitorable
20 | category: Web
21 | homepage: https://www.haskelltutorials.com/odd-jobs
22 | bug-reports: https://github.com/saurabhnanda/odd-jobs/issues
23 | author: Saurabh Nanda
24 | maintainer: saurabhnanda@gmail.com
25 | copyright: 2016-2020 Saurabh Nanda
26 | license: BSD3
27 | license-file: LICENSE
28 | build-type: Simple
29 | extra-source-files:
30 | README.md
31 | CHANGELOG.md
32 | assets/bootstrap/css/bootstrap-grid.css
33 | assets/bootstrap/css/bootstrap-grid.min.css
34 | assets/bootstrap/css/bootstrap-reboot.css
35 | assets/bootstrap/css/bootstrap-reboot.min.css
36 | assets/bootstrap/css/bootstrap.css
37 | assets/bootstrap/css/bootstrap.min.css
38 | assets/bootstrap/js/bootstrap.bundle.js
39 | assets/bootstrap/js/bootstrap.bundle.min.js
40 | assets/bootstrap/js/bootstrap.js
41 | assets/bootstrap/js/bootstrap.min.js
42 | assets/css/Navigation-Clean1.css
43 | assets/css/logo-slider.css
44 | assets/css/styles.css
45 | assets/js/custom.js
46 | assets/js/jquery.min.js
47 | assets/js/logo-slider.js
48 | assets/odd-jobs-color-logo.png
49 |
50 | library
51 | exposed-modules:
52 | OddJobs.Job
53 | OddJobs.Migrations
54 | OddJobs.Web
55 | OddJobs.Endpoints
56 | OddJobs.Cli
57 | OddJobs.Types
58 | OddJobs.ConfigBuilder
59 | other-modules:
60 | UI
61 | OddJobs.Job.Query
62 | Paths_odd_jobs
63 | hs-source-dirs:
64 | src
65 | default-extensions:
66 | NamedFieldPuns
67 | LambdaCase
68 | TemplateHaskell
69 | ScopedTypeVariables
70 | GeneralizedNewtypeDeriving
71 | QuasiQuotes
72 | OverloadedStrings
73 | ghc-options: -Wall -fno-warn-orphans -fno-warn-unused-imports -fno-warn-dodgy-exports -Werror=missing-fields -Werror=incomplete-patterns
74 | build-depends:
75 | aeson
76 | , async ==2.2.4
77 | , base >=4.7 && <5
78 | , bytestring
79 | , containers
80 | , directory
81 | , either
82 | , fast-logger
83 | , filepath
84 | , friendly-time
85 | , generic-deriving
86 | , hdaemonize
87 | , hostname
88 | , lucid
89 | , monad-control
90 | , monad-logger
91 | , mtl
92 | , optparse-applicative
93 | , postgresql-simple
94 | , resource-pool >= 0.4.0.0 && < 0.5.0.0
95 | , safe
96 | , servant
97 | , servant-lucid
98 | , servant-server
99 | , servant-static-th
100 | , string-conv
101 | , text
102 | , text-conversions
103 | , time
104 | , timing-convenience
105 | , unix
106 | , unliftio
107 | , unliftio-core
108 | , unordered-containers
109 | , wai
110 | , warp
111 | default-language: Haskell2010
112 |
113 | executable devel
114 | main-is: DevelMain.hs
115 | other-modules:
116 | OddJobs.Cli
117 | OddJobs.ConfigBuilder
118 | OddJobs.Endpoints
119 | OddJobs.Job
120 | OddJobs.Migrations
121 | OddJobs.Job.Query
122 | OddJobs.Types
123 | OddJobs.Web
124 | UI
125 | Paths_odd_jobs
126 | hs-source-dirs:
127 | dev
128 | src
129 | default-extensions:
130 | NamedFieldPuns
131 | LambdaCase
132 | TemplateHaskell
133 | ScopedTypeVariables
134 | GeneralizedNewtypeDeriving
135 | QuasiQuotes
136 | OverloadedStrings
137 | ghc-options: -Wall -fno-warn-orphans -fno-warn-unused-imports -fno-warn-dodgy-exports -Werror=missing-fields -Werror=incomplete-patterns -threaded -with-rtsopts=-N -main-is DevelMain
138 | build-depends:
139 | aeson
140 | , async ==2.2.4
141 | , base >=4.7 && <5
142 | , bytestring
143 | , containers
144 | , directory
145 | , either
146 | , fast-logger
147 | , filepath
148 | , foreign-store
149 | , friendly-time
150 | , generic-deriving
151 | , hdaemonize
152 | , hostname
153 | , lucid
154 | , monad-control
155 | , monad-logger
156 | , mtl
157 | , odd-jobs
158 | , optparse-applicative
159 | , postgresql-simple
160 | , resource-pool
161 | , safe
162 | , servant
163 | , servant-lucid
164 | , servant-server
165 | , servant-static-th
166 | , string-conv
167 | , text
168 | , text-conversions
169 | , time
170 | , timing-convenience
171 | , unix
172 | , unliftio
173 | , unliftio-core
174 | , unordered-containers
175 | , wai
176 | , warp
177 | default-language: Haskell2010
178 |
179 | executable odd-jobs-cli-example
180 | main-is: OddJobsCliExample.lhs
181 | other-modules:
182 | Paths_odd_jobs
183 | hs-source-dirs:
184 | examples
185 | default-extensions:
186 | NamedFieldPuns
187 | LambdaCase
188 | TemplateHaskell
189 | ScopedTypeVariables
190 | GeneralizedNewtypeDeriving
191 | QuasiQuotes
192 | OverloadedStrings
193 | ghc-options: -Wall -fno-warn-orphans -fno-warn-unused-imports -fno-warn-dodgy-exports -Werror=missing-fields -Werror=incomplete-patterns -threaded -with-rtsopts=-N -main-is OddJobsCliExample
194 | build-depends:
195 | aeson
196 | , async ==2.2.4
197 | , base >=4.7 && <5
198 | , bytestring
199 | , directory
200 | , either
201 | , fast-logger
202 | , filepath
203 | , friendly-time
204 | , generic-deriving
205 | , hdaemonize
206 | , hostname
207 | , lucid
208 | , monad-control
209 | , monad-logger
210 | , mtl
211 | , odd-jobs
212 | , optparse-applicative
213 | , postgresql-simple
214 | , resource-pool
215 | , safe
216 | , servant
217 | , servant-lucid
218 | , servant-server
219 | , servant-static-th
220 | , string-conv
221 | , text
222 | , text-conversions
223 | , time
224 | , timing-convenience
225 | , unix
226 | , unliftio
227 | , unliftio-core
228 | , unordered-containers
229 | , wai
230 | , warp
231 | default-language: Haskell2010
232 |
233 | test-suite jobrunner
234 | type: exitcode-stdio-1.0
235 | main-is: Test.hs
236 | other-modules:
237 | CliParser
238 | OddJobs.Cli
239 | OddJobs.ConfigBuilder
240 | OddJobs.Endpoints
241 | OddJobs.Job
242 | OddJobs.Migrations
243 | OddJobs.Types
244 | OddJobs.Web
245 | UI
246 | Paths_odd_jobs
247 | hs-source-dirs:
248 | test
249 | src
250 | default-extensions:
251 | NamedFieldPuns
252 | LambdaCase
253 | TemplateHaskell
254 | ScopedTypeVariables
255 | GeneralizedNewtypeDeriving
256 | QuasiQuotes
257 | OverloadedStrings
258 | ghc-options: -Wall -fno-warn-orphans -fno-warn-unused-imports -fno-warn-dodgy-exports -Werror=missing-fields -Werror=incomplete-patterns -threaded -with-rtsopts=-N -main-is Test
259 | build-depends:
260 | aeson
261 | , async ==2.2.4
262 | , base >=4.7 && <5
263 | , bytestring
264 | , containers
265 | , directory
266 | , either
267 | , fast-logger
268 | , filepath
269 | , friendly-time
270 | , generic-deriving
271 | , hdaemonize
272 | , hedgehog
273 | , hostname
274 | , lifted-async
275 | , lifted-base
276 | , lucid
277 | , mmorph
278 | , monad-control
279 | , monad-logger
280 | , mtl
281 | , optparse-applicative
282 | , postgresql-simple
283 | , random
284 | , resource-pool
285 | , safe
286 | , servant
287 | , servant-lucid
288 | , servant-server
289 | , servant-static-th
290 | , string-conv
291 | , tasty
292 | , tasty-discover
293 | , tasty-hedgehog
294 | , tasty-hunit
295 | , text
296 | , text-conversions
297 | , time
298 | , timing-convenience
299 | , unix
300 | , unliftio
301 | , unliftio-core
302 | , unordered-containers
303 | , wai
304 | , warp
305 | default-language: Haskell2010
306 |
--------------------------------------------------------------------------------
/src/OddJobs/Cli.hs:
--------------------------------------------------------------------------------
1 | {-# LANGUAGE RecordWildCards #-}
2 | {-# LANGUAGE PartialTypeSignatures #-}
3 | {-# LANGUAGE TypeOperators #-}
4 | {-# LANGUAGE DataKinds #-}
5 | module OddJobs.Cli where
6 |
7 | import Options.Applicative as Opts
8 | import Control.Concurrent.Async (race)
9 | import Control.Concurrent.MVar (newEmptyMVar, takeMVar, tryPutMVar, tryTakeMVar)
10 | import Data.Coerce (coerce)
11 | import Data.Functor (void)
12 | import qualified Data.Text as T
13 | import Data.Text (Text)
14 | import OddJobs.Job (startJobRunner, Config(..), LogLevel(..), LogEvent(..))
15 | import OddJobs.Types (UIConfig(..), Seconds(..), delaySeconds)
16 | import qualified System.Posix.Daemonize as Daemonize
17 | import System.FilePath (FilePath, takeBaseName, takeDirectory)
18 | import System.Posix.Process (getProcessID)
19 | import System.Posix.Signals (Handler(CatchOnce), installHandler, sigTERM)
20 | import qualified System.Directory as Dir
21 | import qualified System.Exit as Exit
22 | import System.Environment (getProgName)
23 | import qualified System.Posix.Signals as Sig
24 | import qualified UnliftIO.Async as Async
25 | import UnliftIO (bracket_)
26 | import Safe (fromJustNote)
27 | import qualified OddJobs.Endpoints as UI
28 | import Servant.Server as Servant
29 | import Servant.API
30 | import Data.Proxy
31 | import Data.Text.Encoding (decodeUtf8)
32 | import Network.Wai.Handler.Warp as Warp
33 | import Debug.Trace
34 | import Data.String.Conv (toS)
35 |
36 | -- * Introduction
37 | --
38 | -- $intro
39 | --
40 | -- This module has a bunch of functions (that use the 'optparse-applicative'
41 | -- library) to help you rapidly build a standalone job-runner deamon based on
42 | -- odd-jobs. You should probably start-off by using the pre-packaged [default
43 | -- behaviour](#defaultBehaviour), notably the 'defaultMain' function. If the
44 | -- default behaviour of the resultant CLI doesn't suit your needs, consider
45 | -- reusing\/extending the [individual argument parsers](#parsers). If you find
46 | -- you cannot reuse those, then it would be best to write your CLI from scratch
47 | -- instead of force-fitting whatever has been provided here.
48 | --
49 | -- It is __highly recommended__ that you read the following links before putting
50 | -- odd-jobs into production.
51 | --
52 | -- * The [getting started
53 | -- guide](https://www.haskelltutorials.com/odd-jobs/guide.html#getting-started)
54 | -- which will walk you through a bare-bones example of using the
55 | -- 'defaultMain' function. It should make the callback-within-callback
56 | -- clearer.
57 | --
58 | -- * The [section on
59 | -- deployment](https://www.haskelltutorials.com/odd-jobs/guide.html#deployment)
60 | -- in the guide.
61 |
62 | -- * Default behaviour
63 | --
64 | -- $defaultBehaviour
65 | --
66 | -- #defaultBehaviour#
67 | --
68 |
69 |
-- | Which components the generated CLI can start: only the job-runner, only
-- the web admin UI, or both in one process. Each callback receives a
-- config-override function to apply on top of the defaults.
data CliType = CliOnlyJobRunner { cliStartJobRunner :: (Config -> Config) -> IO () }
             | CliOnlyWebUi { cliStartWebUI :: UIStartArgs -> (UIConfig -> UIConfig) -> IO () }
             | CliBoth { cliStartJobRunner :: (Config -> Config) -> IO (), cliStartWebUI :: UIStartArgs -> (UIConfig -> UIConfig) -> IO () }
73 |
74 | {-|
75 | Please do not get scared by the type-signature of the first argument.
76 | Conceptually, it's a callback, within another callback.
77 |
78 | The callback function that you pass to 'defaultMain' will be executed once the
job-runner has forked as a background daemon. Your callback function will be
80 | given another callback function, i.e. the @Config -> IO ()@ part that you need
81 | to call once you've setup @Config@ and whatever environment is required to run
82 | your application code.
83 |
84 | This complication is necessary because immediately after forking a new daemon
85 | process, a bunch of resources will need to be allocated for the job-runner to
86 | start functioning. At the minimum, a DB connection pool and logger. However,
87 | realistically, a bunch of additional resources will also be required for
88 | setting up the environment needed for running jobs in your application's
89 | monad.
90 |
91 | All of these resource allocations need to be bracketed so that when the
92 | job-runner exits, they may be cleaned-up gracefully.
93 |
94 | Please take a look at the [getting started
95 | guide](https://www.haskelltutorials.com/odd-jobs/guide.html#getting-started) for
96 | an example of how to use this function.
97 | -}
-- | Parse the command line and dispatch to the matching default command
-- implementation ('defaultStartCommand' / 'defaultStopCommand').
runCli :: CliType -> IO ()
runCli cliType = do
  Args{argsCommand} <- customExecParser defaultCliParserPrefs (defaultCliInfo cliType)
  case argsCommand of
    Start commonArgs mUIArgs -> defaultStartCommand commonArgs mUIArgs cliType
    Stop stopArgs -> defaultStopCommand stopArgs
    Status -> Prelude.error "not implemented yet"
108 |
109 | -- | Ensure that @SIGTERM@ is handled gracefully, because it's how containers are stopped.
110 | --
111 | -- @action@ will receive an 'AsyncCancelled' exception if @SIGTERM@ is received by the process.
112 | --
113 | -- Typical use:
114 | --
115 | -- > main :: IO ()
116 | -- > main = withGracefulTermination_ $ do
117 | --
118 | -- Note that although the Haskell runtime handles @SIGINT@ it doesn't do anything with @SIGTERM@.
119 | -- Therefore, the program will be killed immediately and no cleanup will be performed. In particular,
120 | -- exception handlers in `bracket`, `finally`, `onException`, etc. won't be run. However, if the
121 | -- program is running as PID 1 in a container, @SIGTERM@ will be ignored and the program will keep
122 | -- running. This will likely result in a @SIGKILL@ being sent a short while later and cleanup still
123 | -- won't be performed.
withGracefulTermination :: IO a -> IO (Maybe a)
withGracefulTermination ioAction = do
  -- One-shot mailbox, filled (at most once) when SIGTERM arrives.
  termVar <- newEmptyMVar
  _ <- installHandler sigTERM (CatchOnce (void (tryPutMVar termVar ()))) Nothing
  result <- race (takeMVar termVar) ioAction
  pure $ case result of
    Left () -> Nothing  -- terminated by SIGTERM before the action finished
    Right a -> Just a
131 |
-- | Like 'withGracefulTermination' but ignoring the return value
withGracefulTermination_ :: IO a -> IO ()
withGracefulTermination_ ioAction = void (withGracefulTermination ioAction)
135 |
136 | {-| Used by 'defaultMain' if the 'Start' command is issued via the CLI. If
137 | @--daemonize@ switch is also passed, it checks for 'startPidFile':
138 |
139 | * If it doesn't exist, it forks a background daemon, writes the PID file, and
140 | exits.
141 |
142 | * If it exists, it refuses to start, to prevent multiple invocations of the same
143 | background daemon.
144 | -}
-- Fix: removed Debug.Trace leftovers (traceShowId / traceM) that wrote debug
-- output to stderr on every start-up.
defaultStartCommand :: CommonStartArgs
                    -> Maybe UIStartArgs
                    -> CliType
                    -> IO ()
defaultStartCommand CommonStartArgs{..} mUIArgs cliType = do
  if startDaemonize then do
    Daemonize.serviced
      $ Daemonize.simpleDaemon
      { Daemonize.program = \() -> withGracefulTermination_ coreStartupFn
      , Daemonize.name = Just $ takeBaseName startPidFile
      , Daemonize.pidfileDirectory = Just $ takeDirectory startPidFile
      }
  else
    coreStartupFn
  where
    -- Lazily forced, so this error fires only for CLI types that need UI args.
    uiArgs = fromJustNote "Please specify Web UI Startup Args" mUIArgs
    -- The action to run once (possibly) daemonized. For 'CliBoth' the web UI
    -- runs on an async thread that is cancelled when the job-runner returns.
    coreStartupFn =
      case cliType of
        CliOnlyJobRunner{..} ->
          cliStartJobRunner Prelude.id
        CliOnlyWebUi{..} ->
          cliStartWebUI uiArgs Prelude.id
        CliBoth{..} ->
          Async.withAsync (cliStartWebUI uiArgs Prelude.id) $ \_ ->
            cliStartJobRunner Prelude.id
174 |
-- | Start the Warp server for the admin UI on 'uistartPort', with the auth
-- scheme selected on the command line. Fix: the "Starting admin UI" log line
-- was duplicated in both branches; it is now emitted once, before dispatch.
defaultWebUI :: UIStartArgs
             -> UIConfig
             -> IO ()
defaultWebUI UIStartArgs{..} uicfg@UIConfig{..} = do
  env <- UI.mkEnv uicfg ("/" <>)
  uicfgLogger LevelInfo $ LogText $ "Starting admin UI on port " <> toS (show uistartPort)
  case uistartAuth of
    AuthNone ->
      Warp.run uistartPort $ Servant.serve (Proxy :: Proxy UI.FinalAPI) $
        UI.server uicfg env Prelude.id
    AuthBasic u p -> do
      let api = Proxy :: Proxy (BasicAuth "OddJobs Admin UI" OddJobsUser :> UI.FinalAPI)
          ctx = defaultBasicAuth (u, p) :. EmptyContext
          -- The basic-auth combinator passes an extra OddJobsUser argument,
          -- which we aren't really interested in.
          app _ = UI.server uicfg env Prelude.id
      Warp.run uistartPort $ Servant.serveWithContext api ctx app
193 |
194 | {-| Used by 'defaultMain' if 'Stop' command is issued via the CLI. Sends a
195 | @SIGTERM@ signal to the process indicated by 'shutPidFile'. Waits for a maximum
196 | of 'shutTimeout' seconds (controller by @--timeout@) for the daemon to shutdown
197 | gracefully, after which a @SIGKILL@ is issued
198 | -}
defaultStopCommand :: StopArgs
                   -> IO ()
defaultStopCommand StopArgs{..} = do
  progName <- getProgName
  putStrLn $ "Sending SIGTERM to " <> show progName <>
    " and waiting " <> show (unSeconds shutTimeout) <> " seconds for graceful stop"
  -- Ask hdaemonize to stop the daemon identified by the PID file, force-killing
  -- after 'shutTimeout' seconds.
  Daemonize.serviced'
    (Daemonize.simpleDaemon
      { Daemonize.name = Just $ takeBaseName shutPidFile
      , Daemonize.pidfileDirectory = Just $ takeDirectory shutPidFile
      , Daemonize.killWait = Just $ coerce shutTimeout
      })
    Daemonize.Stop
211 |
212 |
213 | -- * Default CLI parsers
214 | --
215 | -- $parsers$
216 | --
217 | -- #parsers#
218 | --
219 | -- If the [default behaviour](#defaultBehaviour) doesn't suit your needs, you
220 | -- can write a @main@ function yourself, and consider using\/extending the CLI
221 | -- parsers documented in this section.
222 |
223 |
-- | The command-line is parsed into this data-structure using 'argParser'
data Args = Args
  { argsCommand :: !Command -- ^ the sub-command (start\/stop\/status) with its arguments
  } deriving (Eq, Show)
228 |
229 |
-- | The top-level command-line parser; currently just wraps 'commandParser'.
argParser :: CliType -> Parser Args
argParser cliType = Args <$> commandParser cliType
233 |
234 | -- ** Top-level command parser
235 |
-- | CLI commands are parsed into this data-structure by 'commandParser'
data Command
  = Start CommonStartArgs (Maybe UIStartArgs)
    -- ^ start the job-runner and\/or web UI; 'Nothing' when no UI is configured
  | Stop StopArgs
    -- ^ stop a daemonized runner via its PID file
  | Status
    -- ^ not implemented yet
  deriving (Eq, Show)
242 |
-- | Parser for 'argsCommand': dispatches between the @start@, @stop@ and
-- @status@ sub-commands.
commandParser :: CliType -> Parser Command
commandParser cliType = hsubparser
  ( command "start" (info (startCmdParser cliType) (progDesc "start the odd-jobs runner and/or admin UI")) <>
    command "stop" (info stopParser (progDesc "stop the odd-jobs runner and/or admin UI")) <>
    command "status" (info statusParser (progDesc "print status of all active jobs"))
  )
250 |
251 |
-- | Web-UI specific arguments of the @start@ command.
data UIStartArgs = UIStartArgs
  { uistartAuth :: !WebUiAuth -- ^ auth scheme chosen on the command line
  , uistartPort :: !Int       -- ^ port the web UI listens on (defaults to 7777)
  } deriving (Eq, Show)
256 |
257 | -- ** Start command
258 |
-- | @start@ command is parsed into this data-structure by 'startParser'
data CommonStartArgs = CommonStartArgs
  { startDaemonize :: !Bool -- ^ fork as a background daemon?
  -- | PID file for the background daemon. Ref: 'pidFileParser'
  , startPidFile :: !FilePath
  } deriving (Eq, Show)
265 |
-- | Parser for the web-UI specific @start@ options: auth scheme and port.
uiStartArgsParser :: Parser UIStartArgs
uiStartArgsParser =
  UIStartArgs <$> webUiAuthParser <*> portParser
  where
    portParser = option auto
      ( long "web-ui-port"
        <> metavar "PORT"
        <> value 7777
        <> showDefault
        <> help "The port on which the Web UI listens. Please note, to actually enable the Web UI you need to pick one of the available auth schemes"
      )
275 |
-- | Parser for @start@ options shared by the job-runner and the web UI.
commonStartArgsParser :: Parser CommonStartArgs
commonStartArgsParser =
  CommonStartArgs <$> daemonizeParser <*> pidFileParser
  where
    daemonizeParser = switch
      ( long "daemonize"
        <> help "Fork the job-runner as a background daemon. If omitted, the job-runner remains in the foreground."
      )
282 |
-- | Assemble the full @start@ command. Whether web-UI args are absent,
-- mandatory, or optional depends on the 'CliType'.
startCmdParser :: CliType -> Parser Command
startCmdParser cliType =
  Start <$> commonStartArgsParser <*> uiArgsParser
  where
    uiArgsParser =
      case cliType of
        CliOnlyJobRunner _ -> pure Nothing
        CliOnlyWebUi _ -> Just <$> uiStartArgsParser
        CliBoth _ _ -> optional uiStartArgsParser
291 |
-- | Authentication scheme for the web UI, selected on the command line.
data WebUiAuth
  = AuthNone               -- ^ no authentication at all (NOT recommended)
  | AuthBasic !Text !Text  -- ^ HTTP basic auth: username and password
  deriving (Eq, Show)
296 |
297 | -- | Pick one of the following auth mechanisms for the web UI:
298 | --
299 | -- * No auth - @--web-ui-no-auth@ __NOT RECOMMENDED__
300 | -- * Basic auth - @--web-ui-basic-auth-user @ and
301 | -- @--web-ui-basic-auth-password @
-- Fix: the @--web-ui-no-auth@ help text said "with any authentication";
-- this flag disables authentication entirely, so it must read "without".
webUiAuthParser :: Parser WebUiAuth
webUiAuthParser = basicAuthParser <|> noAuthParser
  where
    basicAuthParser = AuthBasic
      <$> strOption ( long "web-ui-basic-auth-user" <>
                      metavar "USER" <>
                      help "Username for basic auth"
                    )
      <*> strOption ( long "web-ui-basic-auth-password" <>
                      metavar "PASS" <>
                      help "Password for basic auth"
                    )
    noAuthParser = flag' AuthNone
      ( long "web-ui-no-auth" <>
        help "Start the web UI without any authentication. NOT RECOMMENDED."
      )
318 |
319 | -- ** Stop command
320 |
-- | @stop@ command is parsed into this data-structure by 'stopParser'. Please
-- note, that this command first sends a @SIGTERM@ to the daemon and waits for
-- 'shutTimeout' seconds. If the daemon doesn't shut down cleanly within that
-- time, it sends a @SIGKILL@ to kill immediately.
data StopArgs = StopArgs
  { -- | After sending a @SIGTERM@, how many seconds to wait before sending a
    -- @SIGKILL@
    shutTimeout :: !Seconds
    -- | PID file of the daemon. Ref: 'pidFileParser'
  , shutPidFile :: !FilePath
  } deriving (Eq, Show)
332 |
-- | Parses the @stop@ command: a mandatory @--timeout@ plus the PID file of
-- the daemon to stop (ref: 'pidFileParser').
stopParser :: Parser Command
stopParser = Stop <$> stopArgs
  where
    stopArgs = StopArgs <$> timeoutParser <*> pidFileParser
    timeoutParser = option (Seconds <$> auto) $ mconcat
      [ long "timeout"
      , metavar "TIMEOUT"
      , help "Maximum seconds to wait before force-killing the background daemon."
      ]
340 |
341 |
342 | -- ** Status command
343 |
-- | The @status@ command has not been implemented yet. PRs welcome :-)
--
-- Takes no arguments and always parses successfully.
statusParser :: Parser Command
statusParser = pure Status
347 |
348 | -- ** Other parsing utilities
349 |
-- | Parses @--pid-file@; when the flag is omitted this defaults to
-- @./odd-jobs.pid@.
pidFileParser :: Parser FilePath
pidFileParser = strOption $ mconcat
  [ long "pid-file"
  , metavar "PIDFILE"
  , value "./odd-jobs.pid"
  , showDefault
  , help "Path of the PID file for the daemon. Takes effect only during stop or only when using the --daemonize option at startup"
  ]
360 |
-- | Preferences used by the default CLI: show the full help text whenever
-- the command line is empty or fails to parse.
defaultCliParserPrefs :: ParserPrefs
defaultCliParserPrefs = prefs (showHelpOnError <> showHelpOnEmpty)
365 |
-- | Wraps 'argParser' for the given 'CliType' into a 'ParserInfo', adding
-- the standard @--help@ handler ('helper') and a full description block.
defaultCliInfo :: CliType -> ParserInfo Args
defaultCliInfo cliType =
  info (argParser cliType <**> helper) fullDesc
369 |
-- ** Auth implementations for the default Web UI

-- *** Basic Auth

-- | A successfully authenticated web-UI user: username and password, in that
-- order, as verified by 'defaultBasicAuth'.
data OddJobsUser = OddJobsUser !Text !Text deriving (Eq, Show)
375 |
-- | Basic-auth check that compares the supplied credentials against a single
-- expected (username, password) pair. Note: a wrong username is also reported
-- as 'BadPassword'.
defaultBasicAuth :: (Text, Text) -> BasicAuthCheck OddJobsUser
defaultBasicAuth (expectedUser, expectedPass) = BasicAuthCheck check
  where
    check bdata =
      let suppliedUser = decodeUtf8 (basicAuthUsername bdata)
          suppliedPass = decodeUtf8 (basicAuthPassword bdata)
      in if suppliedUser == expectedUser && suppliedPass == expectedPass
         then pure $ Authorized $ OddJobsUser suppliedUser suppliedPass
         else pure BadPassword
383 |
--------------------------------------------------------------------------------
/src/OddJobs/ConfigBuilder.hs:
--------------------------------------------------------------------------------
1 | {-# LANGUAGE RankNTypes #-}
2 | {-# LANGUAGE FlexibleContexts #-}
3 | {-# LANGUAGE CPP #-}
4 |
5 | module OddJobs.ConfigBuilder where
6 |
7 | import OddJobs.Types
8 | import Database.PostgreSQL.Simple as PGS
9 | import Data.Pool
10 | import Control.Monad.Logger (LogLevel(..), LogStr, toLogStr)
11 | import Data.Text (Text)
12 | import Lucid (Html, toHtml, class_, div_, span_, br_, button_, a_, href_, onclick_)
13 | import Data.Maybe (fromMaybe)
14 | import Data.List as DL
15 | import Data.Aeson as Aeson hiding (Success)
16 | import qualified Data.Text as T
17 | import GHC.Generics
18 | import Data.Proxy (Proxy(..))
19 | import Generics.Deriving.ConNames
20 | import Control.Monad
21 | import Data.String.Conv
22 | import GHC.Exts (toList)
23 | import qualified Data.ByteString as BS
24 | import UnliftIO (MonadUnliftIO, withRunInIO, bracket, liftIO)
25 | import qualified System.Log.FastLogger as FLogger
26 | import Data.Int (Int64)
27 | import Database.PostgreSQL.Simple.Types as PGS (Identifier(..))
28 |
-- Aeson 2.x replaced the lazy @HashMap Text@ object representation with
-- @KeyMap Key@. This CPP shim lets the rest of this module treat object keys
-- uniformly as 'T.Text', regardless of which aeson version is in use.
# if MIN_VERSION_aeson(2, 0, 0)
import qualified Data.Aeson.KeyMap as HM
import qualified Data.Aeson.Key as Key
keyToText :: Key.Key -> T.Text
keyToText = Key.toText
# else
import qualified Data.HashMap.Lazy as HM
keyToText :: T.Text -> T.Text
keyToText = id
# endif
39 |
-- | This function gives you a 'Config' with a bunch of sensible defaults
-- already applied. It requires the bare minimum configuration parameters
-- that this library cannot assume on your behalf.
--
-- It makes a few __important assumptions__ about your 'jobPayload' JSON,
-- which are documented in 'defaultJobType'.
mkConfig :: (LogLevel -> LogEvent -> IO ())
         -- ^ "Structured logging" function. Ref: 'cfgLogger'
         -> TableName
         -- ^ DB table which holds your jobs. Ref: 'cfgTableName'
         -> Pool Connection
         -- ^ DB connection-pool to be used by job-runner. Ref: 'cfgDbPool'
         -> ConcurrencyControl
         -- ^ Concurrency configuration. Ref: 'cfgConcurrencyControl'
         -> (Job -> IO ())
         -- ^ The actual "job runner" which contains your application code. Ref: 'cfgJobRunner'
         -> (Config -> Config)
         -- ^ A function that allows you to modify the \"interim config\",
         -- which contains the in-built defaults along with the values you
         -- have just provided (i.e. logging function, table name, DB pool,
         -- etc). Pass 'Prelude.id' here if you do not wish to override
         -- anything. __Note:__ it is strongly recommended that you __do
         -- not__ modify the generated 'Config' outside of this function,
         -- unless you know what you're doing.
         -> Config
         -- ^ The final 'Config' that can be used to start various job-runners
mkConfig logger tname dbpool ccControl jrunner configOverridesFn =
  configOverridesFn defaults
  where
    -- In-built defaults; every field below can be overridden via
    -- @configOverridesFn@.
    defaults = Config
      { cfgTableName            = tname
      , cfgDbPool               = dbpool
      , cfgLogger               = logger
      , cfgJobRunner            = jrunner
      , cfgConcurrencyControl   = ccControl
      , cfgPollingInterval      = defaultPollingInterval
      , cfgOnJobStart           = const $ pure ()
      , cfgOnJobSuccess         = const $ pure ()
      , cfgOnJobFailed          = []
      , cfgOnJobTimeout         = const $ pure ()
      , cfgDefaultMaxAttempts   = 10
      , cfgJobType              = defaultJobType
      , cfgDefaultJobTimeout    = Seconds 600
      , cfgImmediateJobDeletion = defaultImmediateJobDeletion
      , cfgDelayedJobDeletion   = Nothing
      , cfgDefaultRetryBackoff  = \attempts -> pure $ Seconds $ 2 ^ attempts
      }
88 |
89 |
mkUIConfig :: (LogLevel -> LogEvent -> IO ())
           -- ^ "Structured logging" function. Ref: 'uicfgLogger'
           -> TableName
           -- ^ DB table which holds your jobs. Ref: 'uicfgTableName'
           -> Pool Connection
           -- ^ DB connection-pool to be used by web UI. Ref: 'uicfgDbPool'
           -> (UIConfig -> UIConfig)
           -- ^ A function that allows you to modify the \"interim UI config\".
           -- The \"interim config\" will contain a bunch of in-built default
           -- config params, along with the config params that you\'ve just
           -- provided (i.e. logging function, table name, DB pool, etc). You
           -- can use this function to override values in the \"interim UI
           -- config\". If you do not wish to modify the \"interim UI config\"
           -- just pass 'Prelude.id' as an argument to this parameter. __Note:__
           -- it is strongly recommended that you __do not__ modify the
           -- generated 'Config' outside of this function, unless you know what
           -- you're doing.
           -> UIConfig
           -- ^ The final 'UIConfig' that needs to be passed to
           -- 'OddJobs.Endpoint.mkEnv' and 'OddJobs.Cli.defaultWebUI'
mkUIConfig logger tname dbpool configOverridesFn =
  -- NOTE: this binding "ties the knot": the defaults for 'uicfgJobToHtml'
  -- and 'uicfgAllJobTypes' are derived from @cfg@ itself — i.e. from the
  -- /final/, overridden config — relying on laziness. Overrides must not
  -- force these fields while computing them, or this will loop forever.
  let cfg = configOverridesFn $ UIConfig
        { uicfgLogger = logger
        , uicfgDbPool = dbpool
        , uicfgTableName = tname
        , uicfgJobType = defaultJobType
        , uicfgJobToHtml = defaultJobsToHtml (uicfgJobType cfg)
        , uicfgAllJobTypes = defaultDynamicJobTypes (uicfgTableName cfg) (uicfgJobTypeSql cfg)
        , uicfgJobTypeSql = defaultJobTypeSql
        }
  in cfg
121 |
122 |
123 |
124 |
-- | If you aren't interested in structured logging, you can use this function
-- to emit plain-text logs (or define your own).
--
-- Output shape: @LEVEL | EVENT | JobId=... JobType=... | ...@
defaultLogStr :: (Job -> Text)
              -- ^ How to render a job's type (e.g. 'defaultJobType')
              -> LogLevel
              -> LogEvent
              -> LogStr
defaultLogStr jobTypeFn logLevel logEvent =
  toLogStr (show logLevel) <> " | " <> str
  where
    -- Common "JobId=... JobType=..." fragment shared by all job events.
    jobToLogStr job@Job{jobId} =
      "JobId=" <> toLogStr (show jobId) <> " JobType=" <> toLogStr (jobTypeFn job)

    str = case logEvent of
      LogJobStart j ->
        "Started | " <> jobToLogStr j
      LogJobFailed j e fm t ->
        -- Distinguish failures that will be retried from permanent ones.
        let tag = case fm of
              FailWithRetry -> "Failed (retry)"
              FailPermanent -> "Failed (permanent)"
        in tag <> " | " <> jobToLogStr j <> " | runtime=" <> toLogStr (show t) <> " | error=" <> toLogStr (show e)
      LogJobSuccess j t ->
        "Success | " <> jobToLogStr j <> " | runtime=" <> toLogStr (show t)
      LogJobTimeout j@Job{jobLockedAt, jobLockedBy} ->
        "Timeout | " <> jobToLogStr j <> " | lockedBy=" <> toLogStr (maybe "unknown" unJobRunnerName jobLockedBy) <>
        " lockedAt=" <> toLogStr (maybe "unknown" show jobLockedAt)
      LogKillJobSuccess j ->
        "Kill Job Success | " <> jobToLogStr j
      LogKillJobFailed j ->
        "Kill Job Failed | " <> jobToLogStr j <> "(the job might have completed or timed out)"
      LogPoll ->
        "Polling jobs table"
      LogDeletionPoll n ->
        "Job deletion polled and deleted " <> toLogStr n <> " jobs"
      LogWebUIRequest ->
        "WebUIRequest (TODO: Log the actual request)"
      LogText t ->
        toLogStr t
162 |
-- | Renders a list of jobs via 'defaultJobToHtml'. Lives in 'IO' only to
-- satisfy the 'uicfgJobToHtml' field's type; no actual I/O is performed.
defaultJobsToHtml :: (Job -> Text)
                  -> [Job]
                  -> IO [Html ()]
defaultJobsToHtml jobType = pure . DL.map (defaultJobToHtml jobType)
167 |
168 |
-- | Renders a single job as an HTML fragment for the admin UI: the job-type,
-- the payload (via 'defaultPayloadToHtml'), and — when present — the last
-- error inside a collapsible section.
defaultJobToHtml :: (Job -> Text)
                 -> Job
                 -> Html ()
defaultJobToHtml jobType j =
  div_ [ class_ "job" ] $ do
    div_ [ class_ "job-type" ] $ do
      toHtml $ jobType j
    div_ [ class_ "job-payload" ] $ do
      defaultPayloadToHtml $ defaultJobContent $ jobPayload j
    case jobLastError j of
      Nothing -> mempty
      Just e -> do
        -- "collapsed" / "d-none" classes are toggled client-side by the
        -- toggleError JS handler referenced below.
        div_ [ class_ "job-error collapsed" ] $ do
          a_ [ href_ "javascript: void(0);", onclick_ "toggleError(this)" ] $ do
            span_ [ class_ "badge badge-secondary error-expand" ] "+ Last error"
            span_ [ class_ "badge badge-secondary error-collapse d-none" ] "- Last error"
          " "
          defaultErrorToHtml e
187 |
188 |
-- | Renders a job's last-error JSON value as HTML. Strings are rendered with
-- embedded newlines converted to @\<br\>@ tags; other JSON values are
-- currently just 'show'n.
defaultErrorToHtml :: Value -> Html ()
defaultErrorToHtml err =
  case err of
    Aeson.String s -> multiline s
    Aeson.Bool b   -> toHtml $ show b
    Aeson.Number n -> toHtml $ show n
    Aeson.Null     -> toHtml ("(null)" :: Text)
    Aeson.Object o -> toHtml $ show o -- TODO: handle this properly
    Aeson.Array a  -> toHtml $ show a -- TODO: handle this properly
  where
    -- Every line — including the last one — is followed by a <br>.
    multiline txt =
      mapM_ (\ln -> toHtml ln >> br_ []) (T.splitOn "\n" txt)
203 |
-- | When the payload is a JSON object with a @contents@ key (the shape
-- produced by aeson's default tagged encoding), returns just that key's
-- value; any other payload is returned unchanged.
defaultJobContent :: Value -> Value
defaultJobContent v
  | Aeson.Object o <- v
  , Just inner <- HM.lookup "contents" o = inner
  | otherwise = v
208 |
-- | Recursively renders a JSON payload as HTML: objects become key\/value
-- spans, arrays become bracketed lists, and scalars are rendered inline.
defaultPayloadToHtml :: Value -> Html ()
defaultPayloadToHtml v = case v of
  Aeson.Object o -> do
    toHtml ("{ " :: Text)
    forM_ (HM.toList o) $ \(k, v2) -> do
      span_ [ class_ " key-value-pair " ] $ do
        span_ [ class_ "key" ] $ toHtml $ keyToText k <> ":"
        span_ [ class_ "value" ] $ defaultPayloadToHtml v2
    toHtml (" }" :: Text)
  Aeson.Array a -> do
    toHtml ("[" :: Text)
    -- NOTE(review): a ", " separator is emitted after /every/ element,
    -- including the last one — confirm whether the trailing comma is
    -- intentional before changing the rendering.
    forM_ (toList a) $ \x -> do
      defaultPayloadToHtml x
      toHtml (", " :: Text)
    toHtml ("]" :: Text)
  Aeson.String t -> toHtml t
  Aeson.Number n -> toHtml $ show n
  Aeson.Bool b -> toHtml $ show b
  Aeson.Null -> toHtml ("null" :: Text)
228 |
-- | SQL expression extracting the job-type from the payload column, assuming
-- aeson's default tagged encoding (@{"tag": ..., "contents": ...}@).
-- Ref: 'uicfgJobTypeSql', 'defaultJobType'.
defaultJobTypeSql :: PGS.Query
defaultJobTypeSql = "payload->>'tag'"
231 |
-- | Produces a fixed list of job-types ('AJTFixed') from the constructor
-- names of your job-payload sum-type, via GHC generics.
defaultConstantJobTypes :: forall a . (Generic a, ConNames (Rep a))
  => Proxy a
  -> AllJobTypes
defaultConstantJobTypes _ =
  -- 'conNames' only inspects the type's generic structure, so the
  -- 'undefined' value is never forced.
  AJTFixed $ DL.map toS $ conNames (undefined :: a)
237 |
-- | Builds the master-list of job-types at runtime ('AJTSql') by running a
-- @SELECT DISTINCT@ of the given job-type SQL expression (e.g.
-- 'defaultJobTypeSql') over the jobs table. NULL job-types are rendered as
-- @"(unknown)"@.
defaultDynamicJobTypes :: TableName
                       -> PGS.Query
                       -> AllJobTypes
defaultDynamicJobTypes tname jobTypeSql = AJTSql $ \conn -> do
  DL.map (fromMaybe "(unknown)" . fromOnly) <$> PGS.query conn ("select distinct(" <> jobTypeSql <> ") from ? order by 1 nulls last") (Only tname)
243 |
-- | This makes __two important assumptions__. First, this /assumes/ that jobs
-- in your app are represented by a sum-type. For example:
--
-- @
-- data MyJob = SendWelcomeEmail Int
--            | SendPasswordResetEmail Text
--            | SetupSampleData Int
-- @
--
-- Second, it /assumes/ that the JSON representation of this sum-type is
-- "tagged". For example, the following...
--
-- > let pload = SendWelcomeEmail 10
--
-- ...when converted to JSON, would look like...
--
-- > {"tag":"SendWelcomeEmail", "contents":10}
--
-- It uses this assumption to extract the "job type" from a 'Data.Aeson.Value'
-- (which would be @SendWelcomeEmail@ in the example given above). This is used
-- in logging and the admin UI.
--
-- Even if this assumption is violated, the job-runner /should/ continue to
-- function. It's just that you won't get very useful log messages.
--
-- __Note:__ If your job payload does not conform to the structure described
-- above, please read the section on [customising the job payload's
-- structure](https://www.haskelltutorials.com/odd-jobs/guide.html#custom-payload-structure)
-- in the implementation guide.
defaultJobType :: Job -> Text
defaultJobType Job{jobPayload} =
  -- Any payload without a string-valued "tag" key falls back to "unknown".
  case jobPayload of
    Aeson.Object hm -> case HM.lookup "tag" hm of
      Just (Aeson.String t) -> t
      _ -> "unknown"
    _ -> "unknown"
280 |
281 |
-- | As the name says. Ref: 'cfgPollingInterval'. Also used by
-- 'withConnectionPool' to derive the pool's connection idle-timeout.
defaultPollingInterval :: Seconds
defaultPollingInterval = Seconds 5
285 |
-- | Convenience function to create a DB connection-pool with some sensible
-- defaults, and guarantee its destruction when the given action finishes
-- (normally or via an exception).
withConnectionPool :: (MonadUnliftIO m)
                   => Either BS.ByteString PGS.ConnectInfo
                   -- ^ either a raw libpq connection string, or a structured
                   -- 'PGS.ConnectInfo'
                   -> (Pool PGS.Connection -> m a)
                   -> m a
withConnectionPool connConfig action =
  withRunInIO $ \runInIO ->
    bracket mkPool destroyAllResources (runInIO . action)
  where
    -- Idle connections live for two polling intervals; the pool holds at
    -- most 8 connections.
    idleTime = fromIntegral $ 2 * unSeconds defaultPollingInterval
    maxConns = 8
    mkPool = liftIO $ newPool $
      case connConfig of
        Left connString -> defaultPoolConfig (PGS.connectPostgreSQL connString) PGS.close idleTime maxConns
        Right connInfo  -> defaultPoolConfig (PGS.connect connInfo) PGS.close idleTime maxConns
302 |
-- | A convenience function to help you define a timed-logger with some
-- sensible defaults: debug-level events are dropped, and every other event
-- is prefixed with the formatted time and terminated with a newline.
defaultTimedLogger :: FLogger.TimedFastLogger
                   -> (LogLevel -> LogEvent -> LogStr)
                   -- ^ how to render an event (e.g. 'defaultLogStr')
                   -> LogLevel
                   -> LogEvent
                   -> IO ()
defaultTimedLogger logger logStrFn logLevel logEvent =
  unless (logLevel == LevelDebug) $
    logger $ \t ->
      toLogStr t <> " | " <> logStrFn logLevel logEvent <> "\n"
316 |
317 |
-- | Renders a 'LogEvent' as a tagged JSON object, mirroring aeson's
-- @{"tag": ..., "contents": ...}@ encoding for sum types.
defaultJsonLogEvent :: LogEvent -> Aeson.Value
defaultJsonLogEvent logEvent =
  case logEvent of
    LogJobStart job ->
      Aeson.object [ "tag" Aeson..= ("LogJobStart" :: Text)
                   , "contents" Aeson..= defaultJsonJob job ]
    LogJobSuccess job runTime ->
      Aeson.object [ "tag" Aeson..= ("LogJobSuccess" :: Text)
                   , "contents" Aeson..= (defaultJsonJob job, runTime) ]
    LogJobFailed job e fm runTime ->
      Aeson.object [ "tag" Aeson..= ("LogJobFailed" :: Text)
                   , "contents" Aeson..= (defaultJsonJob job, show e, defaultJsonFailureMode fm, runTime) ]
    LogJobTimeout job ->
      Aeson.object [ "tag" Aeson..= ("LogJobTimeout" :: Text)
                   , "contents" Aeson..= defaultJsonJob job ]
    LogKillJobSuccess job ->
      Aeson.object [ "tag" Aeson..= ("LogKillJobSuccess" :: Text)
                   , "contents" Aeson..= defaultJsonJob job ]
    LogKillJobFailed job ->
      Aeson.object [ "tag" Aeson..= ("LogKillJobFailed" :: Text)
                   , "contents" Aeson..= defaultJsonJob job ]
    LogPoll ->
      -- NOTE(review): the tag emitted here is "LogJobPoll", which does NOT
      -- match the constructor name 'LogPoll' (all other branches match).
      -- Confirm whether downstream log consumers depend on this string
      -- before changing it.
      Aeson.object [ "tag" Aeson..= ("LogJobPoll" :: Text)]
    LogDeletionPoll n ->
      Aeson.object [ "tag" Aeson..= ("LogDeletionPoll" :: Text), "contents" Aeson..= n ]
    LogWebUIRequest ->
      Aeson.object [ "tag" Aeson..= ("LogWebUIRequest" :: Text)]
    LogText t ->
      Aeson.object [ "tag" Aeson..= ("LogText" :: Text)
                   , "contents" Aeson..= t ]
348 |
-- | JSON representation of a 'Job' using aeson's generic encoding with
-- default options (record field names are emitted as-is, e.g. @jobId@).
defaultJsonJob :: Job -> Aeson.Value
defaultJsonJob = genericToJSON Aeson.defaultOptions

-- | JSON representation of a 'FailureMode' using aeson's generic encoding.
defaultJsonFailureMode :: FailureMode -> Aeson.Value
defaultJsonFailureMode = genericToJSON Aeson.defaultOptions
354 |
-- | Default policy for 'cfgImmediateJobDeletion': delete a job as soon as it
-- completes with 'OddJobs.Types.Success'; retain jobs in every other status.
defaultImmediateJobDeletion :: Job -> IO Bool
defaultImmediateJobDeletion Job{jobStatus} =
  -- Simplified from `if ... then pure True else pure False`.
  pure $ jobStatus == OddJobs.Types.Success
360 |
-- | Use this function to get a sensible default implementation for the 'cfgDelayedJobDeletion'.
-- You would typically use it as such:
--
-- @
-- let tname = TableName "jobs"
--     loggingFn _ _ = _todo
--     dbPool = _todo
--     myJobRunner = _todo
--     cfg = mkConfig loggingFn tname dbPool (MaxConcurrentJobs 10) myJobRunner $ \x ->
--             x { cfgDelayedJobDeletion = Just (defaultDelayedJobDeletion tname "7 days") }
-- @
defaultDelayedJobDeletion ::
  TableName ->
  -- ^ DB table which holds your jobs. Ref: 'cfgTableName'
  String ->
  -- ^ Time interval after which successful, failed, and cancelled jobs
  -- should be deleted from the table. __NOTE:__ This needs to be expressed
  -- as an actual PostgreSQL interval, such as @"7 days"@ or @"12 hours"@
  PGS.Connection ->
  -- ^ the postgres connection that will be provided to this function,
  -- to be able to execute the @DELETE@ statement.
  IO Int64
  -- ^ number of rows\/jobs deleted
defaultDelayedJobDeletion tname d conn =
  PGS.execute conn qry (tname, PGS.In statusList, d)
  where
    -- this function has been deliberately written like this to ensure that
    -- whenever a new Status is added/removed one is forced to update this
    -- list and decide what is to be done about the new Status
    statusList = flip DL.filter ([minBound..maxBound] :: [OddJobs.Types.Status]) $ \case
      Success -> True
      Queued -> False
      Failed -> True
      Cancelled -> True
      Retry -> False
      Locked -> False
    qry = "DELETE FROM ? WHERE status in ? AND run_at < current_timestamp - ? :: interval"
--------------------------------------------------------------------------------
/src/OddJobs/Types.hs:
--------------------------------------------------------------------------------
1 | {-# LANGUAGE ExistentialQuantification #-}
2 | {-# LANGUAGE FlexibleInstances #-}
3 | {-# LANGUAGE FlexibleContexts #-}
4 | {-# LANGUAGE DeriveGeneric #-}
5 | {-# LANGUAGE RankNTypes #-}
6 |
7 | module OddJobs.Types where
8 |
9 | import Database.PostgreSQL.Simple as PGS
10 | import Database.PostgreSQL.Simple.Types as PGS
11 | import UnliftIO (MonadIO)
12 | import UnliftIO.Concurrent (threadDelay)
13 | import Data.Text.Conversions
14 | import Database.PostgreSQL.Simple.FromField as FromField
15 | import Database.PostgreSQL.Simple.ToField as ToField
16 | import Database.PostgreSQL.Simple.FromRow as FromRow
17 | import Data.Time
18 | import UnliftIO.Exception
19 | import Data.Text (Text)
20 | import GHC.Generics
21 | import Data.Aeson as Aeson hiding (Success)
22 | import Data.String.Conv
23 | import Lucid (Html)
24 | import Data.Pool (Pool)
25 | import Control.Monad.Logger (LogLevel)
26 | import Data.Int (Int64)
27 |
-- | An alias for 'QualifiedIdentifier' type. It is used for the job table name.
-- Since this type has an instance of 'IsString',
-- you do not need to do anything special to create a value for this type. Just
-- ensure you have the @OverloadedStrings@ extension enabled. For example:
--
-- @
-- {-\# LANGUAGE OverloadedStrings \#-}
--
-- myJobsTable :: TableName
-- myJobsTable = "my_jobs"
-- @
--
-- This should also work for table names qualified by the schema name. For example:
--
-- @
-- {-\# LANGUAGE OverloadedStrings \#-}
--
-- myJobsTable :: TableName
-- myJobsTable = "odd_jobs.jobs"
-- @

type TableName = PGS.QualifiedIdentifier
50 |
-- | Derives the name of the Postgres notification channel/event used for
-- announcing newly created jobs in the given table. When the table is
-- schema-qualified, the schema is folded into the name to keep events
-- distinct across schemas.
pgEventName :: TableName -> PGS.Identifier
pgEventName (PGS.QualifiedIdentifier mSchema tname) =
  PGS.Identifier $ "jobs_created_" <> schemaPrefix <> tname
  where
    schemaPrefix = maybe "" (<> "_") mSchema
54 |
55 | newtype Seconds = Seconds { unSeconds :: Int } deriving (Eq, Show, Ord, Num, Read)
56 |
-- | Convenience wrapper on-top of 'threadDelay' which takes 'Seconds' as an
-- argument, instead of micro-seconds.
delaySeconds :: (MonadIO m) => Seconds -> m ()
delaySeconds (Seconds s) = threadDelay $ oneSec * s

-- | Number of microseconds in one second — the scaling factor between
-- 'Seconds' and the argument expected by 'threadDelay'.
oneSec :: Int
oneSec = 1000000
64 |
65 |
-- | Structured events emitted by the job-runner and web UI, consumed by the
-- configured logging function (ref: 'OddJobs.ConfigBuilder.defaultLogStr').
data LogEvent
  -- | Emitted when a job starts execution
  = LogJobStart !Job
  -- | Emitted when a job succeeds along with the time taken for execution.
  | LogJobSuccess !Job !NominalDiffTime
  -- | Emitted when a job fails (but will be retried) along with the time taken for
  -- /this/ attempt
  | LogJobFailed !Job !SomeException !FailureMode !NominalDiffTime
  -- | Emitted when a job times out and is picked-up again for execution
  | LogJobTimeout !Job
  -- | Emitted when user kills a job and the job thread sucessfully cancelled thereafter.
  | LogKillJobSuccess !Job
  -- | Emitted when user kills a job and the job thread is not found in the threadRef
  -- (most likely the job has either got completed or timed out).
  | LogKillJobFailed !Job
  -- | Emitted whenever 'OddJobs.Job.jobPoller' polls the DB table
  | LogPoll
  -- | Emitted whenever the job-deletion poller runs; carries the number of
  -- jobs deleted in that sweep.
  | LogDeletionPoll !Int64
  -- | TODO
  | LogWebUIRequest
  -- | Emitted whenever any other event occurs
  | LogText !Text
  deriving (Show, Generic)
90 |
-- | Used by 'JobErrHandler' and 'LogEvent' to indicate the nature of failure.
data FailureMode
  -- | The job failed, but will be retried in the future.
  = FailWithRetry
  -- | The job failed and will no longer be retried (probably because it has
  -- been tried 'cfgDefaultMaxAttempts' times already).
  | FailPermanent deriving (Eq, Show, Generic)
98 |
-- | Exception handler for jobs. This is conceptually very similar to how
-- 'Control.Exception.Handler' and 'Control.Exception.catches' (from
-- 'Control.Exception') work in-tandem. Using 'cfgOnJobFailed' you can install
-- /multiple/ exception handlers, where each handler is responsible for one type
-- of exception. OddJobs will execute the correct exception handler on the basis
-- of the type of runtime exception raised. For example:
--
-- @
-- cfgOnJobFailed =
--   [ JobErrHandler $ \(e :: HttpException) job failMode -> ...
--   , JobErrHandler $ \(e :: SqlException) job failMode -> ...
--   , JobErrHandler $ \(e :: SomeException) job failMode -> ...
--   ]
-- @
--
-- __Note:__ Please refer to the section on [alerts and
-- notifications](https://www.haskelltutorials.com/odd-jobs/guide.html#alerts)
-- in the implementation guide to understand how to use the machinery provided
-- by 'JobErrHandler' and 'cfgOnJobFailed'.
data JobErrHandler = forall a e . (Exception e) => JobErrHandler (e -> Job -> FailureMode -> IO a)
119 |
120 | type FunctionName = PGS.Identifier
121 |
-- | Configuration for DB-tracked, resource-based concurrency control; used
-- by the 'ResourceLimits' constructor of 'ConcurrencyControl'.
data ResourceCfg = ResourceCfg
  { resCfgResourceTable :: TableName
  -- ^ Table to use for tracking resources and their limits. Both this and
  -- 'resCfgUsageTable' should be created by 'OddJobs.Migrations.createResourceTables'.

  , resCfgUsageTable :: TableName
  -- ^ Table to use for tracking how jobs use resources.

  , resCfgCheckResourceFunction :: FunctionName
  -- ^ Name of the function that checks that the resources required to run a
  -- job are all available (i.e. that the total usage of each resource, plus
  -- the usage the job needs to run, does not exceed the resource limit). The
  -- function should have the signature @(int) RETURNS bool@, and return @TRUE@
  -- if the job with the given ID has its resources available.

  , resCfgDefaultLimit :: Int
  -- ^ When a job requires a resource not already in 'resCfgResourceTable',
  -- what should its limit be set to?
  } deriving (Show)
141 |
-- | Identifier of a resource row tracked in 'resCfgResourceTable'.
newtype ResourceId = ResourceId { rawResourceId :: Text }
  deriving (Show)
144 |
-- | __Note:__ Please read the section on [controlling
-- concurrency](https://www.haskelltutorials.com/odd-jobs/guide.html#controlling-concurrency)
-- in the implementation guide to understand the implications of each option
-- given by the data-type. Supplied to the runner via 'cfgConcurrencyControl'.
data ConcurrencyControl
  -- | The maximum number of concurrent jobs that /this instance/ of the
  -- job-runner can execute.
  = MaxConcurrentJobs Int
  -- | __Not recommended:__ Please do not use this in production unless you know
  -- what you're doing. No machine can support unlimited concurrency. If your
  -- jobs are doing anything worthwhile, running a sufficiently large number
  -- concurrently is going to max-out /some/ resource of the underlying machine,
  -- such as, CPU, memory, disk IOPS, or network bandwidth.
  | UnlimitedConcurrentJobs

  -- | Limit jobs according to their access to resources, as tracked in the DB
  -- according to the 'ResourceCfg'.
  --
  -- __Warning:__ without sufficient limits, this can easily hit the same problems
  -- as 'UnlimitedConcurrentJobs' where jobs are able to exhaust system resources.
  -- It is therefore recommended that all jobs in your system use /some/ DB-tracked
  -- resource.
  | ResourceLimits ResourceCfg

  -- | Use this to dynamically determine if the next job should be picked-up, or
  -- not. This is useful to write custom-logic to determine whether a limited
  -- resource is below a certain usage threshold (eg. CPU usage is below 80%).
  -- __Caveat:__ This feature has not been tested in production, yet.
  | DynamicConcurrency (IO Bool)
174 |
-- | Hand-written 'Show', needed because 'DynamicConcurrency' wraps an
-- @IO Bool@ action which has no 'Show' instance of its own.
instance Show ConcurrencyControl where
  show (MaxConcurrentJobs n)   = "MaxConcurrentJobs " <> show n
  show UnlimitedConcurrentJobs = "UnlimitedConcurrentJobs"
  show (ResourceLimits cfg)    = "ResourceLimits " <> show cfg
  show (DynamicConcurrency _)  = "DynamicConcurrency (IO Bool)"
181 |
182 | type JobId = Int
183 |
-- | Lifecycle status of a job, persisted in the jobs table's @status@ column
-- (see the 'ToText'\/'FromText'\/'ToField'\/'FromField' instances below).
data Status
  -- | In the current version of odd-jobs you /should not/ find any jobs having
  -- the 'Success' status, because successful jobs are immediately deleted.
  -- However, in the future, we may keep them around for a certain time-period
  -- before removing them from the jobs table.
  = Success
  -- | Jobs in 'Queued' status /may/ be picked up by the job-runner on the basis
  -- of the 'jobRunAt' field.
  | Queued
  -- | Jobs in 'Failed' status will not be retried by the job-runner.
  | Failed
  -- | Jobs with 'Cancelled' status are cancelled by the user and will not be
  -- retried by the job-runner
  | Cancelled
  -- | Jobs in 'Retry' status will be retried by the job-runner on the basis of
  -- the 'jobRunAt' field.
  | Retry
  -- | Jobs in 'Locked' status are currently being executed by a job-runner,
  -- which is identified by the 'jobLockedBy' field. The start of job-execution
  -- is indicated by the 'jobLockedAt' field.
  | Locked
  deriving (Eq, Show, Generic, Enum, Bounded)
206 |
-- | NOTE: ordering is by the textual representation from 'toText'
-- (alphabetical: cancelled < failed < locked < queued < retry < success),
-- /not/ by constructor position.
instance Ord Status where
  compare x y = compare (toText x) (toText y)
209 |
-- | Serialised as the lower-case status string produced by 'toText'.
instance ToJSON Status where
  toJSON = toJSON . toText
212 |
-- | Accepts exactly the strings produced by 'toText'; anything else makes
-- the parser fail with the message from 'fromText'.
instance FromJSON Status where
  parseJSON = withText "Expecting text to convert into Job.Status" $ \t ->
    either fail pure (fromText t :: Either String Status)
218 |
219 |
220 | newtype JobRunnerName = JobRunnerName { unJobRunnerName :: Text } deriving (Eq, Show, FromField, ToField, Generic, ToJSON, FromJSON)
221 |
-- | In-memory representation of a row in the jobs table (decoded by the
-- 'FromRow' instance below).
data Job = Job
  { jobId :: JobId
    -- ^ Primary key of the job row.
  , jobCreatedAt :: UTCTime
    -- ^ When the row was inserted.
  , jobUpdatedAt :: UTCTime
    -- ^ When the row was last modified.
  , jobRunAt :: UTCTime
    -- ^ Earliest time at which the job may be (re)executed; basis for
    -- 'Queued' and 'Retry' scheduling.
  , jobStatus :: Status
    -- ^ Current lifecycle status. Ref: 'Status'.
  , jobPayload :: Aeson.Value
    -- ^ User-supplied payload, stored as raw JSON.
  , jobLastError :: Maybe Value
    -- ^ Error recorded for the most recent failed attempt, if any.
  , jobAttempts :: Int
    -- ^ Number of execution attempts so far.
  , jobLockedAt :: Maybe UTCTime
    -- ^ When a runner locked this job (set while 'Locked').
  , jobLockedBy :: Maybe JobRunnerName
    -- ^ Which runner instance holds the lock (set while 'Locked').
  } deriving (Eq, Show, Generic)
234 |
-- | Lower-case textual form of each status, as stored in the DB's @status@
-- column (via the 'ToField' instance).
instance ToText Status where
  toText Success   = "success"
  toText Queued    = "queued"
  toText Retry     = "retry"
  toText Failed    = "failed"
  toText Cancelled = "cancelled"
  toText Locked    = "locked"
243 |
-- | Inverse of 'toText' for 'Status'; an unrecognised string is reported via
-- 'Left' with a descriptive message.
instance (StringConv Text a) => FromText (Either a Status) where
  fromText "success"   = Right Success
  fromText "queued"    = Right Queued
  fromText "failed"    = Right Failed
  fromText "cancelled" = Right Cancelled
  fromText "retry"     = Right Retry
  fromText "locked"    = Right Locked
  fromText other       = Left $ toS $ "Unknown job status: " <> other
253 |
-- | Decodes the DB's textual @status@ value via 'fromText'; an unrecognised
-- status string is surfaced as a 'PGS.ConversionFailed' error instead of a
-- runtime exception.
instance FromField Status where
  fromField f mBS = fromField f mBS >>= (\case
    Left e -> FromField.returnError PGS.ConversionFailed f e
    Right s -> pure s)
    . fromText
259 |
-- | Stored in the DB as the lower-case string produced by 'toText'.
instance ToField Status where
  toField = toField . toText
262 |
-- | Fields are decoded positionally, so the column order here must stay in
-- sync with the SELECT lists of the queries that fetch jobs.
instance FromRow Job where
  fromRow = Job
    <$> field -- jobId
    <*> field -- createdAt
    <*> field -- updatedAt
    <*> field -- runAt
    <*> field -- status
    <*> field -- payload
    <*> field -- lastError
    <*> field -- attempts
    <*> field -- lockedAt
    <*> field -- lockedBy
275 |
-- TODO: Add a sum-type for return status which can signal the monitor about
-- whether the job needs to be retried, marked successful, or whether it has
-- permanently failed.

-- | The user-supplied action executed for each job. Ref: 'cfgJobRunner'.
type JobRunner = Job -> IO ()
280 |
-- | The web\/admin UI needs to know a \"master list\" of all job-types to be
-- able to power the \"filter by job-type\" feature. This data-type helps in
-- letting odd-jobs know /how/ to get such a master-list. The function specified
-- by this type is run once when the job-runner starts (and stored in an
-- internal @IORef@). After that the list of job-types needs to be updated
-- manually by pressing the appropriate \"refresh\" link in the admin\/web UI.
data AllJobTypes
  -- | A fixed-list of job-types. If you don't want to increase boilerplate,
  -- consider using 'OddJobs.ConfigBuilder.defaultConstantJobTypes' which will
  -- automatically generate the list of available job-types based on a sum-type
  -- that represents your job payload.
  = AJTFixed [Text]
  -- | Construct the list of job-types dynamically by looking at the actual
  -- payloads in 'cfgTableName' (using an SQL query); the 'Connection'
  -- argument is supplied by the caller.
  | AJTSql (Connection -> IO [Text])
  -- | A custom 'IO' action for fetching the list of job-types.
  | AJTCustom (IO [Text])
298 |
299 | -- | While odd-jobs is highly configurable and the 'Config' data-type might seem
300 | -- daunting at first, it is not necessary to tweak every single configuration
301 | -- parameter by hand.
302 | --
303 | -- __Recommendation:__ Please start-off by building a 'Config' by using the
304 | -- 'OddJobs.ConfigBuilder.mkConfig' function (to get something with sensible
305 | -- defaults) and then tweaking config parameters on a case-by-case basis.
data Config = Config
  { -- | The DB table which holds your jobs. Please note, this should have been
    -- created by the 'OddJobs.Migrations.createJobTable' function.
    cfgTableName :: TableName

    -- | The actual "job-runner" that __you__ need to provide. If this function
    -- throws a runtime exception, the job will be retried
    -- 'cfgDefaultMaxAttempts' times. Please look at the examples/tutorials if
    -- your application's code is not in the @IO@ monad.
  , cfgJobRunner :: Job -> IO ()

    -- | The number of times a failing job is retried before it is considered
    -- "permanently failed" and ignored by the job-runner. This config parameter
    -- is called "/default/ max attempts" because, in the future, it would be
    -- possible to specify the number of retry-attempts on a per-job basis
    -- (__Note:__ per-job retry-attempts has not been implemented yet)
  , cfgDefaultMaxAttempts :: Int

    -- | Controls how many jobs can be run concurrently by /this instance/ of
    -- the job-runner. __Please note,__ this is NOT the global concurrency of
    -- entire job-queue. It is possible to have job-runners running on multiple
    -- machines, and each will apply the concurrency control independent of
    -- other job-runners. __Ref:__ Section on [controlling
    -- concurrency](https://www.haskelltutorials.com/odd-jobs/guide.html#controlling-concurrency)
    -- in the implementation guide.
  , cfgConcurrencyControl :: ConcurrencyControl

    -- | The DB connection-pool to use for the job-runner. __Note:__ in case
    -- your jobs require a DB connection, please create a separate
    -- connection-pool for them. This pool will be used ONLY for monitoring jobs
    -- and changing their status. We need to have __at least 4 connections__ in
    -- this connection-pool for the job-runner to work as expected.
  , cfgDbPool :: Pool Connection

    -- | How frequently should the 'jobPoller' check for jobs where the Job's
    -- 'jobRunAt' field indicates that it's time for the job to be executed.
    -- __Ref:__ Please read the section on [how Odd Jobs works
    -- (architecture)](https://www.haskelltutorials.com/odd-jobs/guide.html#architecture)
    -- to find out more.
  , cfgPollingInterval :: Seconds

    -- | User-defined callback function that is called whenever a job succeeds.
  , cfgOnJobSuccess :: Job -> IO ()

    -- | User-defined error-handler that is called whenever a job fails (indicated
    -- by 'cfgJobRunner' throwing an unhandled runtime exception). Please refer to
    -- 'JobErrHandler' for documentation on how to use this.
  , cfgOnJobFailed :: [JobErrHandler]

    -- | User-defined callback function that is called whenever a job starts
    -- execution.
  , cfgOnJobStart :: Job -> IO ()

    -- | User-defined callback function that is called whenever a job times-out.
    -- Also check 'cfgDefaultJobTimeout'
  , cfgOnJobTimeout :: Job -> IO ()

    -- | File to store the PID of the job-runner process. This is used only when
    -- invoking the job-runner as an independent background daemon (the usual
    -- mode of deployment).
    -- , cfgPidFile :: Maybe FilePath

    -- | A "structured logging" function that __you__ need to provide. The
    -- @odd-jobs@ library does NOT use the standard logging interface provided by
    -- 'monad-logger' on purpose. Also look at 'cfgJobType' and 'defaultLogStr'
    --
    -- __Note:__ Please take a look at the section on [structured
    -- logging](https://www.haskelltutorials.com/odd-jobs/guide.html#structured-logging)
    -- to find out how to use this to log in JSON.
  , cfgLogger :: LogLevel -> LogEvent -> IO ()

    -- | How to extract the "job type" from a 'Job'. If you are overriding this,
    -- please consider overriding 'cfgJobTypeSql' as well. Related:
    -- 'OddJobs.ConfigBuilder.defaultJobType'
  , cfgJobType :: Job -> Text

    -- | How long can a job run after which it is considered to be "crashed" and
    -- picked up for execution again
  , cfgDefaultJobTimeout :: Seconds

    -- | After a job attempt, should it be immediately deleted to save table space? The default
    -- behaviour, as defined by 'OddJobs.ConfigBuilder.defaultImmediateJobDeletion' is to delete
    -- successful jobs immediately (and retain everything else). If you are providing your
    -- own implementation here, __be careful__ to check for the job's status before deciding
    -- whether to delete it, or not.
    --
    -- A /possible/ use-case for non-successful jobs could be to check the 'jobResult' for a failed job
    -- and depending up on the 'jobResult' decide if there is no use retrying it, and if it should be
    -- immediately deleted.
  , cfgImmediateJobDeletion :: Job -> IO Bool

    -- | A function which will be run every 'cfgPollingInterval' seconds to delete
    -- old jobs that may be hanging around in the @jobs@ table (eg. failed jobs, cancelled jobs, or even
    -- successful jobs whose deletion has been delayed via a custom 'cfgImmediateJobDeletion' function).
    --
    -- Ref: 'OddJobs.ConfigBuilder.defaultDelayedJobDeletionSql'
  , cfgDelayedJobDeletion :: Maybe (PGS.Connection -> IO Int64)

    -- | How far into the future should jobs which can be retried be queued for?
    --
    -- The 'Int' argument is the number of times the job has been attempted. It will
    -- always be at least 1, since the job will have to have started at least once
    -- in order to fail and be retried. The default implementation is an exponential
    -- backoff of @'Seconds' $ 2 ^ 'jobAttempts'@.
  , cfgDefaultRetryBackoff :: Int -> IO Seconds
  }
412 |
413 |
data UIConfig = UIConfig
  { -- | The DB table which holds your jobs. Please note, this should have been
    -- created by the 'OddJobs.Migrations.createJobTable' function.
    uicfgTableName :: TableName

    -- | The DB connection-pool to use for the web UI. __Note:__ the same DB
    -- pool used by your job-runner can be passed here if it has a sufficient
    -- number of connections to satisfy both use-cases. Else create a separate
    -- DB pool to be used only by the web UI (this DB pool can have just 1-3
    -- connections, because __typically__ the web UI doesn't serve too many
    -- concurrent users in most real-life cases)
  , uicfgDbPool :: Pool Connection

    -- | How to extract the "job type" from a 'Job'. If you are overriding this,
    -- please consider overriding 'cfgJobTypeSql' as well. __Note:__ Usually
    -- 'cfgJobType' and 'uicfgJobType' would use the same value. Related:
    -- 'OddJobs.ConfigBuilder.defaultJobType'
  , uicfgJobType :: Job -> Text

    -- | How to extract the \"job type\" directly in SQL. There are many places,
    -- especially in the web\/admin UI, where we need to know a job's type
    -- directly in SQL (because transferring the entire @payload@ column to
    -- Haskell, and then parsing it into JSON, and then applying the
    -- 'cfgJobType' function on it would be too inefficient). Ref:
    -- 'OddJobs.ConfigBuilder.defaultJobTypeSql' and 'uicfgJobType'
  , uicfgJobTypeSql :: PGS.Query

    -- | How to convert a list of 'Job's to a list of HTML fragments. This is
    -- used in the Web\/Admin UI. This function accepts a /list/ of jobs and
    -- returns a /list/ of 'Html' fragments, because, in case, you need to query
    -- another table to fetch some metadata (eg. convert a primary-key to a
    -- human-readable name), you can do it efficiently instead of resulting in
    -- an N+1 SQL bug. Ref: 'defaultJobToHtml'
  , uicfgJobToHtml :: [Job] -> IO [Html ()]

    -- | How to get a list of all known job-types? This is used by the
    -- Web\/Admin UI to power the \"filter by job-type\" functionality. The
    -- default value for this is 'OddJobs.ConfigBuilder.defaultDynamicJobTypes'
    -- which does a @SELECT DISTINCT payload ->> ...@ to get a list of job-types
    -- directly from the DB.
  , uicfgAllJobTypes :: AllJobTypes

    -- | A "structured logging" function that __you__ need to provide. The
    -- @odd-jobs@ library does NOT use the standard logging interface provided by
    -- 'monad-logger' on purpose. Also look at 'cfgJobType' and 'defaultLogStr'
    --
    -- __Note:__ Please take a look at the section on [structured
    -- logging](https://www.haskelltutorials.com/odd-jobs/guide.html#structured-logging)
    -- to find out how to use this to log in JSON.
  , uicfgLogger :: LogLevel -> LogEvent -> IO ()
  }
465 |
--------------------------------------------------------------------------------
/src/OddJobs/Web.hs:
--------------------------------------------------------------------------------
1 | {-# LANGUAGE DeriveGeneric, NamedFieldPuns, DataKinds, RecordWildCards #-}
2 | {-# LANGUAGE CPP #-}
3 | module OddJobs.Web where
4 |
5 | import OddJobs.Types
6 | import OddJobs.Job as Job
7 | import Data.Time
8 | import Data.Aeson as Aeson
9 | import qualified Data.Text as T
10 | import Data.Text (Text)
11 | import GHC.Generics hiding (from, to)
12 | import Database.PostgreSQL.Simple as PGS
13 | import Database.PostgreSQL.Simple.ToRow as PGS
14 | import Database.PostgreSQL.Simple.ToField as PGS
15 | import Data.Pool as Pool
16 | import UnliftIO
17 | import Data.Maybe
18 | import Data.String (fromString)
19 | import Control.Applicative ((<|>))
20 | import Data.List (nub)
21 | import Servant
22 | import Servant.API.Generic
23 | import Servant.HTML.Lucid
24 | import Lucid
25 | import Lucid.Html5
26 | import Lucid.Base
27 | import Data.String.Conv
28 | import Data.List as DL hiding (filter, and)
29 | import Control.Monad
30 | import Data.Time.Format.Human (humanReadableTime')
31 | import Data.Time.Convenience (timeSince, Unit(..), Direction(..))
32 | import Data.Text.Conversions (fromText, toText)
33 | import Prelude hiding (filter, and)
34 |
35 | # if MIN_VERSION_aeson(2, 0, 0)
36 | import qualified Data.Aeson.KeyMap as HM
37 | # else
38 | import qualified Data.HashMap.Strict as HM
39 | # endif
40 |
-- | Sort direction for 'filterOrder'.
data OrderDirection = Asc | Desc deriving (Eq, Show, Generic, Enum)
42 |
-- | Job-table columns that results can be ordered by (see 'filterOrder'
-- and the @orderClause@ inside 'filterJobsQuery').
data OrderByField = OrdCreatedAt
                  | OrdUpdatedAt
                  | OrdLockedAt
                  | OrdStatus
                  | OrdJobType
                  deriving (Eq, Show, Generic, Enum)
49 |
-- | Query criteria for listing jobs in the admin UI. For list-valued fields
-- an empty list means \"no restriction\"; for 'Maybe'-valued fields,
-- 'Nothing' means the same. See 'blankFilter' and the 'Semigroup' instance
-- for how filters combine.
data Filter = Filter
  { filterStatuses :: [Status]
  , filterCreatedAfter :: Maybe UTCTime
  , filterCreatedBefore :: Maybe UTCTime
  , filterUpdatedAfter :: Maybe UTCTime
  , filterUpdatedBefore :: Maybe UTCTime
  , filterJobTypes :: [Text]
  , filterOrder :: Maybe (OrderByField, OrderDirection)
  , filterPage :: Maybe (Int, Int)  -- ^ @(limit, offset)@ for pagination
  , filterRunAfter :: Maybe UTCTime
  , filterJobRunner :: [JobRunnerName]
  } deriving (Eq, Show, Generic)
62 |
-- Combine two 'Filter's, with the right-hand side taking precedence:
-- 'Maybe'-valued fields prefer @b@'s value over @a@'s, and list-valued
-- fields are concatenated (with @b@'s entries first) and de-duplicated.
instance Semigroup Filter where
  (<>) a b = Filter
    { filterStatuses = nub (filterStatuses b <> filterStatuses a)
    , filterCreatedAfter = filterCreatedAfter b <|> filterCreatedAfter a
    , filterCreatedBefore = filterCreatedBefore b <|> filterCreatedBefore a
      -- BUG FIX: this previously fell back to @filterUpdatedBefore a@,
      -- silently mixing up the two "updated" bounds.
    , filterUpdatedAfter = filterUpdatedAfter b <|> filterUpdatedAfter a
    , filterUpdatedBefore = filterUpdatedBefore b <|> filterUpdatedBefore a
    , filterJobTypes = nub (filterJobTypes b <> filterJobTypes a)
    , filterOrder = filterOrder b <|> filterOrder a
    , filterPage = filterPage b <|> filterPage a
    , filterRunAfter = filterRunAfter b <|> filterRunAfter a
    , filterJobRunner = nub (filterJobRunner b <> filterJobRunner a)
    }
76 |
-- NOTE(review): 'mempty' is 'blankFilter', whose 'filterPage' is
-- @Just (10, 0)@. Since '(<>)' prefers the right-hand side's page,
-- @x <> mempty@ resets pagination, so this is not a law-abiding identity —
-- confirm whether that is intentional before relying on Monoid laws.
instance Monoid Filter where
  mempty = blankFilter
79 |
-- | The \"no criteria\" 'Filter': no status\/type\/runner restrictions, no
-- time bounds, no explicit ordering, and a default page of 10 results
-- starting at offset 0.
blankFilter :: Filter
blankFilter = Filter
  { filterStatuses = []
  , filterJobTypes = []
  , filterJobRunner = []
  , filterCreatedAfter = Nothing
  , filterCreatedBefore = Nothing
  , filterUpdatedAfter = Nothing
  , filterUpdatedBefore = Nothing
  , filterRunAfter = Nothing
  , filterOrder = Nothing
  , filterPage = Just (10, 0)
  }
93 |
-- Generically-derived JSON codecs for the order sub-types.
instance ToJSON OrderDirection
instance FromJSON OrderDirection
instance ToJSON OrderByField
instance FromJSON OrderByField

-- | JSON round-tripping for 'Filter' omits 'Nothing' fields to keep the
-- URL-embedded representation (see 'ToHttpApiData') short.
instance ToJSON Filter where
  toJSON = Aeson.genericToJSON Aeson.defaultOptions{omitNothingFields = True}
instance FromJSON Filter where
  parseJSON = Aeson.genericParseJSON Aeson.defaultOptions{omitNothingFields = True}
103 |
104 |
-- | A 'Filter' travels in the query-string as JSON; decode failures are
-- surfaced as 'Left' carrying aeson's error message.
instance FromHttpApiData Filter where
  parseQueryParam x = case eitherDecode (toS x) of
    Left e -> Left $ toS e
    Right r -> Right r

-- | Inverse of the 'FromHttpApiData' instance: encode the filter as JSON.
instance ToHttpApiData Filter where
  toQueryParam x = toS $ Aeson.encode x
112 |
113 | -- data Routes route = Routes
114 | -- { rFilterResults :: route :- QueryParam "filters" Filter :> Get '[HTML] (Html ())
115 | -- , rStaticAssets :: route :- "assets" :> Raw
116 | -- , rEnqueue :: route :- "enqueue" :> Capture "jobId" JobId :> Post '[HTML] NoContent
117 | -- , rRunNow :: route :- "run" :> Capture "jobId" JobId :> Post '[HTML] NoContent
118 | -- , rCancel :: route :- "cancel" :> Capture "jobId" JobId :> Post '[HTML] NoContent
119 | -- , rKill :: route :- "kill" :> Capture "jobId" JobId :> Post '[HTML] NoContent
120 | -- , rRefreshJobTypes :: route :- "refresh-job-types" :> Post '[HTML] NoContent
121 | -- , rRefreshJobRunners :: route :- "refresh-job-runners" :> Post '[HTML] NoContent
122 | -- } deriving (Generic)
123 |
124 |
-- | URL-rendering callbacks used by the HTML views. Each field produces the
-- link (as 'Text') for the corresponding admin-UI action; this indirection
-- keeps the view functions independent of the actual routing\/server setup.
data Routes = Routes
  { rFilterResults :: Maybe Filter -> Text  -- ^ job-listing page for a filter
  , rEnqueue :: JobId -> Text               -- ^ re-enqueue a job
  , rRunNow :: JobId -> Text                -- ^ run a job immediately
  , rCancel :: JobId -> Text                -- ^ cancel a queued job
  , rKill :: JobId -> Text                  -- ^ kill a locked\/running job
  , rRefreshJobTypes :: Text                -- ^ refresh the cached job-type list
  , rRefreshJobRunners :: Text              -- ^ refresh the cached job-runner list
  , rStaticAsset :: Text -> Text            -- ^ URL for a static asset path
  }
135 |
136 |
-- | Build the SELECT query (plus its positional 'Action' parameters) that
-- lists jobs matching the given 'Filter'. The table name is substituted as
-- the first parameter; each filter criterion contributes an optional
-- sub-clause, and the sub-clauses are AND-ed together by the local 'and'.
filterJobsQuery :: UIConfig -> Filter -> (PGS.Query, [Action])
filterJobsQuery UIConfig{uicfgTableName, uicfgJobTypeSql} Filter{..} =
  ( "SELECT " <> Job.concatJobDbColumns <> " FROM ?" <> whereClause <> " " <> orderClause (fromMaybe (OrdUpdatedAt, Desc) filterOrder) <> " " <> limitOffsetClause
  , toRow (Only uicfgTableName) ++ whereActions
  )
  where
    -- ORDER BY fragment; @id desc@ is appended as a tie-breaker so paging
    -- stays stable when the primary sort column has duplicate values.
    orderClause (flt, dir) =
      let fname = case flt of
            OrdCreatedAt -> "created_at"
            OrdUpdatedAt -> "updated_at"
            OrdLockedAt -> "locked_at"
            OrdStatus -> "status"
            OrdJobType -> "payload->>'tag'"
          dname = case dir of
            Asc -> "asc nulls first"
            Desc -> "desc nulls last"
      in "ORDER BY " <> fname <> " " <> dname <> ", id desc"

    -- Pagination; 'filterPage' is @(limit, offset)@.
    limitOffsetClause :: Query
    limitOffsetClause = case filterPage of
      Nothing -> mempty
      Just (l, o) -> "LIMIT " <> fromString (show l) <> " OFFSET " <> fromString (show o)

    -- All optional criteria AND-ed together; 'Nothing' means no WHERE at all.
    (whereClause, whereActions) =
      let finalClause = statusClause `and` createdAfterClause `and`
                        createdBeforeClause `and` updatedBeforeClause `and`
                        updatedAfterClause `and` jobTypeClause `and`
                        runAfterClause `and` jobRunnerClause
      in case finalClause of
           Nothing -> (mempty, toRow ())
           Just (q, as) -> (" WHERE " <> q, as)

    statusClause = if Prelude.null filterStatuses
                   then Nothing
                   else Just ("status IN ?", toRow (Only (In filterStatuses)))

    createdAfterClause = Prelude.fmap (\x -> ("created_at >= ?", toRow $ Only x)) filterCreatedAfter
    createdBeforeClause = Prelude.fmap (\x -> ("created_at < ?", toRow $ Only x)) filterCreatedBefore
    updatedAfterClause = Prelude.fmap (\x -> ("updated_at >= ?", toRow $ Only x)) filterUpdatedAfter
    updatedBeforeClause = Prelude.fmap (\x -> ("updated_at < ?", toRow $ Only x)) filterUpdatedBefore
    -- NOTE(review): strict @>@ here (not @>=@), unlike the other bounds —
    -- presumably for the "future jobs" view; confirm before changing.
    runAfterClause = Prelude.fmap (\x -> ("run_at > ?", toRow $ Only x)) filterRunAfter

    -- OR together one @(jobTypeSql)=?@ fragment per requested job-type.
    jobTypeClause :: Maybe (Query, [Action])
    jobTypeClause = case filterJobTypes of
      [] -> Nothing
      xs ->
        let qFragment = "(" <> uicfgJobTypeSql <> ")=?"
            build ys (q, vs) = case ys of
              [] -> (q, vs)
              [y] -> (qFragment <> q, toField y : vs)
              (y:ys_) -> build ys_ (" OR " <> qFragment <> q, toField y : vs)
        in Just $ build xs (mempty, [])

    jobRunnerClause :: Maybe (Query, [Action])
    jobRunnerClause = case filterJobRunner of
      [] -> Nothing
      xs -> Just ("locked_by in ?", toRow $ Only $ In xs)

    -- Combine two optional clauses with SQL AND, preserving parameter order.
    and :: Maybe (Query, [PGS.Action]) -> Maybe (Query, [PGS.Action]) -> Maybe (Query, [PGS.Action])
    and Nothing Nothing = Nothing
    and Nothing (Just (q, as)) = Just (q, as)
    and (Just (q, as)) Nothing = Just (q, as)
    and (Just (qa, as)) (Just (qb, bs)) = Just ("(" <> qa <> ") AND (" <> qb <> ")", as <> bs)

    -- orderClause = _
202 |
-- | Fetch all jobs matching the given 'Filter', running the SQL generated
-- by 'filterJobsQuery' on the supplied connection.
filterJobs :: UIConfig -> Connection -> Filter -> IO [Job]
filterJobs cfg conn f = uncurry (PGS.query conn) (filterJobsQuery cfg f)
207 |
-- | Count the rows matching the given 'Filter' by wrapping the query from
-- 'filterJobsQuery' in a @SELECT count(*)@.
countJobs :: UIConfig -> Connection -> Filter -> IO Int
countJobs cfg conn f = do
  let (q, queryArgs) = filterJobsQuery cfg f
      finalqry = "SELECT count(*) FROM (" <> q <> ") a"
  -- Partial pattern-match: @count(*)@ always yields exactly one row, so the
  -- single-element match cannot fail for a well-formed inner query.
  [Only r] <- PGS.query conn finalqry queryArgs
  pure r
214 |
215 |
216 | -- f = encode [FTStatuses [Job.Success, Queued], FTJobType "QueuedMail"]
217 |
218 | -- f = blankFilter
219 | -- { filterStatuses = [Job.Success, Queued]
220 | -- , filterJobTypes = ["QueuedMail", "ConfirmBooking"]
221 | -- }
222 |
223 |
224 |
-- | Render the top navigation bar: the Odd Jobs logo (served via
-- 'rStaticAsset') plus a Bootstrap collapse toggle for small screens.
pageNav :: Routes -> Html ()
pageNav Routes{..} = do
  div_ $ nav_ [ class_ "navbar navbar-default navigation-clean" ] $ div_ [ class_ "container-fluid" ] $ do
    div_ [ class_ "navbar-header" ] $ do
      a_ [ class_ "navbar-brand navbar-link", href_ "#", style_ "margin-left: 2px; padding: 0px;" ] $ img_ [ src_ $ rStaticAsset "assets/odd-jobs-color-logo.png", title_ "Odd Jobs Logo" ]
      button_ [ class_ "navbar-toggle collapsed", data_ "toggle" "collapse", data_ "target" "#navcol-1" ] $ do
        span_ [ class_ "sr-only" ] "Toggle navigation"
        span_ [ class_ "icon-bar" ] ""
        span_ [ class_ "icon-bar" ] ""
        span_ [ class_ "icon-bar" ] ""
    -- div_ [ class_ "collapse navbar-collapse", id_ "navcol-1" ] $ ul_ [ class_ "nav navbar-nav navbar-right" ] $ do
    --   li_ [ class_ "active", role_ "presentation" ] $ a_ [ href_ "#" ] $ "First Item"
    --   li_ [ role_ "presentation" ] $ a_ [ href_ "#" ] $ "Second Item"
    --   li_ [ role_ "presentation" ] $ a_ [ href_ "#" ] $ "Third Item"
    --   li_ [ class_ "dropdown" ] $ do
    --     a_ [ class_ "dropdown-toggle", data_ "toggle" "dropdown", ariaExpanded_ "false", href_ "#" ] $ do
    --       "Dropdown"
    --       span_ [ class_ "caret" ] $ ""
    --     ul_ [ class_ "dropdown-menu", role_ "menu" ] $ do
    --       li_ [ role_ "presentation" ] $ a_ [ href_ "#" ] $ "First Item"
    --       li_ [ role_ "presentation" ] $ a_ [ href_ "#" ] $ "Second Item"
    --       li_ [ role_ "presentation" ] $ a_ [ href_ "#" ] $ "Third Item"
247 |
-- | Common HTML page scaffold: head (meta\/title\/stylesheets), the top
-- 'pageNav', a two-column body (@navHtml@ in the left side-bar, @bodyHtml@
-- in the main column), and the JS bundles at the end of @body@.
pageLayout :: Routes -> Html() -> Html () -> Html ()
pageLayout routes@Routes{..} navHtml bodyHtml = do
  doctype_
  html_ $ do
    head_ $ do
      meta_ [ charset_ "utf-8" ]
      meta_ [ name_ "viewport", content_ "width=device-width, initial-scale=1.0" ]
      title_ "haskell-pg-queue"
      link_ [ rel_ "stylesheet", href_ $ rStaticAsset "assets/bootstrap/css/bootstrap.min.css" ]
      link_ [ rel_ "stylesheet", href_ $ rStaticAsset "https://fonts.googleapis.com/css?family=Lato:100i,300,300i,400,700,900" ]
      link_ [ rel_ "stylesheet", href_ $ rStaticAsset "assets/css/styles.css" ]
    body_ $ do
      pageNav routes
      div_ $ div_ [ class_ "container-fluid", style_ "/*background-color:#f2f2f2;*/" ] $ div_ [ class_ "row" ] $ do
        div_ [ class_ "d-none d-md-block col-md-2" ] navHtml
        div_ [ class_ "col-12 col-md-10" ] bodyHtml
      script_ [ src_ $ rStaticAsset "assets/js/jquery.min.js" ] ("" :: Text)
      script_ [ src_ $ rStaticAsset "assets/bootstrap/js/bootstrap.min.js" ] ("" :: Text)
      script_ [ src_ $ rStaticAsset "assets/js/custom.js" ] ("" :: Text)
267 |
-- | Render the left-hand filter side-bar: one card each for filtering by
-- job status, job-type, and job-runner. Every entry links back to
-- 'rFilterResults' with the relevant field of the current 'Filter' replaced.
sideNav :: Routes -> [Text] -> [JobRunnerName] -> UTCTime -> Filter -> Html ()
sideNav Routes{..} jobTypes jobRunnerNames _t filter@Filter{..} = do
  div_ [ class_ "filters mt-3" ] $ do
    jobStatusFilters
    jobTypeFilters
    jobRunnerFilters
  where
    jobStatusFilters = do
      h6_ "Filter by job status"
      div_ [ class_ "card" ] $ do
        ul_ [ class_ "list-group list-group-flush" ] $ do
          li_ [ class_ ("list-group-item " <> if null filterStatuses then "active-nav" else "") ] $ do
            let lnk = rFilterResults $ Just filter{filterStatuses = [], filterPage = OddJobs.Web.filterPage blankFilter}
            a_ [ href_ lnk ] $ do
              "all"
              -- span_ [ class_ "badge badge-pill badge-secondary float-right" ] "12"
          -- 'Job.Success' is excluded from the per-status links — presumably
          -- because successful jobs are deleted by default (confirm).
          forM_ ((\\) (enumFrom minBound) [Job.Success]) $ \st -> do
            li_ [ class_ ("list-group-item " <> if st `elem` filterStatuses then "active-nav" else "") ] $ do
              let lnk = rFilterResults $ Just filter{filterStatuses = [st], filterPage = Nothing}
              a_ [ href_ lnk ] $ do
                toHtml $ toText st
                -- span_ [ class_ "badge badge-pill badge-secondary float-right" ] "12"

    jobRunnerFilters = do
      h6_ [ class_ "mt-3" ] $ do
        "Filter by job-runner"
        form_ [ method_ "post", action_ rRefreshJobRunners, class_ "d-inline"] $ do
          button_ [ type_ "submit", class_ "btn btn-link m-0 p-0 ml-1 float-right"] $ do
            small_ "refresh"

      div_ [ class_ "card" ] $ do
        ul_ [ class_ "list-group list-group-flush" ] $ do
          li_ [ class_ ("list-group-item " <> if null filterJobRunner then "active-nav" else "") ] $ do
            let lnk = rFilterResults $ Just filter{filterJobRunner = [], filterPage = OddJobs.Web.filterPage blankFilter}
            a_ [ href_ lnk ] "all"
          forM_ jobRunnerNames $ \jr -> do
            li_ [ class_ ("list-group-item" <> if jr `elem` filterJobRunner then " active-nav" else "")] $ do
              -- BUG FIX: this link previously pointed at "#" (a dead link);
              -- mirror the job-type filter and link to the filtered results.
              a_ [ href_ (rFilterResults $ Just filter{filterJobRunner = [jr]}) ] $ toHtml $ unJobRunnerName jr

    jobTypeFilters = do
      h6_ [ class_ "mt-3" ] $ do
        "Filter by job-type"
        form_ [ method_ "post", action_ rRefreshJobTypes, class_ "d-inline"] $ do
          button_ [ type_ "submit", class_ "btn btn-link m-0 p-0 ml-1 float-right"] $ do
            small_ "refresh"

      div_ [ class_ "card" ] $ do
        ul_ [ class_ "list-group list-group-flush" ] $ do
          li_ [ class_ ("list-group-item " <> if null filterJobTypes then "active-nav" else "") ] $ do
            let lnk = rFilterResults $ Just filter{filterJobTypes = [], filterPage = OddJobs.Web.filterPage blankFilter}
            a_ [ href_ lnk ] "all"
          forM_ jobTypes $ \jt -> do
            li_ [ class_ ("list-group-item" <> if jt `elem` filterJobTypes then " active-nav" else "")] $ do
              a_ [ href_ (rFilterResults $ Just filter{filterJobTypes=[jt]}) ] $ toHtml jt
322 |
-- | Render the search bar showing the currently-active filter criteria as
-- removable \"chips\"; each chip's close-link points at the same filter
-- with that one criterion removed.
searchBar :: Routes -> UTCTime -> Filter -> Html ()
searchBar Routes{..} _t filter@Filter{filterStatuses, filterCreatedAfter, filterCreatedBefore, filterUpdatedAfter, filterUpdatedBefore, filterJobTypes, filterRunAfter} = do
  form_ [ style_ "padding-top: 2em;" ] $ do
    div_ [ class_ "form-group" ] $ do
      div_ [ class_ "search-container" ] $ do
        ul_ [ class_ "list-inline search-bar" ] $ do
          forM_ filterStatuses $ \s -> renderFilter "Status" (toText s) (rFilterResults $ Just filter{filterStatuses = filterStatuses \\ [s]})
          maybe mempty (\x -> renderFilter "Created after" (showText x) (rFilterResults $ Just filter{filterCreatedAfter = Nothing})) filterCreatedAfter
          maybe mempty (\x -> renderFilter "Created before" (showText x) (rFilterResults $ Just filter{filterCreatedBefore = Nothing})) filterCreatedBefore
          maybe mempty (\x -> renderFilter "Updated after" (showText x) (rFilterResults $ Just filter{filterUpdatedAfter = Nothing})) filterUpdatedAfter
          maybe mempty (\x -> renderFilter "Updated before" (showText x) (rFilterResults $ Just filter{filterUpdatedBefore = Nothing})) filterUpdatedBefore
          maybe mempty (\x -> renderFilter "Run after" (showText x) (rFilterResults $ Just filter{filterRunAfter = Nothing})) filterRunAfter
          forM_ filterJobTypes $ \x -> renderFilter "Job type" x (rFilterResults $ Just filter{filterJobTypes = filterJobTypes \\ [x]})

        button_ [ class_ "btn btn-default search-button", type_ "button" ] "Search"
        -- ul_ [ class_ "list-inline" ] $ do
        --   li_ $ span_ $ strong_ "Common searches:"
        --   li_ $ a_ [ href_ (rFilterResults $ Just mempty) ] $ "All jobs"
        --   li_ $ a_ [ href_ (rFilterResults $ Just $ filter{ filterStatuses = [Job.Locked] }) ] $ "Currently running"
        --   li_ $ a_ [ href_ (rFilterResults $ Just $ filter{ filterStatuses = [Job.Success] }) ] $ "Successful"
        --   li_ $ a_ [ href_ (rFilterResults $ Just $ filter{ filterStatuses = [Job.Failed] }) ] $ "Failed"
        --   li_ $ a_ [ href_ (rFilterResults $ Just $ filter{ filterRunAfter = Just t }) ] $ "Future"
        --   -- li_ $ a_ [ href_ "#" ] $ "Retried"
        --   li_ $ a_ [ href_ (rFilterResults $ Just $ filter{ filterStatuses = [Job.Queued] }) ] $ "Queued"
        --   li_ $ a_ [ href_ (rFilterResults $ Just $ filter{ filterUpdatedAfter = Just $ timeSince t 10 Minutes Ago }) ] $ "Last 10 mins"
        --   li_ $ a_ [ href_ (rFilterResults $ Just $ filter{ filterCreatedAfter = Just $ timeSince t 10 Minutes Ago }) ] $ "Recently created"
  where
    -- One removable chip: criterion name, its value, and a link that drops
    -- this criterion from the filter.
    renderFilter :: Text -> Text -> Text -> Html ()
    renderFilter k v u = do
      li_ [ class_ "search-filter" ] $ do
        span_ [ class_ "filter-name" ] $ toHtml k
        span_ [ class_ "filter-value" ] $ do
          toHtml v
          a_ [ href_ u, class_ "text-danger" ] $ i_ [ class_ "glyphicon glyphicon-remove" ] ""
357 |
358 |
-- | Break the absolute difference between two timestamps into a
-- human-readable duration, returning both the total number of seconds and
-- a formatted string such as @\"1d2h3m4s\"@ (or @\"under 1s\"@ for a zero
-- difference). The argument order does not matter ('abs' is applied).
timeDuration :: UTCTime -> UTCTime -> (Int, String)
timeDuration from to = (diff, str)
  where
    str = if diff <= 0
          then "under 1s"
          else (if d>0 then show d <> "d" else "") <>
               -- BUG FIX: hours were previously computed but never rendered,
               -- so e.g. a 1h30m duration displayed as just "30m".
               (if h>0 then show h <> "h" else "") <>
               (if m>0 then show m <> "m" else "") <>
               (if s>0 then show s <> "s" else "")
    diff = abs $ round $ diffUTCTime from to
    (m', s) = diff `divMod` 60   -- m' = total minutes
    (h', m) = m' `divMod` 60     -- h' = total hours
    (d, h)  = h' `divMod` 24
371 |
-- | Render any 'Show'-able value as 'Text'.
showText :: (Show a) => a -> Text
showText = toS . show
374 |
-- | Unwrap aeson's generic sum encoding: for an object carrying a
-- @\"contents\"@ key, return that key's value; any other JSON value is
-- passed through unchanged.
jobContent :: Value -> Value
jobContent v
  | Aeson.Object o <- v = fromMaybe v (HM.lookup "contents" o)
  | otherwise = v
379 |
-- | Render one table row for a job: a status cell (badge + timing), the
-- caller-supplied job HTML, and an actions cell. Both the status and the
-- actions renderer are chosen by 'jobStatus'; a 'Job.Queued' job is shown
-- as \"future\" or \"waiting\" depending on whether 'jobRunAt' is past @t@.
jobRow :: Routes -> UTCTime -> (Job, Html ()) -> Html ()
jobRow routes t (job@Job{..}, jobHtml) = do
  tr_ $ do
    td_ [ class_ "job-type" ] $ do
      let statusFn = case jobStatus of
            Job.Success -> statusSuccess
            Job.Failed -> statusFailed
            Job.Cancelled -> statusCancelled
            Job.Queued -> if jobRunAt > t
                          then statusFuture
                          else statusWaiting
            Job.Retry -> statusRetry
            Job.Locked -> statusLocked
      statusFn t job

    td_ jobHtml
    td_ $ do
      -- Successful jobs get no actions at all ('const mempty').
      let actionsFn = case jobStatus of
            Job.Success -> const mempty
            Job.Failed -> actionsFailed
            Job.Cancelled -> actionsCancelled
            Job.Queued -> if jobRunAt > t
                          then actionsFuture
                          else actionsWaiting
            Job.Retry -> actionsRetry
            Job.Locked -> actionsLocked
      actionsFn routes job
407 |
408 |
-- | Actions for a permanently-failed job: offer to enqueue it again.
actionsFailed :: Routes -> Job -> Html ()
actionsFailed Routes{..} Job{..} = do
  form_ [ action_ (rEnqueue jobId), method_ "post" ] $ do
    button_ [ class_ "btn btn-secondary", type_ "submit" ] "Enqueue again"

-- | Actions for a cancelled job. If it is still locked (both 'jobLockedAt'
-- and 'jobLockedBy' are set) a kill is in progress, so only show a badge;
-- otherwise offer to enqueue it again.
actionsCancelled :: Routes -> Job -> Html ()
actionsCancelled Routes{..} Job{..} = case (jobLockedAt, jobLockedBy) of
  (Just _, Just _) -> do
    span_ [ class_ "badge badge-light" ] "Killing job"

  _ -> do
    form_ [ action_ (rEnqueue jobId), method_ "post" ] $ do
      button_ [ class_ "btn btn-secondary", type_ "submit" ] "Enqueue again"

-- | Actions for a job awaiting retry: offer to run it immediately.
actionsRetry :: Routes -> Job -> Html ()
actionsRetry Routes{..} Job{..} = do
  form_ [ action_ (rRunNow jobId), method_ "post" ] $ do
    button_ [ class_ "btn btn-secondary", type_ "submit" ] "Run now"

-- | Actions for a job scheduled in the future: offer to run it immediately.
actionsFuture :: Routes -> Job -> Html ()
actionsFuture Routes{..} Job{..} = do
  form_ [ action_ (rRunNow jobId), method_ "post" ] $ do
    button_ [ class_ "btn btn-secondary", type_ "submit" ] "Run now"

-- | Actions for a queued job waiting to be picked up: offer to cancel it.
actionsWaiting :: Routes -> Job -> Html ()
actionsWaiting Routes{..} Job{..} = do
  form_ [ action_ (rCancel jobId), method_ "post" ] $ do
    button_ [ class_ "btn btn-danger", type_ "submit" ] "Cancel"

-- | Actions for a locked (currently-running) job: offer to kill it.
actionsLocked :: Routes -> Job -> Html ()
actionsLocked Routes{..} Job{..} = do
  form_ [ action_ (rKill jobId), method_ "post" ] $ do
    button_ [ class_ "btn btn-warning", type_ "submit" ] "Kill"
442 |
-- | Status cell for a successful job: badge, completion time relative to
-- @t@, and the total duration (created -> updated) via 'timeDuration'.
statusSuccess :: UTCTime -> Job -> Html ()
statusSuccess t Job{..} = do
  span_ [ class_ "badge badge-success" ] "Success"
  span_ [ class_ "job-run-time" ] $ do
    -- d = duration in seconds, s = its human-readable rendering
    let (d, s) = timeDuration jobCreatedAt jobUpdatedAt
    abbr_ [ title_ (showText jobUpdatedAt) ] $ toHtml $ "Completed " <> humanReadableTime' t jobUpdatedAt <> ". "
    abbr_ [ title_ (showText d <> " seconds")] $ toHtml $ "Took " <> s

-- | Status cell for a permanently-failed job: badge, failure time, and the
-- number of attempts made.
statusFailed :: UTCTime -> Job -> Html ()
statusFailed t Job{..} = do
  span_ [ class_ "badge badge-danger" ] "Failed"
  span_ [ class_ "job-run-time" ] $ do
    abbr_ [ title_ (showText jobUpdatedAt) ] $ toHtml $ "Failed " <> humanReadableTime' t jobUpdatedAt <> " after " <> show jobAttempts <> " attempts"

-- | Status cell for a cancelled job: badge and cancellation time.
statusCancelled :: UTCTime -> Job -> Html ()
statusCancelled t Job{..} = do
  span_ [ class_ "badge badge-danger" ] "Cancelled"
  span_ [ class_ "job-run-time" ] $ do
    abbr_ [ title_ (showText jobUpdatedAt) ] $ toHtml $ "Cancelled " <> humanReadableTime' t jobUpdatedAt

-- | Status cell for a job scheduled in the future: badge and scheduled time.
statusFuture :: UTCTime -> Job -> Html ()
statusFuture t Job{..} = do
  span_ [ class_ "badge badge-secondary" ] "Future"
  span_ [ class_ "job-run-time" ] $ do
    abbr_ [ title_ (showText jobRunAt) ] $ toHtml $ humanReadableTime' t jobRunAt

-- | Status cell for a queued job whose run-time has arrived: badge only.
statusWaiting :: UTCTime -> Job -> Html ()
statusWaiting _t Job{} = do
  span_ [ class_ "badge badge-warning" ] "Waiting"
  -- span_ [ class_ "job-run-time" ] ("Waiting to be picked up" :: Text)

-- | Status cell for a job awaiting retry: badge with the attempt count,
-- last-attempt time, and when the next retry is due.
statusRetry :: UTCTime -> Job -> Html ()
statusRetry t Job{..} = do
  span_ [ class_ "badge badge-warning" ] $ toHtml $ "Retries (" <> show jobAttempts <> ")"
  span_ [ class_ "job-run-time" ] $ do
    abbr_ [ title_ (showText jobUpdatedAt) ] $ toHtml $ "Retried " <> humanReadableTime' t jobUpdatedAt <> ". "
    abbr_ [ title_ (showText jobRunAt)] $ toHtml $ "Next retry in " <> humanReadableTime' t jobRunAt

-- | Status cell for a locked (currently-running) job: badge only.
statusLocked :: UTCTime -> Job -> Html ()
statusLocked _t Job{} = do
  span_ [ class_ "badge badge-info" ] $ toHtml ("Locked" :: Text)
  -- span_ [ class_ "job-run-time" ] $ do
  --   abbr_ [ title_ (showText jobUpdatedAt) ] $ toHtml $ "Retried " <> humanReadableTime' t jobUpdatedAt <> ". "
  --   abbr_ [ title_ (showText jobRunAt)] $ toHtml $ "Next retry in " <> humanReadableTime' t jobRunAt
487 |
-- | Render the main results card: a header showing @runningCount@, a table
-- with one 'jobRow' per entry in @js@, and Prev\/Next pagination links
-- driven by 'filterPage'.
resultsPanel :: Routes -> UTCTime -> Filter -> [(Job, Html ())] -> Int -> Html ()
resultsPanel routes@Routes{..} t filter@Filter{filterPage} js runningCount = do
  div_ [ class_ "card mt-3" ] $ do
    div_ [ class_ "card-header bg-secondary text-white" ] $ do
      "Currently running "
      span_ [ class_ "badge badge-primary badge-primary" ] $ toHtml (show runningCount)
    div_ [ class_ "currently-running" ] $ div_ [ class_ "" ] $ table_ [ class_ "table table-striped table-hover" ] $ do
      thead_ [ class_ "thead-dark"] $ do
        tr_ $ do
          th_ "Job status"
          th_ "Job"
          th_ [ style_ "min-width: 12em;" ] "Actions"
      tbody_ $ do
        forM_ js (jobRow routes t)
    div_ [ class_ "card-footer" ] $ do
      nav_ $ do
        ul_ [ class_ "pagination" ] $ do
          prevLink
          nextLink
  where
    -- "Prev" is disabled on the first page (offset 0) or when paging is off.
    prevLink = do
      let (extraClass, lnk) = case filterPage of
            Nothing -> ("disabled", "")
            Just (_l, 0) -> ("disabled", "")
            Just (l, o) -> ("", rFilterResults $ Just $ filter {filterPage = Just (l, max 0 $ o - l)})
      li_ [ class_ ("page-item previous " <> extraClass) ] $ do
        a_ [ class_ "page-link", href_ lnk ] "Prev"

    -- "Next" is disabled when the current page came back short of its limit,
    -- i.e. there are no further rows to fetch.
    nextLink = do
      let (extraClass, lnk) = case filterPage of
            Nothing ->
              if DL.length js < 10
              then ("disabled", "")
              else ("", rFilterResults $ Just $ filter {filterPage = Just (10, 10)})
            Just (l, o) ->
              if DL.length js < l
              then ("disabled", "")
              else ("", rFilterResults $ Just $ filter {filterPage = Just (l, o + l)})
      li_ [ class_ ("page-item next " <> extraClass) ] $ do
        a_ [ class_ "page-link", href_ lnk ] "Next"
528 |
-- | Helper for the @aria-expanded@ accessibility attribute, which Lucid does
-- not provide out of the box.
ariaExpanded_ :: Text -> Attribute
ariaExpanded_ = makeAttribute "aria-expanded"
531 |
532 |
--------------------------------------------------------------------------------
/src/OddJobs/Job.hs:
--------------------------------------------------------------------------------
1 | {-# LANGUAGE RankNTypes, FlexibleInstances, FlexibleContexts, PartialTypeSignatures, UndecidableInstances #-}
2 | {-# LANGUAGE ExistentialQuantification, RecordWildCards, ScopedTypeVariables, CPP #-}
3 |
4 | module OddJobs.Job
5 | (
6 | -- * Starting the job-runner
7 | --
8 | -- $startRunner
9 | startJobRunner
10 |
11 | -- * Configuring the job-runner
12 | --
13 | -- $config
14 | , Config(..)
15 | , ConcurrencyControl(..)
16 | , ResourceCfg(..)
17 |
18 | -- * Creating/scheduling jobs
19 | --
20 | -- $createJobs
21 | , createJob
22 | , scheduleJob
23 |
24 | , createJobWithResources
25 | , scheduleJobWithResources
26 |
27 | -- * @Job@ and associated data-types
28 | --
29 | -- $dataTypes
30 | , Job(..)
31 | , JobId
32 | , Status(..)
33 | , JobRunnerName(..)
34 | , TableName
35 | , delaySeconds
36 | , Seconds(..)
37 | , JobErrHandler(..)
38 | , AllJobTypes(..)
39 | , ResourceId(..)
40 | , FunctionName
41 | , RunnerEnv(..)
42 |
43 | -- ** Structured logging
44 | --
45 | -- $logging
46 | , LogEvent(..)
47 | , LogLevel(..)
48 |
  -- * Job-runner internals
50 | --
51 | -- $internals
52 | , jobMonitor
53 | , jobEventListener
54 | , jobPoller
55 | , jobPollingSql
56 | , pollRunJob
57 | , JobRunner
58 | , HasJobRunner (..)
59 |
60 | -- * Database helpers
61 | --
62 | -- $dbHelpers
63 | , findJobByIdIO
64 | , saveJobIO
65 | , runJobNowIO
66 | , unlockJobIO
67 | , cancelJobIO
68 | , killJobIO
69 | , jobDbColumns
70 | , jobPollingIO
71 | , concatJobDbColumns
72 | , fetchAllJobTypes
73 | , fetchAllJobRunners
74 |
75 | -- * JSON helpers
76 | --
77 | -- $jsonHelpers
78 | , eitherParsePayload
79 | , throwParsePayload
80 | , eitherParsePayloadWith
81 | , throwParsePayloadWith
82 | )
83 | where
84 |
85 | import OddJobs.Types
86 | import qualified Data.Pool as Pool
87 | import Data.Pool(Pool)
88 | import qualified Data.Text as T
89 | import Data.Text (Text)
90 | import Database.PostgreSQL.Simple as PGS
91 | import Database.PostgreSQL.Simple.Notification
92 | import UnliftIO.Async hiding (poll)
93 | import UnliftIO.Concurrent (threadDelay, myThreadId)
94 | import Data.String
95 | import System.Posix.Process (getProcessID)
96 | import Network.HostName (getHostName)
97 | import UnliftIO.MVar
98 | import Debug.Trace
99 | import Control.Monad.Logger as MLogger (LogLevel(..), LogStr, toLogStr)
100 | import UnliftIO.IORef
101 | import UnliftIO.Exception ( SomeException(..), try, catch, finally
102 | , catchAny, bracket, Exception(..), throwIO
103 | , catches, Handler(..), mask_, onException
104 | , throwString
105 | )
106 | import Data.Proxy
107 | import Control.Monad.Trans.Control
108 | import Control.Monad.IO.Unlift (MonadUnliftIO, withRunInIO, liftIO)
109 | import Data.Text.Conversions
110 | import Data.Time
111 | import Data.Aeson hiding (Success)
112 | import qualified Data.Aeson as Aeson
113 | import qualified Data.Aeson.Types as Aeson (Parser, parseMaybe)
114 | import Data.String.Conv (StringConv(..), toS)
115 | import Data.Functor ((<&>), void)
116 | import Control.Monad (forever, forM_, join)
117 | import Data.Maybe (isNothing, maybe, fromMaybe, listToMaybe, mapMaybe, maybeToList)
118 | import Data.Either (either)
119 | import Control.Monad.Reader
120 | import GHC.Generics
121 | import Data.Map (Map)
122 | import qualified Data.HashMap.Strict as HM
123 | import qualified Data.List as DL
124 | import qualified Data.Map as DM
125 | import qualified Data.ByteString as BS
126 | import qualified Data.ByteString.Lazy as BSL
127 | import System.FilePath (FilePath)
128 | import qualified System.Directory as Dir
129 | import Prelude hiding (log)
130 | import GHC.Exts (toList)
131 | import Database.PostgreSQL.Simple.Types as PGS (Identifier(..))
132 | import Database.PostgreSQL.Simple.ToField as PGS (toField)
133 | import OddJobs.Job.Query
134 | import Data.Int (Int64)
135 | #if MIN_VERSION_aeson(2,2,0)
136 | import Data.Aeson.Types
137 | #else
138 | import Data.Aeson.Internal (iparse, IResult(..), formatError)
139 | #endif
140 |
-- | The documentation of odd-jobs currently promotes 'startJobRunner', which
-- expects a fairly detailed 'Config' record, as a top-level function for
-- initiating a job-runner. However, internally, this 'Config' record is used as
-- an environment for a 'ReaderT', and almost all functions are written in this
-- 'ReaderT' monad which implements an instance of the 'HasJobRunner' type-class.
--
-- __In future,__ this /internal/ implementation detail will allow us to offer a
-- type-class based interface as well (similar to what
-- 'Yesod.JobQueue.YesodJobQueue' provides).
class (MonadUnliftIO m, MonadBaseControl IO m) => HasJobRunner m where
  -- | How long the pollers sleep between successive polls.
  getPollingInterval :: m Seconds
  -- | User callback invoked after a job completes successfully.
  onJobSuccess :: Job -> m ()
  -- | Predicate deciding whether a successful job is deleted right away.
  immediateJobDeletion :: m (Job -> IO Bool)
  -- | Optional bulk-deletion action run periodically by 'jobDeletionPoller'.
  delayedJobDeletion :: m (Maybe (PGS.Connection -> IO Int64))
  -- | Error handlers tried (in order) when a job throws an exception.
  onJobFailed :: m [JobErrHandler]
  -- | The user-supplied function that actually executes a job.
  getJobRunner :: m (Job -> IO ())
  -- | The job-runner's own DB connection pool.
  getDbPool :: m (Pool Connection)
  -- | The table holding the jobs.
  getTableName :: m TableName
  -- | User callback invoked just before a job starts executing.
  onJobStart :: Job -> m ()
  -- | Attempt count after which a job is marked 'Failed' instead of 'Retry'.
  getDefaultMaxAttempts :: m Int
  -- | The full runner environment (config + thread book-keeping).
  getRunnerEnv :: m RunnerEnv
  -- | The configured concurrency-control strategy.
  getConcurrencyControl :: m ConcurrencyControl
  -- | Structured-logging hook.
  log :: LogLevel -> LogEvent -> m ()
  -- | Timeout after which a running job is killed with 'TimeoutException'.
  getDefaultJobTimeout :: m Seconds
  -- | User callback invoked when a job is killed due to timeout.
  onJobTimeout :: Job -> m ()
  -- | Backoff (in seconds) before the next retry, given the attempt count.
  getDefaultRetryBackoff :: Int -> m Seconds
167 |
168 | -- $logging
169 | --
170 | -- OddJobs uses \"structured logging\" for important events that occur during
171 | -- the life-cycle of a job-runner. This is useful if you're using JSON\/XML for
-- aggregating logs of your entire application to something like Kibana, AWS
173 | -- CloudFront, GCP StackDriver Logging, etc.
174 | --
175 | -- If you're not interested in using structured logging, look at
176 | -- 'OddJobs.ConfigBuilder.defaultLogStr' to output plain-text logs (or you can
177 | -- write your own function, as well).
178 | --
179 |
180 |
-- | Internal environment carried by the job-runner's 'ReaderT'-based monad
-- ('RunnerM').
data RunnerEnv = RunnerEnv
  { envConfig :: !Config
    -- ^ the user-supplied job-runner configuration
  , envJobThreadsRef :: !(IORef (Map JobId (Async ())))
    -- ^ 'Async' handles of all currently-executing job threads, keyed by
    -- job-id; maintained by 'runJobWithTimeout' and consulted by 'killJob'
    -- and 'waitForJobs'
  }

-- | The monad in which the job-runner internals actually run.
type RunnerM = ReaderT RunnerEnv IO
187 |
-- | Runs a user-supplied callback, logging (and swallowing) any exception it
-- raises, so that a misbehaving callback cannot bring down the job-runner.
logCallbackErrors :: (HasJobRunner m) => JobId -> Text -> m () -> m ()
logCallbackErrors jid msg action =
  catchAny action logError
  where
    logError e =
      log LevelError $ LogText $
        msg <> " Job ID=" <> toS (show jid) <> ": " <> toS (show e)
190 |
-- | Every 'HasJobRunner' operation for the internal 'RunnerM' monad simply
-- delegates to the corresponding field of the 'Config' stored in 'RunnerEnv'.
-- The callback-style operations additionally wrap the user-supplied function
-- in 'logCallbackErrors' so that callback exceptions are logged, not thrown.
instance HasJobRunner RunnerM where
  getPollingInterval = asks (cfgPollingInterval . envConfig)
  onJobFailed = asks (cfgOnJobFailed . envConfig)
  onJobSuccess job = do
    fn <- asks (cfgOnJobSuccess . envConfig)
    logCallbackErrors (jobId job) "onJobSuccess" $ liftIO $ fn job
  immediateJobDeletion = asks (cfgImmediateJobDeletion . envConfig)
  delayedJobDeletion = asks (cfgDelayedJobDeletion . envConfig)

  getJobRunner = asks (cfgJobRunner . envConfig)
  getDbPool = asks (cfgDbPool . envConfig)
  getTableName = asks (cfgTableName . envConfig)
  onJobStart job = do
    fn <- asks (cfgOnJobStart . envConfig)
    logCallbackErrors (jobId job) "onJobStart" $ liftIO $ fn job

  getDefaultMaxAttempts = asks (cfgDefaultMaxAttempts . envConfig)

  getRunnerEnv = ask

  getConcurrencyControl = asks (cfgConcurrencyControl . envConfig)

  log logLevel logEvent = do
    loggerFn <- asks (cfgLogger . envConfig)
    liftIO $ loggerFn logLevel logEvent

  getDefaultJobTimeout = asks (cfgDefaultJobTimeout . envConfig)
  onJobTimeout job = do
    fn <- asks (cfgOnJobTimeout . envConfig)
    logCallbackErrors (jobId job) "onJobTimeout" $ liftIO $ fn job

  getDefaultRetryBackoff attempts = do
    retryFn <- asks (cfgDefaultRetryBackoff . envConfig)
    liftIO $ retryFn attempts
225 |
-- | Start the job-runner in the /current/ thread, i.e. you'll need to use
-- 'forkIO' or 'async' manually, if you want the job-runner to run in the
-- background. Consider using 'OddJobs.Cli' to rapidly build your own
-- standalone daemon.
startJobRunner :: Config -> IO ()
startJobRunner cfg = do
  -- the shared map of running job threads starts out empty
  threadsRef <- newIORef DM.empty
  runReaderT jobMonitor RunnerEnv { envConfig = cfg, envJobThreadsRef = threadsRef }
238 |
-- | Identifier for this worker process, of the form @hostname:pid@. Used as
-- the @locked_by@ value when this process locks a job.
jobWorkerName :: IO String
jobWorkerName = do
  processId <- getProcessID
  host <- getHostName
  pure (host <> ":" <> show processId)
244 |
-- | Query used by 'findJobByIdIO' to fetch a single job; the two placeholders
-- are the table-name and the job-id.
findJobByIdQuery :: PGS.Query
findJobByIdQuery = "SELECT " <> concatJobDbColumns <> " FROM ? WHERE id = ?"
247 |
-- | 'Pool.withResource' lifted from @IO@ to any 'MonadUnliftIO'.
withResource :: MonadUnliftIO m => Pool a -> (a -> m b) -> m b
withResource pool fa =
  withRunInIO $ \runInIO -> Pool.withResource pool (runInIO . fa)
251 |
-- | Runs the given action with a connection taken from the job-runner's own
-- pool ('cfgDbPool').
withDbConnection :: (HasJobRunner m)
                 => (Connection -> m a)
                 -> m a
withDbConnection action =
  getDbPool >>= \pool -> withResource pool action
258 |
259 | --
260 | -- $dbHelpers
261 | --
262 | -- A bunch of functions that help you query 'cfgTableName' and change the status
263 | -- of individual jobs. Most of these functions are in @IO@ and you /might/ want
-- to write wrappers that lift them into your application's custom monad.
265 | --
266 | -- __Note:__ When passing a 'Connection' to these function, it is
267 | -- __recommended__ __to not__ take a connection from 'cfgDbPool'. Use your
268 | -- application\'s database pool instead.
269 | --
270 |
-- | Looks up a job by primary key, using the job-runner's own connection
-- pool. See 'findJobByIdIO' for the IO-level variant.
findJobById :: (HasJobRunner m)
            => JobId
            -> m (Maybe Job)
findJobById jid = do
  tableName <- getTableName
  withDbConnection $ \conn ->
    liftIO (findJobByIdIO conn tableName jid)
277 |
-- | IO-level version of 'findJobById'. Errors out if the primary key somehow
-- matches more than one row.
findJobByIdIO :: Connection -> TableName -> JobId -> IO (Maybe Job)
findJobByIdIO conn tname jid = do
  rows <- PGS.query conn findJobByIdQuery (tname, jid)
  case rows of
    [] -> pure Nothing
    [job] -> pure (Just job)
    _ -> Prelude.error $ "Not expecting to find multiple jobs by id=" <> show jid
283 |
284 |
-- | Query used by 'saveJobIO' to persist all mutable columns of a job and
-- return the updated row.
saveJobQuery :: PGS.Query
saveJobQuery = "UPDATE ? set run_at = ?, status = ?, payload = ?, last_error = ?, attempts = ?, locked_at = ?, locked_by = ? WHERE id = ? RETURNING " <> concatJobDbColumns

-- | Query used by 'deleteJobIO' to remove a job row by primary key.
deleteJobQuery :: PGS.Query
deleteJobQuery = "DELETE FROM ? WHERE id = ?"
290 |
-- | Persists the given job using the job-runner's own connection pool. See
-- 'saveJobIO' for the IO-level variant.
saveJob :: (HasJobRunner m) => Job -> m Job
saveJob job = do
  tableName <- getTableName
  withDbConnection $ \conn ->
    liftIO (saveJobIO conn tableName job)
295 |
-- | Persists all mutable columns of the given 'Job' (run_at, status, payload,
-- last_error, attempts, locked_at, locked_by) and returns the row as stored
-- in the DB. Errors out if the job no longer exists.
saveJobIO :: Connection -> TableName -> Job -> IO Job
saveJobIO conn tname Job{jobRunAt, jobStatus, jobPayload, jobLastError, jobAttempts, jobLockedBy, jobLockedAt, jobId} = do
  rs <- PGS.query conn saveJobQuery
    ( tname
    , jobRunAt
    , jobStatus
    , jobPayload
    , jobLastError
    , jobAttempts
    , jobLockedAt
    , jobLockedBy
    , jobId
    )
  case rs of
    [] -> Prelude.error $ "Could not find job while updating it id=" <> show jobId
    [j] -> pure j
    -- NOTE: fixed typo in the error message ("ber" -> "be")
    _js -> Prelude.error $ "Not expecting multiple rows to be returned when updating job id=" <> show jobId
313 |
-- | Deletes the given job's row, using the job-runner's own connection pool.
-- See 'deleteJobIO' for the IO-level variant.
deleteJob :: (HasJobRunner m) => JobId -> m ()
deleteJob jid = do
  tableName <- getTableName
  withDbConnection $ \conn ->
    liftIO (deleteJobIO conn tableName jid)
318 |
-- | IO-level version of 'deleteJob'; pass in a connection from your
-- application's pool.
deleteJobIO :: Connection -> TableName -> JobId -> IO ()
deleteJobIO conn tname jid =
  void (PGS.execute conn deleteJobQuery (tname, jid))
322 |
-- | Forces a job to become runnable immediately, by setting its status to
-- 'Queued' and its run_at to the current time. Only applies to jobs currently
-- in 'Queued', 'Retry', 'Failed' or 'Cancelled' state; returns the updated
-- job, or 'Nothing' if no row matched.
runJobNowIO :: Connection -> TableName -> JobId -> IO (Maybe Job)
runJobNowIO conn tname jid = do
  t <- getCurrentTime
  updateJobHelper tname conn (Queued, [Queued, Retry, Failed, Cancelled], Just t, jid)
327 |
-- | TODO: First check in all job-runners if this job is still running, or not,
-- and somehow send an uninterruptibleCancel to that thread.
--
-- Moves a 'Locked' job back to 'Retry', clearing @locked_at@\/@locked_by@ and
-- setting @run_at@ to @now()@ so it becomes runnable immediately. Returns the
-- updated job, or 'Nothing' if the job wasn't in the 'Locked' state.
unlockJobIO :: Connection -> TableName -> JobId -> IO (Maybe Job)
unlockJobIO conn tname jid = do
  listToMaybe <$> PGS.query conn q (tname, Retry, jid, In [Locked])
  where
    q = "update ? set status=?, run_at=now(), locked_at=null, locked_by=null where id=? and status in ? returning " <> concatJobDbColumns
335 |
-- | Cancels a job that hasn't started yet (status 'Queued' or 'Retry') by
-- setting its status to 'Cancelled'. Returns the updated job, or 'Nothing'
-- if the job wasn't in a cancellable state. For jobs that are already
-- running, see 'killJobIO'.
cancelJobIO :: Connection -> TableName -> JobId -> IO (Maybe Job)
cancelJobIO conn tname jid =
  updateJobHelper tname conn (Cancelled, [Queued, Retry], Nothing, jid)
339 |
-- | Marks a currently-running ('Locked') job as 'Cancelled'. The worker that
-- holds the lock picks this up via 'killJobPoller' and kills the job thread.
-- Returns the updated job, or 'Nothing' if the job wasn't 'Locked'.
killJobIO :: Connection -> TableName -> JobId -> IO (Maybe Job)
killJobIO conn tname jid =
  updateJobHelper tname conn (Cancelled, [Locked], Nothing, jid)
343 |
-- | Common helper for 'runJobNowIO', 'cancelJobIO' and 'killJobIO': moves a
-- job to @newStatus@ (resetting its attempt counter to 0), provided it is
-- currently in one of @existingStates@. When @mRunAt@ is 'Nothing' the
-- existing @run_at@ value is preserved via the @run_at = run_at@ identifier
-- trick; otherwise it is set to the given time. Returns the updated job, or
-- 'Nothing' if no row matched.
updateJobHelper :: TableName
                -> Connection
                -> (Status, [Status], Maybe UTCTime, JobId)
                -> IO (Maybe Job)
updateJobHelper tname conn (newStatus, existingStates, mRunAt, jid) =
  listToMaybe <$> PGS.query conn q (tname, newStatus, runAt, jid, PGS.In existingStates)
  where
    q = "update ? set attempts=0, status=?, run_at=? where id=? and status in ? returning " <> concatJobDbColumns
    runAt = case mRunAt of
      Nothing -> PGS.toField $ PGS.Identifier "run_at"
      Just t -> PGS.toField t
355 |
356 |
-- | Raised by the watchdog thread in 'runJobWithTimeout' when a job exceeds
-- its timeout; 'runJob' inspects it via 'fromException' to distinguish a
-- timeout from an ordinary job failure.
data TimeoutException = TimeoutException deriving (Eq, Show)
instance Exception TimeoutException
359 |
-- | Spawns the job on a separate thread, along with a monitoring thread that
-- throws 'TimeoutException' after @timeoutSec@; whichever thread finishes
-- first cancels the other. The job's 'Async' handle is registered in
-- 'envJobThreadsRef' for the duration of the run (so 'killJob' and
-- 'waitForJobs' can find it) and removed again on completion.
runJobWithTimeout :: (HasJobRunner m)
                  => Seconds
                  -> Job
                  -> m ()
runJobWithTimeout timeoutSec job@Job{jobId} = do
  threadsRef <- envJobThreadsRef <$> getRunnerEnv
  jobRunner_ <- getJobRunner

  a <- async $ liftIO $ jobRunner_ job

  -- register the freshly-spawned thread in the shared book-keeping map
  _x <- atomicModifyIORef' threadsRef $ \threads ->
    ( DM.insert jobId a threads
    , DL.map asyncThreadId $ DM.elems $ DM.insert jobId a threads
    )

  -- liftIO $ putStrLn $ "Threads: " <> show x
  log LevelDebug $ LogText $ toS $ "Spawned job in " <> show (asyncThreadId a)

  -- watchdog thread: fires 'TimeoutException' once the timeout elapses
  t <- async $ do
    delaySeconds timeoutSec
    throwIO TimeoutException

  -- whichever of job/watchdog finishes first cancels the other; in either
  -- case the job is removed from the book-keeping map
  void $ finally
    (waitEitherCancel a t)
    (atomicModifyIORef' threadsRef $ \threads -> (DM.delete jobId threads, ()))
388 |
389 |
-- | Runs a job, blocking for as long as it's in progress. Fetches the job
-- from the DB, invokes the user-supplied job-runner under a timeout
-- ('getDefaultJobTimeout'), and on success either deletes the job right away
-- (if 'immediateJobDeletion' says so) or saves it with status 'Success'.
-- Any exception -- including 'TimeoutException' -- is routed to 'retryOrFail'.
runJob :: (HasJobRunner m) => JobId -> m ()
runJob jid = do
  findJobById jid >>= \case
    Nothing -> Prelude.error $ "Could not find job id=" <> show jid
    Just job -> do
      startTime <- liftIO getCurrentTime
      lockTimeout <- getDefaultJobTimeout
      log LevelInfo $ LogJobStart job
      flip catch (exceptionHandler job startTime) $ do
        onJobStart job
        runJobWithTimeout lockTimeout job
        endTime <- liftIO getCurrentTime
        let newJob = job{jobStatus=OddJobs.Types.Success, jobLockedBy=Nothing, jobLockedAt=Nothing, jobUpdatedAt = endTime}
        -- the user-supplied predicate decides whether successful jobs are
        -- deleted immediately or kept around (status=success) for inspection
        shouldDeleteJob <- immediateJobDeletion >>= (\fn -> liftIO $ fn newJob)
        if shouldDeleteJob
          then deleteJob jid
          else void $ saveJob newJob
        log LevelInfo $ LogJobSuccess newJob (diffUTCTime endTime startTime)
        onJobSuccess newJob
        pure ()
  where
    exceptionHandler job startTime (e :: SomeException) = retryOrFail (toException e) job startTime
    -- Picks 'Retry' (with backoff) or permanent 'Failed' based on the attempt
    -- count, saves the job accordingly, then dispatches either to the
    -- configured failure handlers or to the timeout callback, depending on
    -- whether the exception was a 'TimeoutException'.
    retryOrFail e job@Job{jobAttempts} startTime = do
      endTime <- liftIO getCurrentTime
      defaultMaxAttempts <- getDefaultMaxAttempts
      let runTime = diffUTCTime endTime startTime
          (newStatus, failureMode, logLevel) = if jobAttempts >= defaultMaxAttempts
                                               then ( Failed, FailPermanent, LevelError )
                                               else ( Retry, FailWithRetry, LevelWarn )
      t <- liftIO getCurrentTime
      backoffInSeconds <- getDefaultRetryBackoff jobAttempts
      newJob <- saveJob job{ jobStatus=newStatus
                           , jobLockedBy=Nothing
                           , jobLockedAt=Nothing
                           , jobLastError=Just $ toJSON $ show e -- TODO: convert errors to json properly
                           , jobRunAt=addUTCTime (fromIntegral $ unSeconds backoffInSeconds) t
                           }
      case fromException e :: Maybe TimeoutException of
        Nothing -> do
          log logLevel $ LogJobFailed newJob e failureMode runTime
          -- walk the configured 'JobErrHandler's via foldr; the left-most
          -- handler whose exception type matches wins, and if none matches
          -- the exception is re-thrown
          let tryHandler (JobErrHandler handler) res = case fromException e of
                Nothing -> res
                Just e_ -> void $ handler e_ newJob failureMode
          handlers <- onJobFailed
          liftIO $ void $ Prelude.foldr tryHandler (throwIO e) handlers

        Just _ -> do
          log logLevel $ LogJobTimeout newJob
          onJobTimeout newJob

      pure ()
442 |
-- | Kills a running job by 'uninterruptibleCancel'-ing the 'Async' handle
-- recorded for it in 'envJobThreadsRef', removing the handle from the map
-- afterwards. Logs a failure if the job exists in the DB but is not being
-- run by /this/ worker's threads.
killJob :: (HasJobRunner m) => JobId -> m ()
killJob jid = do
  threadsRef <- envJobThreadsRef <$> getRunnerEnv
  threads <- liftIO $ readIORef threadsRef
  mJob <- findJobById jid

  case (mJob, jid `DM.lookup` threads) of
    (Just job, Just thread) -> do
      log LevelInfo $ LogKillJobSuccess job
      -- cancel the thread; the map entry is removed even if cancel throws
      void $ finally
        (uninterruptibleCancel thread)
        (atomicModifyIORef' threadsRef $ \threads' -> (DM.delete jid threads', ()))

    (Just job, Nothing) -> do
      log LevelInfo $ LogKillJobFailed job

    (Nothing, _) ->
      log LevelError $ LogText $ "Unable to find job in db to kill, jobId = " <> toS (show jid)
461 |
-- TODO: This might have a resource leak.
-- | Supervises the given action: if it ever exits -- whether normally or with
-- an exception -- the fact is logged and the action is started all over
-- again. When this supervisor itself receives a shutdown (async exception),
-- it cancels the supervised action instead.
restartUponCrash :: (HasJobRunner m, Show a) => Text -> m a -> m ()
restartUponCrash name_ action = do
  a <- async action
  finally (waitCatch a >>= fn) $ do
    log LevelInfo $ LogText $ "Received shutdown: " <> toS name_
    cancel a
  where
    -- NOTE: fixed typos in the log messages ("folloing" -> "following",
    -- "Restaring." -> "Restarting.") and removed a leftover Debug.Trace call.
    fn x = do
      case x of
        Left (e :: SomeException) -> log LevelError $ LogText $ name_ <> " seems to have exited with an error. Restarting: " <> toS (show e)
        Right r -> log LevelError $ LogText $ name_ <> " seems to have exited with the following result: " <> toS (show r) <> ". Restarting."
      restartUponCrash name_ action
476 |
-- | Spawns 'jobPoller', 'jobEventListener', 'killJobPoller' (and, when
-- configured, the delayed-deletion poller) in separate threads and restarts
-- them in the off-chance they happen to crash. Also responsible for
-- implementing graceful shutdown, i.e. waiting for all jobs already being
-- executed to finish execution before exiting the main thread.
jobMonitor :: forall m . (HasJobRunner m) => m ()
jobMonitor = do
  a1 <- async $ restartUponCrash "Job poller" jobPoller
  a2 <- async $ restartUponCrash "Job event listener" jobEventListener
  a3 <- async $ restartUponCrash "Job Kill poller" killJobPoller
  -- the deletion poller is optional: it runs only when the user configured a
  -- delayed-deletion function ('delayedJobDeletion')
  a4 <- delayedJobDeletion >>= \case
    Nothing -> pure Nothing
    Just deletionFn -> fmap Just $ async $ restartUponCrash "job deletion poller" (jobDeletionPoller deletionFn)
  let asyncThreads = [a1, a2, a3] <> maybeToList a4
  -- if any monitor thread ever finishes, tear all of them down and wait for
  -- in-flight jobs before returning
  finally (void $ waitAnyCatch asyncThreads) $ do
    log LevelInfo (LogText "Stopping jobPoller and jobEventListener threads.")
    cancel a3
    cancel a2
    cancel a1
    maybe (pure ()) cancel a4
    log LevelInfo (LogText "Waiting for jobs to complete.")
    waitForJobs
498 |
-- | Blocks until every job-thread recorded in 'envJobThreadsRef' has
-- finished; used during graceful shutdown. Each round waits for whichever
-- thread finishes first, then sleeps a second and re-checks the map.
waitForJobs :: (HasJobRunner m)
            => m ()
waitForJobs = do
  curJobs <- getRunnerEnv >>= (readIORef . envJobThreadsRef) <&> DM.elems
  case curJobs of
    [] -> log LevelInfo $ LogText "All job-threads exited"
    as -> do
      tid <- myThreadId
      void $ waitAnyCatch as
      log LevelDebug $ LogText $ toS $ "Waiting for " <> show (DL.length as) <> " jobs to complete before shutting down. myThreadId=" <> show tid
      delaySeconds (Seconds 1)
      waitForJobs
511 |
-- | Internal decision produced by 'getConcurrencyControlFn', telling the
-- pollers/listener whether (and how) the next job may be picked up.
data ConcurrencyAction
  = DontPoll
    -- ^ concurrency limit reached; do not pick up any job right now
  | PollAny
    -- ^ free to pick up any runnable job
  | PollWithResources ResourceCfg
    -- ^ pick up jobs subject to the given resource constraints
516 |
-- | Translates the user-configured 'ConcurrencyControl' strategy into a
-- function the pollers/listener call before picking up a job. The
-- 'Connection' argument is currently ignored by every strategy (note the
-- 'const's below) but is part of the signature so future strategies can
-- consult the DB.
getConcurrencyControlFn :: (HasJobRunner m)
                        => m (Connection -> m ConcurrencyAction)
getConcurrencyControlFn = getConcurrencyControl >>= \case
  UnlimitedConcurrentJobs -> pure $ const $ pure PollAny
  MaxConcurrentJobs maxJobs -> pure $ const $ do
    -- compare the number of currently-running job threads with the limit
    curJobs <- getRunnerEnv >>= (readIORef . envJobThreadsRef) <&> DM.elems
    pure $ pollIf $ DL.length curJobs < maxJobs
  ResourceLimits resCfg -> pure $ const $ pure $ PollWithResources resCfg
  DynamicConcurrency fn -> pure $ const $ pollIf <$> liftIO fn

  where
    pollIf cond = if cond then PollAny else DontPoll
529 |
-- | Runs 'jobPollingSql' once: tries to lock (at most) one runnable job for
-- the given worker (@processName@) and returns its id. @lockTimeout@
-- determines how stale a 'Locked' job's @locked_at@ must be before the job is
-- considered abandoned and eligible for pick-up again.
jobPollingIO :: Connection -> String -> TableName -> Seconds -> IO [Only JobId]
jobPollingIO pollerDbConn processName tname lockTimeout = do
  t <- getCurrentTime
  PGS.query pollerDbConn jobPollingSql
    ( tname
    , Locked
    , t
    , processName
    , tname
    , t
    , In [Queued, Retry]
    , Locked
    , addUTCTime (fromIntegral $ negate $ unSeconds lockTimeout) t)
543 |
-- | Executes 'jobPollingSql' every 'cfgPollingInterval' seconds to pick up jobs
-- for execution. Uses @UPDATE@ along with @SELECT...FOR UPDATE@ to efficiently
-- find a job that matches /all/ of the following conditions:
--
--   * 'jobRunAt' should be in the past
--   * /one of the following/ conditions match:
--
--       * 'jobStatus' should be 'Queued' or 'Retry'
--
--       * 'jobStatus' should be 'Locked' and 'jobLockedAt' should be
--         'defaultLockTimeout' seconds in the past, thus indicating that the
--         job was picked up for execution, but didn't complete on time
--         (possibly because the thread/process executing it crashed without
--         being able to update the DB)
--
-- Before every poll, the concurrency-control policy is consulted to decide
-- whether, and under which resource constraints, a job may be picked up.
jobPoller :: (HasJobRunner m) => m ()
jobPoller = do
  processName <- liftIO jobWorkerName
  log LevelInfo $ LogText $ toS $ "Starting the job monitor via DB polling with processName=" <> processName
  concurrencyControlFn <- getConcurrencyControlFn
  pool <- getDbPool
  forever $ withResource pool concurrencyControlFn >>= \case
    DontPoll -> log LevelWarn $ LogText "NOT polling the job queue due to concurrency control"
    PollAny -> void $ pollRunJob processName Nothing
    PollWithResources resCfg -> void $ pollRunJob processName (Just resCfg)
570 |
571 | -- | Polls a job and runs it, or executes a delay action if no job was found
572 | --
573 | -- returns an async to the task that was started, allowing you
574 | -- to block until it's finished
pollRunJob :: (HasJobRunner m) => String -> Maybe ResourceCfg -> m (Maybe (Async ()))
pollRunJob processName mResCfg = do
  tname <- getTableName
  -- note: it's better to use a fine-grained pool connection here, since
  -- Data.Pool already has its internal resource cache; this way the user can
  -- configure how long a connection needs to remain open.
  pool <- getDbPool
  lockTimeout <- getDefaultJobTimeout
  -- The query (which locks the job) runs under mask_; the returned action
  -- (delay or job-spawn) is run by 'join' outside the mask.
  join $ withResource pool $ \pollerDbConn -> mask_ $ do
    log LevelDebug $ LogText $ toS $ "[" <> processName <> "] Polling the job queue.."
    t <- liftIO getCurrentTime
    r <- case mResCfg of
      Nothing -> liftIO $
        PGS.query pollerDbConn jobPollingSql
          ( tname
          , Locked
          , t
          , processName
          , tname
          , t
          , In [Queued, Retry]
          , Locked
          , addUTCTime (fromIntegral $ negate $ unSeconds lockTimeout) t)
      Just ResourceCfg{..} -> liftIO $
        PGS.query pollerDbConn jobPollingWithResourceSql
          ( tname
          , Locked
          , t
          , processName
          , tname
          , t
          , In [Queued, Retry]
          , Locked
          , addUTCTime (fromIntegral $ negate $ unSeconds lockTimeout) t
          , resCfgCheckResourceFunction
          )
    case r of
      -- When we don't have any jobs to run, we can relax a bit...
      [] -> pure (Nothing <$ delayAction)

      -- When we find a job to run, fork and try to find the next job without any delay...
      [Only (jid :: JobId)] -> do
        x <- async $ runJob jid
        pure $ Just x <$ noDelayAction

      x -> error $ "WTF just happened? I was supposed to get only a single row, but got: " ++ show x
  where
    delayAction = delaySeconds =<< getPollingInterval
    noDelayAction = pure ()
625 |
626 | -- | Executes 'killJobPollingSql' every 'cfgPollingInterval' seconds to pick up jobs
627 | -- that are cancelled and need to be killed. Uses @UPDATE@ along with @SELECT...
628 | -- ..FOR UPDATE@ to efficiently find a job that matches /all/ of the following
629 | -- conditions:
630 | --
631 | -- * 'jobStatus' should be 'cancelled'
632 | -- * 'jobLockedAt' should be in the past
633 | -- * 'jobLockedBy' should be the current job worker name
634 |
killJobPoller :: (HasJobRunner m) => m ()
killJobPoller = do
  processName <- liftIO jobWorkerName
  pool <- getDbPool
  tname <- getTableName

  -- One poll round: try to claim a single cancelled job locked by this
  -- worker. The SQL runs under mask_; the resulting delay/kill action is
  -- executed by 'join' outside the mask.
  let pollJobToKill conn = join $ mask_ $ do
        currentTime <- liftIO getCurrentTime
        result <- liftIO $ PGS.query conn killJobPollingSql
          (tname, tname, Cancelled, processName, currentTime)

        case result of
          [] ->
            pure delayAction

          [Only (jobId :: JobId)] -> do
            -- kill in the background and immediately poll again for more
            void $ async $ killJob jobId
            pure noDelayAction

          x ->
            error $ "I was supposed to get only a single row, but got: " ++ show x

  withResource pool (forever . pollJobToKill)

  where
    delayAction = delaySeconds =<< getPollingInterval
    noDelayAction = pure ()
662 |
663 | -- | Uses PostgreSQL's LISTEN/NOTIFY to be immediately notified of newly created
664 | -- jobs.
-- | Subscribes to the table's NOTIFY channel and, for every event whose
-- payload describes a job that is runnable right now (run_at in the past,
-- not locked), tries to lock the job and run it in a background thread.
-- Events are still subject to the concurrency-control policy.
--
-- NOTE: fixed typos in two runtime messages ("happned" -> "happened",
-- "needs needs" -> "needs"); behavior is otherwise unchanged.
jobEventListener :: (HasJobRunner m)
                 => m ()
jobEventListener = do
  log LevelInfo $ LogText "Starting the job monitor via LISTEN/NOTIFY..."
  pool <- getDbPool
  tname <- getTableName
  jwName <- liftIO jobWorkerName
  concurrencyControlFn <- getConcurrencyControlFn

  let tryLockingJob jid mResCfg = withDbConnection $ \conn -> do
        let q = "UPDATE ? SET status=?, locked_at=now(), locked_by=?, attempts=attempts+1 WHERE id=? AND status in ? RETURNING id"
        result <- case mResCfg of
          Nothing -> liftIO $ PGS.query conn q (tname, Locked, jwName, jid, In [Queued, Retry])
          Just ResourceCfg{..} -> liftIO $ PGS.query conn qWithResources
            (tname, Locked, jwName, jid, In [Queued, Retry], resCfgCheckResourceFunction)

        case result of
          [] -> do
            log LevelDebug $ LogText $ toS $ "Job was locked by someone else before I could start. Skipping it. JobId=" <> show jid
            pure Nothing
          [Only (_ :: JobId)] -> pure $ Just jid
          x -> error $ "WTF just happened? Was expecting a single row to be returned, received " ++ show x

  withResource pool $ \monitorDbConn -> do
    void $ liftIO $ PGS.execute monitorDbConn "LISTEN ?" (Only $ pgEventName tname)
    forever $ do
      log LevelDebug $ LogText "[LISTEN/NOTIFY] Event loop"
      notif <- liftIO $ getNotification monitorDbConn

      let pload = notificationData notif
          runNotifWithFilter :: HasJobRunner m => Maybe ResourceCfg -> m ()
          runNotifWithFilter mResCfg = do
            log LevelDebug $ LogText $ toS $ "NOTIFY | " <> show pload
            case eitherDecode $ toS pload of
              Left e -> log LevelError $ LogText $ toS $ "Unable to decode notification payload received from Postgres. Payload=" <> show pload <> " Error=" <> show e

              -- Checking if job needs to be fired immediately AND it is not already
              -- taken by some other thread, by the time it got to us
              Right (v :: Value) -> case Aeson.parseMaybe parser v of
                Nothing -> log LevelError $ LogText $ toS $ "Unable to extract id/run_at/locked_at from " <> show pload
                Just (jid, runAt_, mLockedAt_) -> do
                  t <- liftIO getCurrentTime
                  if (runAt_ <= t) && isNothing mLockedAt_
                    then do log LevelDebug $ LogText $ toS $ "Job needs to be run immediately. Attempting to fork in background. JobId=" <> show jid
                            void $ async $ do
                              -- Let's try to lock the job first... it is possible that it has already
                              -- been picked up by the poller by the time we get here.
                              tryLockingJob jid mResCfg >>= \case
                                Nothing -> pure ()
                                Just lockedJid -> runJob lockedJid
                    else log LevelDebug $ LogText $ toS $ "Job is either for future, is already locked, or would violate concurrency constraints. Skipping. JobId=" <> show jid

      concurrencyControlFn monitorDbConn >>= \case
        DontPoll -> log LevelWarn $ LogText "Received job event, but ignoring it due to concurrency control"
        PollAny -> runNotifWithFilter Nothing
        PollWithResources resCfg -> runNotifWithFilter (Just resCfg)
  where
    -- extracts (id, run_at, locked_at) from the NOTIFY JSON payload
    parser :: Value -> Aeson.Parser (JobId, UTCTime, Maybe UTCTime)
    parser = withObject "expecting an object to parse job.run_at and job.locked_at" $ \o -> do
      runAt_ <- o .: "run_at"
      mLockedAt_ <- o .:? "locked_at"
      jid <- o .: "id"
      pure (jid, runAt_, mLockedAt_)
728 |
729 |
-- | Periodically invokes the user-supplied deletion function with a
-- connection from the job-runner's pool, logging how many rows were deleted
-- on each iteration. Runs every 'getPollingInterval' seconds.
jobDeletionPoller :: (HasJobRunner m) => (Connection -> IO Int64) -> m ()
jobDeletionPoller deletionFn = do
  i <- getPollingInterval
  -- NOTE: removed an unused `dbPool <- getDbPool` binding;
  -- 'withDbConnection' already takes a connection from the pool.
  withDbConnection $ \conn -> do
    forever $ do
      n <- liftIO $ deletionFn conn
      log LevelDebug $ LogDeletionPoll n
      delaySeconds i
739 |
740 | -- $createJobs
741 | --
742 | -- Ideally you'd want to create wrappers for 'createJob' and 'scheduleJob' in
743 | -- your application so that instead of being in @IO@ they can be in your
-- application's monad @m@ instead (thus saving you from a @liftIO@ every time
-- you want to enqueue a job).
746 |
-- | Create a job for immediate execution.
--
-- Internally calls 'scheduleJob' passing it the current time. Read
-- 'scheduleJob' for further documentation.
createJob :: ToJSON p
          => Connection
          -> TableName
          -> p
          -> IO Job
createJob conn tname payload =
  getCurrentTime >>= scheduleJob conn tname payload
759 |
-- | Create a job for execution at the given time.
--
--  * If time has already past, 'jobEventListener' is going to pick this up
--    for execution immediately.
--
--  * If time is in the future, 'jobPoller' is going to pick this up with an
--    error of +/- 'cfgPollingInterval' seconds. Please do not expect very high
--    accuracy of when the job is actually executed.
scheduleJob :: ToJSON p
            => Connection -- ^ DB connection to use. __Note:__ This should
                          -- /ideally/ come out of your application's DB pool,
                          -- not the 'cfgDbPool' you used in the job-runner.
            -> TableName -- ^ DB-table which holds your jobs
            -> p -- ^ Job payload
            -> UTCTime -- ^ when should the job be executed
            -> IO Job
scheduleJob conn tname payload runAt = do
  let args = ( tname, runAt, Queued, toJSON payload, Nothing :: Maybe Value, 0 :: Int, Nothing :: Maybe Text, Nothing :: Maybe Text )
      queryFormatter = toS <$> PGS.formatQuery conn createJobQuery args
  rs <- PGS.query conn createJobQuery args
  case rs of
    -- NOTE: the message is now built as "<text> Query=<formatted query>"
    -- (previously the operands of (<>) were flipped, leaving "Query=" dangling
    -- at the end) and the error is raised eagerly instead of being hidden
    -- inside the lazily-returned Job value.
    [] -> queryFormatter >>= \q -> Prelude.error $ "Not expecting a blank result set when creating a job. Query=" <> q
    [r] -> pure r
    _ -> queryFormatter >>= \q -> Prelude.error $ "Not expecting multiple rows when creating a single job. Query=" <> q
784 |
-- | The resources a job needs, each paired with an 'Int' usage amount that is
-- recorded via 'registerResourceUsage' (presumably the number of units of
-- that resource the job consumes while running -- confirm against the
-- resource-checking SQL).
type ResourceList = [(ResourceId, Int)]
786 |
-- | Like 'createJob', but for a job that requires resources: fetches the
-- current time and delegates to 'scheduleJobWithResources'.
createJobWithResources
  :: ToJSON p
  => Connection
  -> TableName
  -> ResourceCfg
  -> p
  -> ResourceList
  -> IO Job
createJobWithResources conn tname resCfg payload resources =
  getCurrentTime >>= scheduleJobWithResources conn tname resCfg payload resources
798 |
-- | Like 'scheduleJob', but also records the job's resource requirements.
--
-- Everything is inserted in a single transaction to delay @NOTIFY@ calls,
-- so a job isn't picked up before its resources are inserted.
--
-- Fixes over the previous version:
--
-- * The rollback guard ('onException') now covers the /entire/ transaction
--   body. Previously only the result-set inspection was guarded, so a failure
--   in the initial insert, the resource inserts, or the @COMMIT@ itself left
--   an open transaction on the (likely pooled) connection.
--
-- * The error messages were built with a flipped @(<>)@ section (query text
--   first, dangling @Query=@ at the end) and the 'Prelude.error' call was
--   fmapped into the result, deferring the exception; both are corrected.
scheduleJobWithResources
  :: ToJSON p
  => Connection
  -> TableName
  -> ResourceCfg
  -> p
  -> ResourceList
  -> UTCTime
  -> IO Job
scheduleJobWithResources conn tname ResourceCfg{..} payload resources runAt = do
  PGS.begin conn
  flip onException (PGS.rollback conn) $ do
    let args = ( tname, runAt, Queued, toJSON payload, Nothing :: Maybe Value, 0 :: Int, Nothing :: Maybe Text, Nothing :: Maybe Text )
        queryFormatter = toS <$> PGS.formatQuery conn createJobQuery args
    rs <- PGS.query conn createJobQuery args
    job <- case rs of
      [r] -> pure r
      [] -> do
        q <- queryFormatter
        Prelude.error $ "Not expecting a blank result set when creating a job. Query=" <> q
      _ -> do
        q <- queryFormatter
        Prelude.error $ "Not expecting multiple rows when creating a single job. Query=" <> q

    forM_ resources $ \(resourceId, usage) -> do
      void $ PGS.execute conn ensureResource (resCfgResourceTable, rawResourceId resourceId, resCfgDefaultLimit)
      void $ PGS.execute conn registerResourceUsage (resCfgUsageTable, jobId job, rawResourceId resourceId, usage)

    PGS.commit conn
    pure job
828 |
829 | -- getRunnerEnv :: (HasJobRunner m) => m RunnerEnv
830 | -- getRunnerEnv = ask
831 |
-- | Parse a job's JSON payload into the caller's type using its 'FromJSON'
-- instance, returning a 'Left' with the formatted parse error on failure.
-- Convenience wrapper over 'eitherParsePayloadWith'.
eitherParsePayload :: (FromJSON a)
  => Job
  -> Either String a
eitherParsePayload job =
  eitherParsePayloadWith parseJSON job
837 |
-- | Parse a job's JSON payload into the caller's type using its 'FromJSON'
-- instance, throwing (via 'throwString') on a parse failure. Convenience
-- wrapper over 'throwParsePayloadWith'.
throwParsePayload :: (FromJSON a)
  => Job
  -> IO a
throwParsePayload job =
  throwParsePayloadWith parseJSON job
843 |
-- | Run the given Aeson parser against the job's 'jobPayload', returning the
-- parsed value or a formatted error string.
--
-- Fix: dropped the redundant @do@ that wrapped the single @case@ expression
-- (this is a pure 'Either' computation, not a monadic block).
eitherParsePayloadWith :: (Aeson.Value -> Aeson.Parser a)
  -> Job
  -> Either String a
eitherParsePayloadWith parser Job{jobPayload} =
  case iparse parser jobPayload of
    -- TODO: throw a custom exception so that error reporting is better
    IError jpath e -> Left $ formatError jpath e
    ISuccess r -> Right r
852 |
-- | Run the given Aeson parser against the job's payload, throwing a
-- 'StringException' (via 'throwString') if parsing fails.
throwParsePayloadWith :: (Aeson.Value -> Aeson.Parser a)
  -> Job
  -> IO a
throwParsePayloadWith parser job =
  case eitherParsePayloadWith parser job of
    Left err -> throwString err
    Right val -> pure val
858 |
859 |
-- | Used by the web\/admin UI to fetch a \"master list\" of all known
-- job-types. Ref: 'cfgAllJobTypes'
--
-- How the list is obtained depends on the 'uicfgAllJobTypes' constructor:
-- a fixed list is returned as-is, an SQL-based fetcher is run against a
-- connection from 'uicfgDbPool', and a custom action is run directly.
fetchAllJobTypes :: (MonadIO m)
                 => UIConfig
                 -> m [Text]
fetchAllJobTypes UIConfig{uicfgAllJobTypes, uicfgDbPool} = liftIO fetch
  where
    fetch = case uicfgAllJobTypes of
      AJTFixed fixedTypes -> pure fixedTypes
      AJTSql queryFn -> withResource uicfgDbPool queryFn
      AJTCustom customAction -> customAction
870 |
-- | Used by web\/admin IO to fetch a \"master list\" of all known job-runners.
-- There is a known issue with the way this has been implemented:
--
-- * Since this looks at the 'jobLockedBy' column of 'cfgTableName', it will
--   discover only those job-runners that are actively executing at least one
--   job at the time this function is executed.
fetchAllJobRunners :: (MonadIO m)
                   => UIConfig
                   -> m [JobRunnerName]
fetchAllJobRunners UIConfig{uicfgTableName, uicfgDbPool} =
  liftIO $ withResource uicfgDbPool $ \conn -> do
    lockedByRows <- PGS.query conn "select distinct locked_by from ?" (Only uicfgTableName)
    -- NULL locked_by values come back as Nothing and are discarded.
    pure $ mapMaybe fromOnly lockedByRows
882 |
--------------------------------------------------------------------------------