├── .github └── workflows │ ├── build_on_pr.yml │ ├── build_on_tag.yml │ └── go.yml ├── .gitignore ├── BUGS.md ├── LICENSE ├── Makefile ├── README.md ├── TODO.md ├── VERSION ├── cmd └── scom │ ├── scom.conf │ └── scom.go ├── go.mod ├── go.sum ├── images ├── ct.png ├── ct_filter.png ├── ct_filter_on.png ├── jd.png ├── jft.png ├── jft_edit.png ├── jh.png ├── jobqueue.gif ├── jq.png ├── jq_actions.png ├── jq_filter.png ├── jq_filter_on.png └── jq_info.png ├── internal ├── cmdline │ └── cmdline.go ├── command │ ├── commConfig.go │ └── command.go ├── config │ └── config.go ├── defaults │ └── defaults.go ├── generic │ └── generic.go ├── keybindings │ └── keybindings.go ├── logger │ └── logger.go ├── model │ ├── init.go │ ├── model.go │ ├── tabs │ │ ├── abouttab │ │ │ └── abouttab.go │ │ ├── clustertab │ │ │ ├── clustertab.go │ │ │ ├── clustertabcommands.go │ │ │ ├── clustertabkeys.go │ │ │ ├── clustertabtable.go │ │ │ └── clustertabview.go │ │ ├── jobdetailstab │ │ │ ├── jobdetailstab.go │ │ │ ├── jobdetailstabkeys.go │ │ │ └── jobdetailstabview.go │ │ ├── jobfromtemplate │ │ │ ├── jobfromtemplate.go │ │ │ ├── jobfromtemplatekeys.go │ │ │ └── jobfromtemplateview.go │ │ ├── jobhisttab │ │ │ ├── jobhisttab.go │ │ │ ├── jobhisttabcommands.go │ │ │ ├── jobhisttabkeys.go │ │ │ ├── jobhisttabtable.go │ │ │ └── jobhisttabview.go │ │ └── jobtab │ │ │ ├── jobtab.go │ │ │ ├── jobtabcommands.go │ │ │ ├── jobtabkeys.go │ │ │ ├── jobtabmenu.go │ │ │ ├── jobtabtable.go │ │ │ └── jobtabview.go │ ├── update.go │ └── view.go ├── openapi │ ├── openapi.gen.go │ ├── openapi.go │ ├── openapi_0.0.37_21.08.8.json │ ├── openapi_0.0.37_21.08.8.json.orig │ ├── openapi_0.0.39_master.json │ └── openapi_0.0.39_master.json.orig ├── openapidb │ ├── openapi_0.0.37_21.08.8.json │ ├── openapi_0.0.37_21.08.8.json.orig │ ├── openapi_db.gen.go │ └── openapidb.go ├── popupmenu │ └── popupmenu.go ├── slurm │ ├── sacct.go │ ├── sinfo.go │ ├── sinfo_test.go │ └── squeue.go ├── stats │ └── stats.go ├── styles │ 
└── styles.go ├── table │ ├── table.go │ └── table_test.go └── version │ └── version.go └── sc-demo.gif /.github/workflows/build_on_pr.yml: -------------------------------------------------------------------------------- 1 | name: Build_on_PR 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - main 7 | 8 | jobs: 9 | 10 | build: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions/checkout@v2 14 | 15 | - name: Set up Go 16 | uses: actions/setup-go@v2 17 | with: 18 | go-version: 1.18 19 | 20 | - name: Test 21 | run: make test 22 | 23 | - name: Build 24 | run: make build -e version=${{ github.sha }} 25 | 26 | - name: Install 27 | run: make install -e version=${{ github.sha }} 28 | 29 | - name: Upload a Build Artifact 30 | uses: actions/upload-artifact@v2.3.1 31 | with: 32 | name: slurmcommander-${{ github.sha }} 33 | path: build/slurmcommander-${{ github.sha }}/ 34 | -------------------------------------------------------------------------------- /.github/workflows/build_on_tag.yml: -------------------------------------------------------------------------------- 1 | name: Build_on_tag 2 | 3 | on: 4 | push: 5 | tags: 6 | - v** 7 | 8 | jobs: 9 | 10 | build: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions/checkout@v2 14 | 15 | - name: Set up Go 16 | uses: actions/setup-go@v2 17 | with: 18 | go-version: 1.18 19 | 20 | - name: Test 21 | run: make test 22 | 23 | - name: Build 24 | run: make build -e version=${{ github.ref_name }} 25 | 26 | - name: Install 27 | run: make install -e version=${{ github.ref_name }} 28 | 29 | - name: Upload a Build Artifact 30 | uses: actions/upload-artifact@v2.3.1 31 | with: 32 | name: slurmcommander-${{ github.ref_name }} 33 | path: build/slurmcommander-${{ github.ref_name }}/ 34 | -------------------------------------------------------------------------------- /.github/workflows/go.yml: -------------------------------------------------------------------------------- 1 | name: Go 2 | 3 | on: workflow_dispatch 4 | 5 | jobs: 6 
| 7 | build: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/checkout@v2 11 | 12 | - name: Set up Go 13 | uses: actions/setup-go@v2 14 | with: 15 | go-version: 1.18 16 | 17 | - name: Test 18 | run: make test 19 | 20 | - name: Build 21 | run: make build -e version=${{ github.ref_name }} 22 | 23 | - name: Install 24 | run: make install -e version=${{ github.ref_name }} 25 | 26 | - name: Upload a Build Artifact 27 | uses: actions/upload-artifact@v2.3.1 28 | with: 29 | name: slurmcommander-${{ github.ref_name }} 30 | path: build/slurmcommander-${{ github.ref_name }}/ 31 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .vscode 2 | debug.log 3 | scdebug.log 4 | scripts/** 5 | /scom 6 | *.sbatch 7 | tmp/** 8 | build/** 9 | -------------------------------------------------------------------------------- /BUGS.md: -------------------------------------------------------------------------------- 1 | * ~~filter (only in!!!) ClusterTab: if there are no matches->PANIC!~~ 2 | * ~~JobQueue tab, if info is turned on and filtered list has no results -> PANIC~~ 3 | * ~~jobfromtemplate tab, panic on select/enter if empty list~~ 4 | * ~~jobwait time from job details, shows 0?~~ 5 | * ~~cluster: blocks of nodes on prod sometime show weird same percentages? double-check~~ display ok, was wrong 6 | * ~~ssh to node after success returns error message:~~ 7 | * ~~slurm 22 vs 21, job_resources not visible in pending jobs? 
causes panic in InfoBox (see: vimdiff singlejobsqueue.json source-slurm22.json)~~ 8 | * ~~pending jobs break Run times Stats in Job History~~ 9 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Petar Jager 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: clean build test test_new list all 2 | 3 | .ONESHELL: 4 | 5 | # Inject into binary via linker: 6 | # ...in github actions comes from make -e version=git_ref 7 | version=$(shell cat VERSION) 8 | commit=$(shell git show --no-patch --format=format:%H HEAD) 9 | buildVersionVar=github.com/CLIP-HPC/SlurmCommander/internal/version.BuildVersion 10 | buildCommitVar=github.com/CLIP-HPC/SlurmCommander/internal/version.BuildCommit 11 | 12 | # various directories 13 | bindirs=$(wildcard ./cmd/*) 14 | installdir=build/slurmcommander-$(version) 15 | 16 | # list of files to include in build 17 | bins=$(notdir $(bindirs)) 18 | readme=README.md 19 | templates= 20 | config=./cmd/scom/scom.conf 21 | 22 | # can be replaced with go test ./... construct 23 | testdirs=$(sort $(dir $(shell find ./ -name *_test.go))) 24 | 25 | all: list test build install 26 | 27 | list: 28 | @echo "================================================================================" 29 | @echo "bindirs found: $(bindirs)" 30 | @echo "bins found: $(bins)" 31 | @echo "testdirs found: $(testdirs)" 32 | @echo "================================================================================" 33 | 34 | build: 35 | @echo "********************************************************************************" 36 | @echo Building $(bindirs) 37 | @echo Variables: 38 | @echo buildVersionVar: $(buildVersionVar) 39 | @echo version: $(version) 40 | @echo buildCommitVar: $(buildCommitVar) 41 | @echo commit: $(commit) 42 | @echo "********************************************************************************" 43 | for i in $(bindirs); 44 | do 45 | echo "................................................................................" 
46 | echo "--> Now building: $$i" 47 | echo "................................................................................" 48 | go build -v -ldflags '-X $(buildVersionVar)=$(version) -X $(buildCommitVar)=$(commit)' $$i; 49 | done; 50 | 51 | install: 52 | mkdir -p $(installdir) 53 | cp $(bins) $(readme) $(config) $(installdir) 54 | 55 | test_new: 56 | $(foreach dir, $(testdirs), go test -v -count=1 $(dir) || exit $$?;) 57 | 58 | test: 59 | @echo "********************************************************************************" 60 | @echo Testing 61 | @echo "********************************************************************************" 62 | go test -v -cover -count=1 ./... 63 | 64 | 65 | clean: 66 | rm $(bins) 67 | rm -rf $(installdir) 68 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | # SlurmCommander 3 | 4 | > ## News: 5 | > 6 | > ### Slurm 23.02.0 7 | > Slurm 23 has been released, and as already reported [here](https://github.com/CLIP-HPC/SlurmCommander/issues/22) scom does not work with it. 8 | > 9 | > The [issue](https://github.com/CLIP-HPC/SlurmCommander/issues/22) contains details and will track any potential progress on the support for slurm 23. 10 | > 11 | > --- 12 | > 13 | > [Discussions](https://github.com/CLIP-HPC/SlurmCommander/discussions) are open! 14 | > 15 | > Wishlist discussion thread: [here](https://github.com/CLIP-HPC/SlurmCommander/discussions/20) 16 | 17 | ## Description 18 | 19 | SlurmCommander is a simple, lightweight, no-dependencies text-based user interface (TUI) to your cluster. 20 | It ties together multiple slurm commands to provide you with a simple and efficient interaction point with slurm. 21 | 22 | [Installation](#installation) does not require any special privileges or environment. 
Simply download the [binary](https://github.com/CLIP-HPC/SlurmCommander/releases/latest), fill out a small [config file](./cmd/scom/scom.conf) and it's ready to run. 23 | 24 | You can view, search, analyze and interact with: 25 | 26 | ### Job Queue 27 | 28 | Job Queue shows jobs currently in the queue, additional information and breakdowns can be turned on with `s`,`c` and `i` keys: 29 | ![Job Queue main window](./images/jq.png) 30 | 31 | `Enter` key opens menu window with different actions available based on the job state (RUNNING, PENDING, etc.) 32 | ![Job Queue actions window](./images/jq_actions.png) 33 | 34 | `\` turns on __filtering__. It works by concatenating multiple job columns into a single string, and accepts [golang re2 regular expressions](https://github.com/google/re2/wiki/Syntax) thus allowing you to do some very imaginative filters. 35 | 36 | Example: `grid.*alice.\*(RUN|PEND)` =~ _jobs from account grid, user alice, in RUNNING OR PENDING state_ 37 | ![Job Queue filtering](./images/jq_filter.png) 38 | 39 | ### Job history 40 | 41 | Browse, filter and inspect past jobs 42 | ![Job History tab](./images/jh.png) 43 | 44 | ![Job Details tab](./images/jd.png) 45 | 46 | ### Edit and submit jobs from predefined templates 47 | 48 | * In the config file, set the `templatedirs` list of directories where to look for _.sbatch_ templates and their _.desc_ description files 49 | 50 | ![Job from Template tab](./images/jft.png) 51 | ![Job from Template tab](./images/jft_edit.png) 52 | 53 | ### Examine state of cluster nodes and partitions 54 | 55 | ![Cluster tab](./images/ct.png) 56 | 57 | Same as with _Job Queue_ and _Job History_ tabs, filtering is available here. 
58 | ![Cluster tab filtering](./images/ct_filter.png) 59 | 60 | So if we would like to see only nodes whose name contains _clip-c_ that are _idle_ and _POWERED\_DOWN_, we can easily filter those out with a filter: `clip-c.*idle.\*POWER` 61 | ![Cluster tab filter on](./images/ct_filter_on.png) 62 | 63 | 64 | ### Example Job Queue tab demo: 65 | ![demo](./images/jobqueue.gif) 66 | 67 | ## Installation 68 | 69 | SlurmCommander does not require any special privileges to be installed, see instructions below. 70 | 71 | > Hard requirement: json-output capable slurm commands 72 | 73 | ### Regular users 74 | 75 | 1. Download the pre-built [binary](https://github.com/CLIP-HPC/SlurmCommander/releases/latest) 76 | 2. Download the [annotated config](./cmd/scom/scom.conf) file 77 | 3. Edit the config file, follow instructions inside 78 | 4. Create scom directory in your $HOME and place the edited config there: `mkdir $HOME/scom` 79 | 5. Run 80 | 81 | ### Site administrators 82 | 83 | Instructions are same as for the regular users, with one minor perk. 84 | Place the [config file](./cmd/scom/scom.conf) in one of the following locations to be used as global configuration source for all scom instances on that machine. 85 | 86 | 1. /etc/scom/scom.conf 87 | 2. Any location, providing users with the environment variable `SCOM_CONF` containing path to config. file 88 | 3. Users $XDG_CONFIG_HOME/scom/scom.conf 89 | 90 | 91 | __NOTE__: Users can still override global configuration options by changing config stanzas in their local `$HOME/scom/scom.conf` 92 | 93 | ## Usage tips 94 | 95 | SlurmCommander is developed for 256 color terminals (black background) and requires at least 185x43 (columns x rows) to work. 96 | 97 | * If you experience _funky_ colors on startup, try setting your `TERM` environment variable to something like `xterm-256color`. 98 | * If you get a message like this: 99 | `FATAL: Window too small to run without breaking view. Have 80x24. 
Need at least 185x43.`, check your terminal resolution with `stty -a` and try resizing the window or reduce the font. 100 | 101 | 102 | ``` 103 | [pja@ SlurmCommander-dev]$ [DEBUG=1] [TERM=xterm-256color]./scom -h 104 | Welcome to Slurm Commander! 105 | 106 | Usage of ./scom: 107 | -d uint 108 | Jobs history fetch last N days (default 7) 109 | -t uint 110 | Job history fetch timeout, seconds (default 30) 111 | -v Display version 112 | 113 | ``` 114 | 115 | To run in _debug_ mode, set `DEBUG` env. variable. You will see an extra debug message line in the user interface and scom will record a `scdebug.log` file with (_lots of_) internal log messages. 116 | 117 | > Tested on: 118 | > * slurm 21.08.8 119 | > * slurm 22.05.5 120 | 121 | ## Feedback 122 | 123 | __Is most welcome. Of any kind.__ 124 | 125 | * bug reports 126 | * broken UI elements 127 | * code panic reports 128 | * ideas 129 | * wishes 130 | * code contributions 131 | * usage stories 132 | * kudos 133 | * coffee and/or beer 134 | * ... 135 | 136 | ## Acknowledgments 137 | 138 | Powered by amazing [Bubble Tea](https://github.com/charmbracelet/bubbletea) framework/ecosystem. Kudos to glamurous Charm developers and community. 139 | 140 | -------------------------------------------------------------------------------- /TODO.md: -------------------------------------------------------------------------------- 1 | * ~~sacct --json for time range < works with account specification~~ 2 | * ~~wrap it in timer for excessive accounts (e.g. eta > 40GB json)~~ 3 | * ~~configurable time-range with cmdline switch?~~ 4 | * ~~config file reading from 1. /etc/sc.conf , 2. ~/sc.conf~~ 5 | * job templates reading from 1. /etc/sc/templates , 2. ~/sc/templates , 3. templates_config from config file if it's set 6 | * build process, handling different openapi.json versions 7 | * table sorting, e.g. ctrl-1,2,3 (sort by 1st, 2nd, 3rd,...) 8 | * statistics: add counting per user, per group/account? 
9 | * ~~UI table responsiveness when len(rows)>1++k~~ 10 | * terminal columns ~150 breaks stats windows 11 | * on low number of rows (<60), on jobtab, stats and info overlap, messing up screen (laptops) 12 | * ~~make sorting of boxes static! otherwise flapping in display happens on same number items~~ 13 | -------------------------------------------------------------------------------- /VERSION: -------------------------------------------------------------------------------- 1 | v1.0.5 2 | -------------------------------------------------------------------------------- /cmd/scom/scom.conf: -------------------------------------------------------------------------------- 1 | # SlurmCommander configuration file 2 | # 3 | # Users can place it in their home folders: $HOME/scom/scom.conf 4 | # Site Admins can place it in: /etc/scom/scom.conf to be used globally 5 | # 6 | 7 | # Set the global prefix for ALL required slurm commands (see list in [binpaths] below) 8 | # It will be set ONLY for non-specified ones, [binpaths] specification of a command, if exists, will not be overwritten 9 | # 10 | #prefix="/usr/bin" 11 | 12 | # Directories in which to search for job templates. 13 | # Files ending with .sbatch will be appended to the list. 14 | # Same filenames with .desc ending will be read (1st line only) for description column. 15 | # 16 | #templatedirs = [ 17 | # "/etc/slurmcommander/templates", 18 | # "/software/scom/templates", 19 | #] 20 | 21 | # How often do JobQueue and Cluster tab refresh (seconds) 22 | # Default if unset = 3 23 | # Min. value = 3 24 | # 25 | #tick=3 26 | 27 | # Job History Configuration 28 | # 29 | # The starttime and endtime are the values passed to the -S and -E flags respectively, 30 | # please look at the manpage for sacct for the correct formatting of these. 31 | # 32 | # These are the default values if left unset (commented out). 
33 | #[jobhist] 34 | #starttime="now-1days" 35 | #endtime="" 36 | #timeout=30 37 | 38 | # Paths to required slurm commands 39 | # If some, or all of the following binaries reside in different directories, 40 | # you can set their respective paths below: 41 | # (unset ones will be set to the following default values, or prepended with prefix, if it's specified) 42 | # 43 | #[binpaths] 44 | #squeue="/bin/squeue" 45 | #sinfo="/bin/sinfo" 46 | #sacct="/bin/sacctall" 47 | #scancel="/bin/scancel" 48 | #shold="/bin/scontrol" 49 | #srequeue="/bin/scontrol" 50 | #sbatch="/bin/sbatch" 51 | #sacctmgr="/bin/sacctmgr" 52 | -------------------------------------------------------------------------------- /cmd/scom/scom.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "os" 7 | 8 | "github.com/charmbracelet/bubbles/help" 9 | "github.com/charmbracelet/bubbles/textinput" 10 | "github.com/charmbracelet/bubbles/viewport" 11 | "github.com/charmbracelet/lipgloss" 12 | tea "github.com/charmbracelet/bubbletea" 13 | 14 | "github.com/CLIP-HPC/SlurmCommander/internal/cmdline" 15 | "github.com/CLIP-HPC/SlurmCommander/internal/command" 16 | "github.com/CLIP-HPC/SlurmCommander/internal/config" 17 | "github.com/CLIP-HPC/SlurmCommander/internal/logger" 18 | "github.com/CLIP-HPC/SlurmCommander/internal/model" 19 | "github.com/CLIP-HPC/SlurmCommander/internal/model/tabs/clustertab" 20 | "github.com/CLIP-HPC/SlurmCommander/internal/model/tabs/jobdetailstab" 21 | "github.com/CLIP-HPC/SlurmCommander/internal/model/tabs/jobfromtemplate" 22 | "github.com/CLIP-HPC/SlurmCommander/internal/model/tabs/jobhisttab" 23 | "github.com/CLIP-HPC/SlurmCommander/internal/model/tabs/jobtab" 24 | "github.com/CLIP-HPC/SlurmCommander/internal/styles" 25 | "github.com/CLIP-HPC/SlurmCommander/internal/table" 26 | "github.com/CLIP-HPC/SlurmCommander/internal/version" 27 | ) 28 | 29 | func main() { 30 | 31 | var ( 32 | debugSet bool 
= false 33 | args *cmdline.CmdArgs 34 | ) 35 | 36 | fmt.Printf("Welcome to Slurm Commander!\n\n") 37 | 38 | cc := config.NewConfigContainer() 39 | err := cc.GetConfig() 40 | if err != nil { 41 | log.Printf("ERROR: parsing config files: %s\n", err) 42 | } 43 | 44 | args, err = cmdline.NewCmdArgs() 45 | if err != nil { 46 | log.Fatalf("ERROR: parsing cmdline args: %s\n", err) 47 | } 48 | 49 | if *args.Version { 50 | version.DumpVersion() 51 | os.Exit(0) 52 | } 53 | 54 | // TODO: JFT We have the CMDline switches and config, now overwrite/append what's changed 55 | //log.Println(cc.DumpConfig()) 56 | 57 | log.Printf("INFO: %s\n", cc.DumpConfig()) 58 | // TODO: this is ugly, but quick. Rework, use model... 59 | command.NewCmdCC(*cc) 60 | jobtab.NewCmdCC(*cc) 61 | clustertab.NewCmdCC(*cc) 62 | jobhisttab.NewCmdCC(*cc) 63 | 64 | // TODO: move all this away to view/styles somewhere... 65 | s := table.DefaultStyles() 66 | s.Header = s.Header. 67 | BorderStyle(lipgloss.NormalBorder()). 68 | BorderForeground(styles.Bluegrey). 69 | BorderBottom(true). 70 | Bold(false) 71 | s.Selected = s.Selected. 72 | Background(styles.Blue). 73 | Foreground(styles.Yellow). 
74 | Bold(false) 75 | 76 | // Filter TextInput 77 | ti := textinput.New() 78 | ti.Placeholder = "" 79 | ti.Focus() 80 | ti.CharLimit = 30 81 | ti.Width = 30 82 | 83 | // logging 84 | debugSet, l := logger.SetupLogger() 85 | 86 | // setup help 87 | hlp := help.New() 88 | hlp.Styles.ShortKey = styles.TextYellow 89 | hlp.Styles.ShortDesc = styles.TextBlue 90 | hlp.Styles.ShortSeparator = styles.TextBlueGrey 91 | 92 | /// JD viewport 93 | vp := viewport.New(10, 10) 94 | vp.Style = styles.JDviewportBox 95 | 96 | m := model.Model{ 97 | Globals: model.Globals{ 98 | Help: hlp, 99 | ActiveTab: 0, 100 | Log: l, 101 | Debug: debugSet, 102 | ConfigContainer: *cc, 103 | }, 104 | JobTab: jobtab.JobTab{ 105 | SqueueTable: table.New(table.WithColumns(jobtab.SqueueTabCols), table.WithRows(jobtab.TableRows{}), table.WithStyles(s)), 106 | Filter: ti, 107 | }, 108 | JobHistTab: jobhisttab.JobHistTab{ 109 | SacctTable: table.New(table.WithColumns(jobhisttab.SacctTabCols), table.WithRows(jobtab.TableRows{}), table.WithStyles(s)), 110 | Filter: ti, 111 | UserInputs: jobhisttab.NewUserInputs(cc.JobHist.Timeout, cc.JobHist.Starttime, cc.JobHist.Endtime), 112 | HistFetched: false, 113 | HistFetchFail: false, 114 | JobHistStart: cc.JobHist.Starttime, 115 | JobHistEnd: cc.JobHist.Endtime, 116 | JobHistTimeout: cc.JobHist.Timeout, 117 | }, 118 | JobDetailsTab: jobdetailstab.JobDetailsTab{ 119 | SelJobIDNew: -1, 120 | ViewPort: vp, 121 | }, 122 | JobFromTemplateTab: jobfromtemplate.JobFromTemplateTab{ 123 | EditTemplate: false, 124 | NewJobScript: "", 125 | TemplatesTable: table.New( 126 | table.WithColumns(jobfromtemplate.TemplatesListCols), 127 | table.WithRows(jobfromtemplate.TemplatesListRows{}), 128 | table.WithStyles(s), 129 | ), 130 | }, 131 | ClusterTab: clustertab.ClusterTab{ 132 | SinfoTable: table.New(table.WithColumns(clustertab.SinfoTabCols), table.WithRows(jobtab.TableRows{}), table.WithStyles(s)), 133 | Filter: ti, 134 | }, 135 | } 136 | 137 | // OLD: bubbletea@v0.22.1 138 | //p 
:= tea.NewProgram(tea.Model(m), tea.WithAltScreen()) 139 | //if err := p.Start(); err != nil { 140 | // log.Fatalf("ERROR: starting tea program: %q\n", err) 141 | //} 142 | 143 | // NEW: bubbletea@v0.23.0 144 | // Run returns the model as a tea.Model. 145 | p := tea.NewProgram(tea.Model(m), tea.WithAltScreen()) 146 | ret, err := p.Run() 147 | if err != nil { 148 | fmt.Printf("Error starting program: %s", err) 149 | os.Exit(1) 150 | } 151 | 152 | if retMod, ok := ret.(model.Model); ok && retMod.Globals.SizeErr != "" { 153 | fmt.Printf("%s\n", retMod.Globals.SizeErr) 154 | } 155 | fmt.Printf("Goodbye!\n") 156 | } 157 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/CLIP-HPC/SlurmCommander 2 | 3 | go 1.18 4 | 5 | require ( 6 | github.com/BurntSushi/toml v1.2.1 7 | github.com/charmbracelet/bubbles v0.14.0 8 | github.com/charmbracelet/bubbletea v0.23.0 9 | github.com/charmbracelet/lipgloss v0.6.0 10 | github.com/dustin/go-humanize v1.0.0 11 | github.com/mattn/go-runewidth v0.0.14 12 | gonum.org/v1/gonum v0.12.0 13 | ) 14 | 15 | require ( 16 | github.com/atotto/clipboard v0.1.4 // indirect 17 | github.com/aymanbagabas/go-osc52 v1.0.3 // indirect 18 | github.com/charmbracelet/harmonica v0.2.0 // indirect 19 | github.com/containerd/console v1.0.3 // indirect 20 | github.com/lucasb-eyer/go-colorful v1.2.0 // indirect 21 | github.com/mattn/go-isatty v0.0.16 // indirect 22 | github.com/mattn/go-localereader v0.0.1 // indirect 23 | github.com/muesli/ansi v0.0.0-20211018074035-2e021307bc4b // indirect 24 | github.com/muesli/cancelreader v0.2.2 // indirect 25 | github.com/muesli/reflow v0.3.0 // indirect 26 | github.com/muesli/termenv v0.13.0 // indirect 27 | github.com/rivo/uniseg v0.2.0 // indirect 28 | github.com/sahilm/fuzzy v0.1.0 // indirect 29 | golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab // indirect 30 | 
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect 31 | golang.org/x/text v0.3.7 // indirect 32 | ) 33 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= 2 | github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= 3 | github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4= 4 | github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI= 5 | github.com/aymanbagabas/go-osc52 v1.0.3 h1:DTwqENW7X9arYimJrPeGZcV0ln14sGMt3pHZspWD+Mg= 6 | github.com/aymanbagabas/go-osc52 v1.0.3/go.mod h1:zT8H+Rk4VSabYN90pWyugflM3ZhpTZNC7cASDfUCdT4= 7 | github.com/charmbracelet/bubbles v0.14.0 h1:DJfCwnARfWjZLvMglhSQzo76UZ2gucuHPy9jLWX45Og= 8 | github.com/charmbracelet/bubbles v0.14.0/go.mod h1:bbeTiXwPww4M031aGi8UK2HT9RDWoiNibae+1yCMtcc= 9 | github.com/charmbracelet/bubbletea v0.21.0/go.mod h1:GgmJMec61d08zXsOhqRC/AiOx4K4pmz+VIcRIm1FKr4= 10 | github.com/charmbracelet/bubbletea v0.23.0 h1:oGChhsNcm7kltiTdjxJbVlyh93N5fycluO7MsA2JEeg= 11 | github.com/charmbracelet/bubbletea v0.23.0/go.mod h1:JAfGK/3/pPKHTnAS8JIE2u9f61BjWTQY57RbT25aMXU= 12 | github.com/charmbracelet/harmonica v0.2.0 h1:8NxJWRWg/bzKqqEaaeFNipOu77YR5t8aSwG4pgaUBiQ= 13 | github.com/charmbracelet/harmonica v0.2.0/go.mod h1:KSri/1RMQOZLbw7AHqgcBycp8pgJnQMYYT8QZRqZ1Ao= 14 | github.com/charmbracelet/lipgloss v0.5.0/go.mod h1:EZLha/HbzEt7cYqdFPovlqy5FZPj0xFhg5SaqxScmgs= 15 | github.com/charmbracelet/lipgloss v0.6.0 h1:1StyZB9vBSOyuZxQUcUwGr17JmojPNm87inij9N3wJY= 16 | github.com/charmbracelet/lipgloss v0.6.0/go.mod h1:tHh2wr34xcHjC2HCXIlGSG1jaDF0S0atAUvBMP6Ppuk= 17 | github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw= 18 | github.com/containerd/console v1.0.3/go.mod 
h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= 19 | github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= 20 | github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= 21 | github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= 22 | github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= 23 | github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= 24 | github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= 25 | github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= 26 | github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= 27 | github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= 28 | github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4= 29 | github.com/mattn/go-localereader v0.0.1/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88= 30 | github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= 31 | github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= 32 | github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= 33 | github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= 34 | github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= 35 | github.com/muesli/ansi v0.0.0-20211018074035-2e021307bc4b h1:1XF24mVaiu7u+CFywTdcDo2ie1pzzhwjt6RHqzpMU34= 36 | github.com/muesli/ansi v0.0.0-20211018074035-2e021307bc4b/go.mod h1:fQuZ0gauxyBcmsdE3ZT4NasjaRdxmbCS0jRHsrWu3Ho= 37 | github.com/muesli/cancelreader v0.2.0/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo= 38 | github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA= 39 | 
github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo= 40 | github.com/muesli/reflow v0.2.1-0.20210115123740-9e1d0d53df68/go.mod h1:Xk+z4oIWdQqJzsxyjgl3P22oYZnHdZ8FFTHAQQt5BMQ= 41 | github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s= 42 | github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8= 43 | github.com/muesli/termenv v0.11.1-0.20220204035834-5ac8409525e0/go.mod h1:Bd5NYQ7pd+SrtBSrSNoBBmXlcY8+Xj4BMJgh8qcZrvs= 44 | github.com/muesli/termenv v0.11.1-0.20220212125758-44cd13922739/go.mod h1:Bd5NYQ7pd+SrtBSrSNoBBmXlcY8+Xj4BMJgh8qcZrvs= 45 | github.com/muesli/termenv v0.13.0 h1:wK20DRpJdDX8b7Ek2QfhvqhRQFZ237RGRO0RQ/Iqdy0= 46 | github.com/muesli/termenv v0.13.0/go.mod h1:sP1+uffeLaEYpyOTb8pLCUctGcGLnoFjSn4YJK5e2bc= 47 | github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= 48 | github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= 49 | github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= 50 | github.com/sahilm/fuzzy v0.1.0 h1:FzWGaw2Opqyu+794ZQ9SYifWv2EIXpwP4q8dY1kDAwI= 51 | github.com/sahilm/fuzzy v0.1.0/go.mod h1:VFvziUEIMCrT6A6tw2RFIXPXXmzXbOsSHF0DOI8ZK9Y= 52 | golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3 h1:n9HxLrNxWWtEb1cA950nuEEj3QnKbtsCJ6KjcgisNUs= 53 | golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 54 | golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 55 | golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 56 | golang.org/x/sys v0.0.0-20220204135822-1c1b9b1eba6a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 57 | golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 58 | golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab 
h1:2QkjZIsXupsJbJIdSjjUOgWK3aEtzyuh2mPt3l/CkeU= 59 | golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 60 | golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= 61 | golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= 62 | golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= 63 | golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= 64 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 65 | gonum.org/v1/gonum v0.12.0 h1:xKuo6hzt+gMav00meVPUlXwSdoEJP46BR+wdxQEFK2o= 66 | gonum.org/v1/gonum v0.12.0/go.mod h1:73TDxJfAAHeA8Mk9mf8NlIppyhQNo5GLTcYeqgo2lvY= 67 | -------------------------------------------------------------------------------- /images/ct.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CLIP-HPC/SlurmCommander/c8722facaf382f5892eb7ba187ad170ac2a3a95f/images/ct.png -------------------------------------------------------------------------------- /images/ct_filter.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CLIP-HPC/SlurmCommander/c8722facaf382f5892eb7ba187ad170ac2a3a95f/images/ct_filter.png -------------------------------------------------------------------------------- /images/ct_filter_on.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CLIP-HPC/SlurmCommander/c8722facaf382f5892eb7ba187ad170ac2a3a95f/images/ct_filter_on.png -------------------------------------------------------------------------------- /images/jd.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/CLIP-HPC/SlurmCommander/c8722facaf382f5892eb7ba187ad170ac2a3a95f/images/jd.png -------------------------------------------------------------------------------- /images/jft.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CLIP-HPC/SlurmCommander/c8722facaf382f5892eb7ba187ad170ac2a3a95f/images/jft.png -------------------------------------------------------------------------------- /images/jft_edit.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CLIP-HPC/SlurmCommander/c8722facaf382f5892eb7ba187ad170ac2a3a95f/images/jft_edit.png -------------------------------------------------------------------------------- /images/jh.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CLIP-HPC/SlurmCommander/c8722facaf382f5892eb7ba187ad170ac2a3a95f/images/jh.png -------------------------------------------------------------------------------- /images/jobqueue.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CLIP-HPC/SlurmCommander/c8722facaf382f5892eb7ba187ad170ac2a3a95f/images/jobqueue.gif -------------------------------------------------------------------------------- /images/jq.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CLIP-HPC/SlurmCommander/c8722facaf382f5892eb7ba187ad170ac2a3a95f/images/jq.png -------------------------------------------------------------------------------- /images/jq_actions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CLIP-HPC/SlurmCommander/c8722facaf382f5892eb7ba187ad170ac2a3a95f/images/jq_actions.png -------------------------------------------------------------------------------- 
/images/jq_filter.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CLIP-HPC/SlurmCommander/c8722facaf382f5892eb7ba187ad170ac2a3a95f/images/jq_filter.png -------------------------------------------------------------------------------- /images/jq_filter_on.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CLIP-HPC/SlurmCommander/c8722facaf382f5892eb7ba187ad170ac2a3a95f/images/jq_filter_on.png -------------------------------------------------------------------------------- /images/jq_info.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CLIP-HPC/SlurmCommander/c8722facaf382f5892eb7ba187ad170ac2a3a95f/images/jq_info.png -------------------------------------------------------------------------------- /internal/cmdline/cmdline.go: -------------------------------------------------------------------------------- 1 | package cmdline 2 | 3 | import ( 4 | "errors" 5 | "flag" 6 | ) 7 | 8 | // CmdArgs holds currently supported command line parameters. 9 | type CmdArgs struct { 10 | Version *bool 11 | } 12 | 13 | // NewCmdArgs return the CmdArgs structure built from command line parameters. 
14 | func NewCmdArgs() (*CmdArgs, error) { 15 | c := new(CmdArgs) 16 | 17 | c.Version = flag.Bool("v", false, "Display version") 18 | flag.Parse() 19 | if !flag.Parsed() { 20 | return nil, errors.New("failed to parse command line flags") 21 | } 22 | 23 | return c, nil 24 | } 25 | -------------------------------------------------------------------------------- /internal/command/commConfig.go: -------------------------------------------------------------------------------- 1 | package command 2 | 3 | var ( 4 | SacctJobCmdSwitches = []string{"-n", "--json", "-j"} 5 | ScancelJobCmdSwitches = []string{} 6 | SholdJobCmdSwitches = []string{"hold"} 7 | SrequeueJobCmdSwitches = []string{"requeue"} 8 | SbatchCmdSwitches = []string{} 9 | SacctmgrCmdSwitches = []string{"list", "Association", "format=account", "-P", "-n"} 10 | ) 11 | -------------------------------------------------------------------------------- /internal/command/command.go: -------------------------------------------------------------------------------- 1 | package command 2 | 3 | import ( 4 | "bufio" 5 | "fmt" 6 | "log" 7 | "os/exec" 8 | "os/user" 9 | 10 | tea "github.com/charmbracelet/bubbletea" 11 | "github.com/CLIP-HPC/SlurmCommander/internal/config" 12 | ) 13 | 14 | var cc config.ConfigContainer 15 | 16 | // NewCmdCC sets the package variable ConfigContainer to the values from config files/defaults. 17 | // To be used in the package locally without being passed around. 18 | // Not the smartest choice, consider refactoring later. 19 | func NewCmdCC(config config.ConfigContainer) { 20 | cc = config 21 | } 22 | 23 | type ErrorMsg struct { 24 | From string 25 | ErrHelp string 26 | OrigErr error 27 | } 28 | 29 | // UserName is a linux username string. 30 | type UserName string 31 | 32 | // GetUserName returns the linux username string. 33 | // Used to call sacctmgr and fetch users associations. 
34 | func GetUserName(l *log.Logger) tea.Cmd { 35 | 36 | return func() tea.Msg { 37 | 38 | l.Printf("Fetching UserName\n") 39 | u, err := user.Current() 40 | if err != nil { 41 | l.Printf("GetUserName FAILED: %s", err) 42 | return ErrorMsg{ 43 | From: "GetUserName", 44 | ErrHelp: "Failed to get username, hard to imagine why. Please open an issue with us here: https://github.com/CLIP-HPC/SlurmCommander/issues/new/choose", 45 | OrigErr: err, 46 | } 47 | } 48 | 49 | l.Printf("Return UserName: %s\n", u.Username) 50 | return UserName(u.Username) 51 | } 52 | } 53 | 54 | // UserAssoc is a list of associations between current username and slurm accounts. 55 | type UserAssoc []string 56 | 57 | // GetUserAssoc returns a list of associations between current username and slurm accounts. 58 | // Used later in sacct --json call to fetch users job history 59 | func GetUserAssoc(u string, l *log.Logger) tea.Cmd { 60 | return func() tea.Msg { 61 | var ( 62 | ua UserAssoc 63 | sw []string 64 | ) 65 | 66 | cmd := cc.Binpaths["sacctmgr"] 67 | sw = append(sw, SacctmgrCmdSwitches...) 68 | sw = append(sw, "user="+u) 69 | //c := exec.Command(sacctmgrCmd, sw...) 70 | c := exec.Command(cmd, sw...) 
71 | 72 | l.Printf("GetUserAssoc about to run: %v %v\n", cmd, sw) 73 | stdOut, err := c.StdoutPipe() 74 | if err != nil { 75 | l.Printf("StdoutPipe call FAILED with %s\n", err) 76 | return ErrorMsg{ 77 | From: "GetUserAssoc", 78 | ErrHelp: "Failed setting up command StdoutPipe()", 79 | OrigErr: err, 80 | } 81 | } 82 | 83 | if e := c.Start(); e != nil { 84 | l.Printf("cmd.Run call FAILED with %s\n", err) 85 | return ErrorMsg{ 86 | From: "GetUserAssoc", 87 | ErrHelp: "Failed Start()ing sacctmgr", 88 | OrigErr: err, 89 | } 90 | } 91 | 92 | s := bufio.NewScanner(stdOut) 93 | for s.Scan() { 94 | l.Printf("Got UserAssoc %s -> %s\n", u, s.Text()) 95 | ua = append(ua, s.Text()) 96 | } 97 | if e := c.Wait(); e != nil { 98 | l.Printf("cmd.Wait call FAILED with %s\n", err) 99 | return ErrorMsg{ 100 | From: "GetUserAssoc", 101 | ErrHelp: "Failed Wait()ing for sacctmgr to exit", 102 | OrigErr: err, 103 | } 104 | } 105 | 106 | return ua 107 | } 108 | } 109 | 110 | type ScancelSent struct { 111 | Jobid string 112 | } 113 | 114 | func CallScancel(jobid string, l *log.Logger) tea.Cmd { 115 | return func() tea.Msg { 116 | var scret ScancelSent = ScancelSent{ 117 | Jobid: jobid, 118 | } 119 | 120 | cmd := cc.Binpaths["scancel"] 121 | switches := append(ScancelJobCmdSwitches, jobid) 122 | 123 | l.Printf("EXEC: %q %q\n", cmd, switches) 124 | out, err := exec.Command(cmd, switches...).CombinedOutput() 125 | if err != nil { 126 | l.Printf("Error exec scancel: %q\n", err) 127 | return ErrorMsg{ 128 | From: "CallScancel", 129 | ErrHelp: "Failed to run scancel: check command paths in scom.conf, check that you have permissions to cancel it", 130 | OrigErr: err, 131 | } 132 | } 133 | l.Printf("EXEC output: %q\n", out) 134 | 135 | return scret 136 | } 137 | } 138 | 139 | type SHoldSent struct { 140 | Jobid string 141 | } 142 | 143 | func CallScontrolHold(jobid string, l *log.Logger) tea.Cmd { 144 | return func() tea.Msg { 145 | var scret SHoldSent = SHoldSent{ 146 | Jobid: jobid, 147 | } 148 | 
149 | cmd := cc.Binpaths["scontrol"] 150 | switches := append(SholdJobCmdSwitches, jobid) 151 | 152 | l.Printf("EXEC: %q %q\n", cmd, switches) 153 | out, err := exec.Command(cmd, switches...).CombinedOutput() 154 | if err != nil { 155 | l.Printf("Error exec hold: %q\n", err) 156 | return ErrorMsg{ 157 | From: "CallScontrolHold", 158 | ErrHelp: "Failed to run scontrol hold : check command paths in scom.conf, check that you have permissions to hold it", 159 | OrigErr: err, 160 | } 161 | } 162 | l.Printf("EXEC output: %q\n", out) 163 | 164 | return scret 165 | } 166 | } 167 | 168 | type SRequeueSent struct { 169 | Jobid string 170 | } 171 | 172 | // TODO: unify this to a single function 173 | func CallScontrolRequeue(jobid string, l *log.Logger) tea.Cmd { 174 | return func() tea.Msg { 175 | var scret SRequeueSent = SRequeueSent{ 176 | Jobid: jobid, 177 | } 178 | 179 | cmd := cc.Binpaths["scontrol"] 180 | switches := append(SrequeueJobCmdSwitches, jobid) 181 | 182 | l.Printf("EXEC: %q %q\n", cmd, switches) 183 | out, err := exec.Command(cmd, switches...).CombinedOutput() 184 | if err != nil { 185 | l.Printf("Error exec requeue: %q\n", err) 186 | l.Printf("Possible Reason: requeue can only be executed for batch jobs (e.g. 
won't work on srun --pty)\n") 187 | return ErrorMsg{ 188 | From: "CallScontrolRequeue", 189 | ErrHelp: "Failed to run scontrol requeue: check command paths in scom.conf, check that you have permissions to requeue it, check that it's not srun --pty type", 190 | OrigErr: err, 191 | } 192 | } 193 | l.Printf("EXEC output: %q\n", out) 194 | 195 | return scret 196 | } 197 | } 198 | 199 | type SBatchSent struct { 200 | JobFile string 201 | } 202 | 203 | // TODO: unify this to a single function 204 | func CallSbatch(jobfile string, l *log.Logger) tea.Cmd { 205 | return func() tea.Msg { 206 | var scret SBatchSent = SBatchSent{ 207 | JobFile: jobfile, 208 | } 209 | 210 | cmd := cc.Binpaths["sbatch"] 211 | switches := append(SbatchCmdSwitches, jobfile) 212 | 213 | l.Printf("EXEC: %q %q\n", cmd, switches) 214 | out, err := exec.Command(cmd, switches...).CombinedOutput() 215 | if err != nil { 216 | l.Printf("Error exec sbatch: %q\n", err) 217 | return ErrorMsg{ 218 | From: "CallSbatch", 219 | ErrHelp: "Failed to run sbatch, check scom.conf and the paths there.", 220 | OrigErr: err, 221 | } 222 | } 223 | l.Printf("EXEC output: %q\n", out) 224 | 225 | return scret 226 | } 227 | } 228 | 229 | type SshCompleted struct { 230 | SshNode string 231 | } 232 | 233 | func CallSsh(node string, l *log.Logger) tea.Cmd { 234 | l.Printf("Start ssh to %s\n", node) 235 | ssh := exec.Command("ssh", node) 236 | return tea.ExecProcess(ssh, func(err error) tea.Msg { 237 | if err != nil { 238 | l.Printf("End ssh with error: %s\n", err) 239 | return ErrorMsg{ 240 | From: "CallSsh", 241 | ErrHelp: fmt.Sprintf("Failed ssh to %s, possible reasons: you don't have a job on the node, ssh not allowed at all, etc.", node), 242 | OrigErr: err, 243 | } 244 | } else { 245 | return SshCompleted{ 246 | SshNode: node, 247 | } 248 | } 249 | }) 250 | } 251 | -------------------------------------------------------------------------------- /internal/config/config.go: 
-------------------------------------------------------------------------------- 1 | /* 2 | Package config implements the ConfigContainer structure and accompanying methods. 3 | It holds the configuration data for all utilities. 4 | Configuration file format is the same for all. 5 | */ 6 | package config 7 | 8 | import ( 9 | "fmt" 10 | "log" 11 | "os" 12 | "time" 13 | 14 | "github.com/BurntSushi/toml" 15 | "github.com/CLIP-HPC/SlurmCommander/internal/defaults" 16 | ) 17 | 18 | type ConfigJobHistContainer struct { 19 | Starttime string 20 | Endtime string 21 | Timeout uint 22 | } 23 | 24 | type ConfigContainer struct { 25 | Prefix string // if this is set, then we prepend this path to all commands 26 | Binpaths map[string]string // else, we specify one by one 27 | Tick uint 28 | JobHist ConfigJobHistContainer 29 | TemplateDirs []string 30 | } 31 | 32 | func NewConfigContainer() *ConfigContainer { 33 | return new(ConfigContainer) 34 | } 35 | 36 | func (cc *ConfigContainer) GetTick() time.Duration { 37 | return time.Duration(cc.Tick) 38 | } 39 | 40 | // Read & unmarshall configuration from 'name' file into configContainer structure 41 | func (cc *ConfigContainer) GetConfig() error { 42 | var ( 43 | cfgPaths []string 44 | ) 45 | 46 | home, err := os.UserHomeDir() 47 | if err != nil { 48 | log.Printf("Conf: FAILED getting users $HOME %s\n", err) 49 | cfgPaths = []string{defaults.ConfFileName} 50 | } else { 51 | cfgPaths = []string{defaults.SiteConfFile, home + "/" + defaults.AppName + "/" + defaults.ConfFileName} 52 | } 53 | 54 | // SCOM_CONF content, if exists 55 | if scomConf, exists := os.LookupEnv(defaults.EnvConfVarName); exists && scomConf != "" { 56 | // SCOM_CONF set 57 | cfgPaths = append(cfgPaths, scomConf) 58 | } 59 | 60 | // $XDG_CONFIG_HOME/scom/scom.conf 61 | if xdgConfHome, exists := os.LookupEnv("XDG_CONFIG_HOME"); exists && xdgConfHome != "" { 62 | // XDG_CONFIG_HOME set 63 | cfgPaths = append(cfgPaths, 
xdgConfHome+"/"+defaults.AppName+"/"+defaults.ConfFileName) 64 | } else { 65 | // XDG_CONFIG_HOME unset or empty 66 | // If $XDG_CONFIG_HOME is either not set or empty, a default equal to $HOME/.config should be used. 67 | cfgPaths = append(cfgPaths, home+"/.config/"+defaults.AppName+"/"+defaults.ConfFileName) 68 | } 69 | 70 | for _, v := range cfgPaths { 71 | log.Printf("Trying conf file: %s\n", v) 72 | f, err := os.ReadFile(v) 73 | if err != nil { 74 | log.Printf("Conf: FAILED reading %s\n", v) 75 | continue 76 | } 77 | 78 | err = toml.Unmarshal(f, cc) 79 | if err != nil { 80 | log.Printf("Conf: FAILED unmarshalling %s with %s\n", v, err) 81 | } 82 | } 83 | 84 | // Here we test config limits and set them. 85 | // Also fill out unset config params. 86 | 87 | // if unset (==0) or less then 3, set to default 88 | if cc.Tick < defaults.TickMin { 89 | // set default Tick 90 | cc.Tick = defaults.TickMin 91 | } 92 | 93 | // if unset, set to default 94 | if len(cc.JobHist.Starttime) == 0 { 95 | cc.JobHist.Starttime = defaults.HistStart 96 | } 97 | // if unset (==0), set to default 98 | if cc.JobHist.Timeout < 1 { 99 | cc.JobHist.Timeout = defaults.HistTimeout 100 | } 101 | cc.testNsetBinPaths() 102 | cc.testNsetTemplateDirs() 103 | 104 | // We don't return error since we set sane defaults and 105 | // errors arising from bad config should be handled in app. 106 | // for now leave signature as-is, later remove error return 107 | 108 | return nil 109 | } 110 | 111 | func (cc *ConfigContainer) testNsetTemplateDirs() { 112 | if cc.TemplateDirs == nil { 113 | // Nothing set from config files 114 | cc.TemplateDirs = append(cc.TemplateDirs, defaults.TemplatesDir) 115 | } else { 116 | // Something exists from config, can be site-wide OR user-conf 117 | // QUESTION: should we do anything about it? prepend /etc/... one? or leave it as-is? 118 | // For now, we don't touch it. 
119 | } 120 | 121 | } 122 | 123 | func (cc *ConfigContainer) testNsetBinPaths() { 124 | 125 | if cc.Binpaths == nil { 126 | cc.Binpaths = make(map[string]string) 127 | } 128 | 129 | for key, path := range defaults.BinPaths { 130 | if val, exists := cc.Binpaths[key]; !exists || val == "" { 131 | if cc.Prefix != "" { 132 | // prefix is set, prepend it 133 | cc.Binpaths[key] = cc.Prefix + "/" + key 134 | } else { 135 | cc.Binpaths[key] = path 136 | } 137 | } 138 | } 139 | 140 | } 141 | 142 | func (cc *ConfigContainer) DumpConfig() string { 143 | return fmt.Sprintf("Configuration: %#v\n", cc) 144 | } 145 | -------------------------------------------------------------------------------- /internal/defaults/defaults.go: -------------------------------------------------------------------------------- 1 | package defaults 2 | 3 | const ( 4 | TickMin = 3 // minimal time in seconds between that can be set in config file. If not set or less then, Set to this value. 5 | HistTimeout = 30 // in seconds; must be >= 1 6 | HistStart = "now-7days" // must be >= 1 7 | 8 | AppName = "scom" 9 | 10 | EnvConfVarName = "SCOM_CONF" 11 | ConfFileName = "scom.conf" 12 | SiteConfDir = "/etc/" + AppName + "/" 13 | SiteConfFile = SiteConfDir + ConfFileName 14 | 15 | TemplatesDir = SiteConfDir + "templates" 16 | TemplatesSuffix = ".sbatch" 17 | TemplatesDescSuffix = ".desc" 18 | ) 19 | 20 | var ( 21 | // default paths 22 | BinPaths = map[string]string{ 23 | "sacct": "/bin/sacct", 24 | "sstat": "/bin/sstat", 25 | "sinfo": "/bin/sinfo", 26 | "squeue": "/bin/squeue", 27 | "sbatch": "/bin/sbatch", 28 | "scancel": "/bin/scancel", 29 | "scontrol": "/bin/scontrol", 30 | "sacctmgr": "/bin/sacctmgr", 31 | } 32 | ) 33 | -------------------------------------------------------------------------------- /internal/generic/generic.go: -------------------------------------------------------------------------------- 1 | package generic 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "sort" 7 | "time" 8 | 9 | 
"github.com/charmbracelet/bubbles/textinput" 10 | ) 11 | 12 | type UserInputs struct { 13 | FocusIndex int 14 | ParamTexts []string 15 | Params []textinput.Model 16 | } 17 | 18 | type CountItemSlice []CountItem 19 | 20 | type CountItem struct { 21 | Name string 22 | Count uint 23 | Total uint 24 | } 25 | 26 | //type CountItemMap map[string]uint 27 | type CountItemMap map[string]*CountItem 28 | 29 | func SortItemMapBySel(what string, m *CountItemMap) CountItemSlice { 30 | var ret = CountItemSlice{} 31 | //ret := make(CountItemSlice, len(*m)) 32 | for k, v := range *m { 33 | ret = append(ret, CountItem{ 34 | Name: k, 35 | Count: v.Count, 36 | Total: v.Total, 37 | }) 38 | } 39 | 40 | sort.Slice(ret, func(i, j int) bool { 41 | switch what { 42 | case "Count": 43 | if ret[i].Count > ret[j].Count { 44 | return true 45 | } 46 | case "Name": 47 | if ret[i].Name < ret[j].Name { 48 | return true 49 | } 50 | } 51 | return false 52 | }) 53 | 54 | return ret 55 | } 56 | 57 | func Top5(src CountItemSlice) CountItemSlice { 58 | var ret CountItemSlice 59 | for i, v := range src { 60 | if i < 5 { 61 | ret = append(ret, v) 62 | } 63 | } 64 | return ret 65 | } 66 | 67 | func HumanizeDuration(t time.Duration, l *log.Logger) string { 68 | var ret string 69 | 70 | // total seconds 71 | s := int64(t.Seconds()) 72 | 73 | // days 74 | d := s / (24 * 60 * 60) 75 | s = s % (24 * 60 * 60) 76 | 77 | // hours 78 | h := s / 3600 79 | s = s % 3600 80 | 81 | // minutes 82 | m := s / 60 83 | s = s % 60 84 | 85 | ret += fmt.Sprintf("%.2d-%.2d:%.2d:%.2d", d, h, m, s) 86 | 87 | l.Printf("Humanized %f to %q\n", t.Seconds(), ret) 88 | return ret 89 | } 90 | 91 | // Generate statistics string, vertical. 
92 | func GenCountStrVert(cnt map[string]uint, l *log.Logger) string { 93 | var ( 94 | scr string 95 | ) 96 | 97 | sm := make([]struct { 98 | name string 99 | val uint 100 | }, 0) 101 | 102 | // place map to slice 103 | for k, v := range cnt { 104 | sm = append(sm, struct { 105 | name string 106 | val uint 107 | }{name: k, val: uint(v)}) 108 | } 109 | 110 | // sort first by name 111 | sort.Slice(sm, func(i, j int) bool { 112 | if sm[i].name < sm[j].name { 113 | return true 114 | } else { 115 | return false 116 | } 117 | }) 118 | // then sort by numbers 119 | sort.Slice(sm, func(i, j int) bool { 120 | if sm[i].val > sm[j].val { 121 | return true 122 | } else { 123 | return false 124 | } 125 | }) 126 | 127 | // print it out 128 | //scr = "Count: " 129 | for _, v := range sm { 130 | scr += fmt.Sprintf("%-15s: %d\n", v.name, v.val) 131 | } 132 | scr += "\n" 133 | 134 | return scr 135 | } 136 | 137 | // Generate statistics string, horizontal. 138 | func GenCountStr(cnt map[string]uint, l *log.Logger) string { 139 | var ( 140 | scr string 141 | ) 142 | 143 | sm := make([]struct { 144 | name string 145 | val uint 146 | }, 0) 147 | 148 | // place map to slice 149 | for k, v := range cnt { 150 | sm = append(sm, struct { 151 | name string 152 | val uint 153 | }{name: k, val: uint(v)}) 154 | } 155 | 156 | // sort it 157 | sort.Slice(sm, func(i, j int) bool { 158 | if sm[i].name < sm[j].name { 159 | return true 160 | } else { 161 | return false 162 | } 163 | }) 164 | 165 | // print it out 166 | scr = "Count: " 167 | for _, v := range sm { 168 | scr += fmt.Sprintf("%s: %d ", v.name, v.val) 169 | } 170 | scr += "\n\n" 171 | 172 | return scr 173 | } 174 | -------------------------------------------------------------------------------- /internal/keybindings/keybindings.go: -------------------------------------------------------------------------------- 1 | package keybindings 2 | 3 | import ( 4 | "github.com/charmbracelet/bubbles/key" 5 | ) 6 | 7 | type KeyMap struct { 8 | TtabSel 
key.Binding 9 | Up key.Binding 10 | Down key.Binding 11 | Refresh key.Binding 12 | Quit key.Binding 13 | PageUp key.Binding 14 | PageDown key.Binding 15 | Tab key.Binding 16 | ShiftTab key.Binding 17 | Slash key.Binding 18 | TimeRange key.Binding 19 | Info key.Binding 20 | Enter key.Binding 21 | SaveSubmitJob key.Binding 22 | Escape key.Binding 23 | Stats key.Binding 24 | Count key.Binding 25 | } 26 | 27 | // TODO: add shift+tab 28 | var DefaultKeyMap = KeyMap{ 29 | // TODO: combine tab selection keys into one and distinguish by Key.Value? 30 | TtabSel: key.NewBinding( 31 | key.WithKeys("1", "2", "3", "4", "5", "6"), 32 | key.WithHelp("1-6", "GoTo Tab"), 33 | ), 34 | Count: key.NewBinding( 35 | key.WithKeys("c"), 36 | key.WithHelp("c", "Show Counters"), 37 | ), 38 | Stats: key.NewBinding( 39 | key.WithKeys("s"), 40 | key.WithHelp("s", "Show Statistics"), 41 | ), 42 | Up: key.NewBinding( 43 | key.WithKeys("k", "up"), // actual keybindings 44 | key.WithHelp("↑/k", "Move up"), // corresponding help text 45 | ), 46 | Down: key.NewBinding( 47 | key.WithKeys("j", "down"), 48 | key.WithHelp("↓/j", "Move down"), 49 | ), 50 | PageUp: key.NewBinding( 51 | key.WithKeys("b", "pgup"), 52 | key.WithHelp("b/pgup", "Page Up"), 53 | ), 54 | PageDown: key.NewBinding( 55 | key.WithKeys("f", "pgdown"), 56 | key.WithHelp("f/pgdn", "Page Down"), 57 | ), 58 | Tab: key.NewBinding( 59 | key.WithKeys("tab"), 60 | key.WithHelp("tab", "Cycle tabs"), 61 | ), 62 | ShiftTab: key.NewBinding( 63 | key.WithKeys("shift+tab"), 64 | key.WithHelp("shift+tab", "Cycle tabs backwards"), 65 | ), 66 | Refresh: key.NewBinding( 67 | key.WithKeys("r"), 68 | key.WithHelp("r", "Refresh View"), 69 | key.WithDisabled(), 70 | ), 71 | TimeRange: key.NewBinding( 72 | key.WithKeys("t"), 73 | key.WithHelp("t", "Modify Time Range"), 74 | key.WithDisabled(), 75 | ), 76 | Quit: key.NewBinding( 77 | key.WithKeys("q", "ctrl+c"), 78 | key.WithHelp("q", "Quit scom"), 79 | ), 80 | Slash: key.NewBinding( 81 | key.WithKeys("/"), 
82 | key.WithHelp("/", "Filter table"), 83 | ), 84 | Info: key.NewBinding( 85 | key.WithKeys("i"), 86 | key.WithHelp("i", "Info"), 87 | ), 88 | Enter: key.NewBinding( 89 | key.WithKeys("enter"), 90 | key.WithHelp("enter", "Select entry"), 91 | ), 92 | SaveSubmitJob: key.NewBinding( 93 | key.WithKeys("ctrl+s"), 94 | key.WithHelp("ctrl+s", "Save and Submit the job script"), 95 | key.WithDisabled(), 96 | ), 97 | Escape: key.NewBinding( 98 | key.WithKeys("Esc"), 99 | key.WithHelp("Esc", "Exit without saving"), 100 | key.WithDisabled(), 101 | ), 102 | } 103 | 104 | func (km KeyMap) ShortHelp() []key.Binding { 105 | return []key.Binding{ 106 | km.TtabSel, 107 | km.Up, 108 | km.Down, 109 | km.PageUp, 110 | km.PageDown, 111 | km.Tab, 112 | km.ShiftTab, 113 | km.Slash, 114 | km.Info, 115 | km.Stats, 116 | km.Count, 117 | km.Refresh, 118 | km.TimeRange, 119 | km.Enter, 120 | km.Quit, 121 | km.SaveSubmitJob, 122 | km.Escape, 123 | } 124 | } 125 | 126 | func (km KeyMap) FullHelp() [][]key.Binding { 127 | // TODO: this... 128 | // MoreHelp returns an extended group of help items, grouped by columns. 129 | // The help bubble will render the help in the order in which the help 130 | // items are returned here. 
131 | return nil 132 | } 133 | -------------------------------------------------------------------------------- /internal/logger/logger.go: -------------------------------------------------------------------------------- 1 | package logger 2 | 3 | import ( 4 | "log" 5 | "os" 6 | 7 | tea "github.com/charmbracelet/bubbletea" 8 | ) 9 | 10 | const ( 11 | logFileName = "scdebug.log" 12 | ) 13 | 14 | func SetupLogger() (bool, *log.Logger) { 15 | 16 | var ( 17 | debugset bool = false 18 | lf *os.File 19 | err error 20 | ) 21 | 22 | if len(os.Getenv("DEBUG")) > 0 { 23 | lf, err = tea.LogToFile(logFileName, "debug") 24 | if err != nil { 25 | log.Fatalf("FATAL: %s\n", err) 26 | } 27 | debugset = true 28 | } else { 29 | lf, err = os.OpenFile("/dev/null", os.O_WRONLY|os.O_APPEND, 0000) 30 | if err != nil { 31 | log.Fatalf("Open /dev/null for logging ended up fatal!\n") 32 | } 33 | } 34 | 35 | l := log.New(lf, "SC: ", log.Lshortfile|log.Lmicroseconds) 36 | l.Printf("Log file: %s\n", lf.Name()) 37 | 38 | return debugset, l 39 | } 40 | -------------------------------------------------------------------------------- /internal/model/init.go: -------------------------------------------------------------------------------- 1 | package model 2 | 3 | import ( 4 | tea "github.com/charmbracelet/bubbletea" 5 | "github.com/CLIP-HPC/SlurmCommander/internal/command" 6 | "github.com/CLIP-HPC/SlurmCommander/internal/model/tabs/clustertab" 7 | "github.com/CLIP-HPC/SlurmCommander/internal/model/tabs/jobfromtemplate" 8 | "github.com/CLIP-HPC/SlurmCommander/internal/model/tabs/jobtab" 9 | ) 10 | 11 | func (m Model) Init() tea.Cmd { 12 | // use bubbletea.Batch(com1, com2, ...) 
here 13 | //return command.TimedGetSqueue() 14 | //return tea.Batch(command.TimedGetSqueue(), command.TimedGetSinfo(), command.TimedGetSacct()) 15 | return tea.Batch( 16 | command.GetUserName(m.Log), 17 | jobtab.QuickGetSqueue(m.Log), 18 | clustertab.QuickGetSinfo(m.Log), 19 | jobfromtemplate.GetTemplateList(m.Globals.ConfigContainer.TemplateDirs, m.Log), 20 | ) 21 | } 22 | -------------------------------------------------------------------------------- /internal/model/model.go: -------------------------------------------------------------------------------- 1 | package model 2 | 3 | import ( 4 | "log" 5 | 6 | "github.com/charmbracelet/bubbles/help" 7 | "github.com/CLIP-HPC/SlurmCommander/internal/config" 8 | "github.com/CLIP-HPC/SlurmCommander/internal/model/tabs/abouttab" 9 | "github.com/CLIP-HPC/SlurmCommander/internal/model/tabs/clustertab" 10 | "github.com/CLIP-HPC/SlurmCommander/internal/model/tabs/jobdetailstab" 11 | "github.com/CLIP-HPC/SlurmCommander/internal/model/tabs/jobfromtemplate" 12 | "github.com/CLIP-HPC/SlurmCommander/internal/model/tabs/jobhisttab" 13 | "github.com/CLIP-HPC/SlurmCommander/internal/model/tabs/jobtab" 14 | ) 15 | 16 | const ( 17 | tabJobs = iota 18 | tabJobHist 19 | tabJobDetails 20 | tabJobFromTemplate 21 | tabCluster 22 | tabAbout 23 | ) 24 | 25 | // TODO: put this in model? 26 | var tabs = []string{ 27 | "Job Queue", 28 | "Job History", // TODO: get this from sacct, even without json, then on Enter, goto: Job Details tab and fetch JSON there for a specific job? 
29 | "Job Details", // TODO: either show jobid textinput, or open this tab from Job History on selection 30 | "Job from Template", // TODO: devise sbatch templates and menus in this tab to fill them out 31 | "Cluster", 32 | "About", 33 | } 34 | 35 | type ActiveTabKeys interface { 36 | SetupKeys() 37 | DisableKeys() 38 | } 39 | 40 | var tabKeys = []ActiveTabKeys{ 41 | &jobtab.KeyMap, 42 | &jobhisttab.KeyMap, 43 | &jobdetailstab.KeyMap, 44 | &jobfromtemplate.KeyMap, 45 | &clustertab.KeyMap, 46 | &abouttab.KeyMap, 47 | } 48 | 49 | // TODO: in structures below: 50 | // - make embedding and accessing leafs uniform (shorthand notation vs Full path) 51 | type Model struct { 52 | Globals 53 | jobtab.JobTab 54 | jobhisttab.JobHistTab 55 | jobdetailstab.JobDetailsTab 56 | jobfromtemplate.JobFromTemplateTab 57 | clustertab.ClusterTab 58 | } 59 | 60 | type Globals struct { 61 | ActiveTab uint 62 | UpdateCnt uint64 63 | Debug bool 64 | DebugMsg string 65 | lastKey string 66 | winW int 67 | winH int 68 | Log *log.Logger 69 | Help help.Model 70 | UserName string 71 | UAccounts []string 72 | config.ConfigContainer 73 | ErrorMsg error 74 | ErrorHelp string 75 | SizeErr string 76 | } 77 | -------------------------------------------------------------------------------- /internal/model/tabs/abouttab/abouttab.go: -------------------------------------------------------------------------------- 1 | package abouttab 2 | 3 | import ( 4 | "github.com/charmbracelet/bubbles/key" 5 | ) 6 | 7 | type Keys []*key.Binding 8 | 9 | var KeyMap = Keys{} 10 | 11 | func (ky *Keys) SetupKeys() { 12 | for _, k := range *ky { 13 | k.SetEnabled(true) 14 | } 15 | } 16 | 17 | func (ky *Keys) DisableKeys() { 18 | for _, k := range *ky { 19 | k.SetEnabled(false) 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /internal/model/tabs/clustertab/clustertab.go: -------------------------------------------------------------------------------- 1 | package clustertab 2 
| 3 | import ( 4 | "log" 5 | "strings" 6 | 7 | "github.com/CLIP-HPC/SlurmCommander/internal/generic" 8 | "github.com/CLIP-HPC/SlurmCommander/internal/slurm" 9 | "github.com/CLIP-HPC/SlurmCommander/internal/table" 10 | "github.com/charmbracelet/bubbles/progress" 11 | "github.com/charmbracelet/bubbles/textinput" 12 | ) 13 | 14 | type ClusterTab struct { 15 | StatsOn bool 16 | CountsOn bool 17 | FilterOn bool 18 | SinfoTable table.Model 19 | CpuBar progress.Model 20 | MemBar progress.Model 21 | GpuBar progress.Model 22 | Sinfo SinfoJSON 23 | SinfoFiltered SinfoJSON 24 | Filter textinput.Model 25 | Stats 26 | Breakdowns 27 | } 28 | 29 | type Stats struct { 30 | // TODO: also perhaps: count by user? account? 31 | StateCnt map[string]uint 32 | StateSimpleCnt map[string]uint 33 | } 34 | 35 | type Breakdowns struct { 36 | CpuPerPart generic.CountItemSlice 37 | MemPerPart generic.CountItemSlice 38 | GpuPerPart generic.CountItemSlice 39 | NodesPerState generic.CountItemSlice 40 | } 41 | 42 | func (t *ClusterTab) AdjTableHeight(h int, l *log.Logger) { 43 | l.Printf("FixTableHeight(%d) from %d\n", h, t.SinfoTable.Height()) 44 | if t.CountsOn || t.FilterOn { 45 | t.SinfoTable.SetHeight(h - 35) 46 | } else { 47 | t.SinfoTable.SetHeight(h - 25) 48 | } 49 | l.Printf("FixTableHeight to %d\n", t.SinfoTable.Height()) 50 | } 51 | 52 | func (t *ClusterTab) GetStatsFiltered(l *log.Logger) { 53 | var key string 54 | 55 | cpp := generic.CountItemMap{} // CpuPerPartition 56 | mpp := generic.CountItemMap{} // MemPerPartition 57 | gpp := generic.CountItemMap{} // GPUPerPartition 58 | nps := generic.CountItemMap{} // NodesPerState 59 | 60 | t.Stats.StateCnt = map[string]uint{} 61 | t.Stats.StateSimpleCnt = map[string]uint{} 62 | 63 | l.Printf("GetStatsFiltered JobClusterTab start\n") 64 | for _, v := range t.SinfoFiltered.Nodes { 65 | if len(*v.StateFlags) != 0 { 66 | key = *v.State + "+" + strings.Join(*v.StateFlags, "+") 67 | } else { 68 | key = *v.State 69 | } 70 | 
//t.Stats.StateCnt[*v.JobState]++ 71 | t.Stats.StateCnt[key]++ 72 | t.Stats.StateSimpleCnt[*v.State]++ 73 | 74 | //Breakdowns: 75 | // CpusPer 76 | for _, p := range *v.Partitions { 77 | if _, ok := cpp[p]; !ok { 78 | cpp[p] = &generic.CountItem{} 79 | } 80 | if _, ok := mpp[p]; !ok { 81 | mpp[p] = &generic.CountItem{} 82 | } 83 | if _, ok := gpp[p]; !ok { 84 | gpp[p] = &generic.CountItem{} 85 | } 86 | cpp[p].Name = p 87 | cpp[p].Count += uint(*v.AllocCpus) 88 | cpp[p].Total += uint(*v.Cpus) 89 | mpp[p].Name = p 90 | mpp[p].Count += uint(*v.AllocMemory) 91 | mpp[p].Total += uint(*v.RealMemory) 92 | 93 | gpp[p].Name = p 94 | gpp[p].Count += uint(*slurm.ParseGRES(*v.GresUsed)) 95 | gpp[p].Total += uint(*slurm.ParseGRES(*v.Gres)) 96 | } 97 | for _, s := range *v.StateFlags { 98 | if _, ok := nps[s]; !ok { 99 | nps[s] = &generic.CountItem{} 100 | } 101 | nps[s].Name = s 102 | nps[s].Count++ 103 | } 104 | } 105 | 106 | // sort & filter breakdowns 107 | t.Breakdowns.CpuPerPart = generic.SortItemMapBySel("Name", &cpp) 108 | t.Breakdowns.MemPerPart = generic.SortItemMapBySel("Name", &mpp) 109 | t.Breakdowns.NodesPerState = generic.SortItemMapBySel("Count", &nps) 110 | t.Breakdowns.GpuPerPart = generic.SortItemMapBySel("Count", &gpp) 111 | 112 | l.Printf("GetStatsFiltered end\n") 113 | } 114 | -------------------------------------------------------------------------------- /internal/model/tabs/clustertab/clustertabcommands.go: -------------------------------------------------------------------------------- 1 | package clustertab 2 | 3 | import ( 4 | "encoding/json" 5 | "log" 6 | "os/exec" 7 | "time" 8 | 9 | tea "github.com/charmbracelet/bubbletea" 10 | "github.com/CLIP-HPC/SlurmCommander/internal/command" 11 | "github.com/CLIP-HPC/SlurmCommander/internal/config" 12 | ) 13 | 14 | var ( 15 | cc config.ConfigContainer 16 | SinfoCmdSwitches = []string{"-a", "--json"} 17 | ) 18 | 19 | func NewCmdCC(config config.ConfigContainer) { 20 | cc = config 21 | } 22 | 23 | // Calls 
`sinfo` to get node information for Cluster Tab 24 | func GetSinfo(t time.Time) tea.Msg { 25 | var siJson SinfoJSON 26 | 27 | cmd := cc.Binpaths["sinfo"] 28 | out, err := exec.Command(cmd, SinfoCmdSwitches...).CombinedOutput() 29 | if err != nil { 30 | return command.ErrorMsg{ 31 | From: "GetSinfo", 32 | ErrHelp: "Failed to run sinfo command, check your scom.conf and set the correct paths there.", 33 | OrigErr: err, 34 | } 35 | } 36 | 37 | err = json.Unmarshal(out, &siJson) 38 | if err != nil { 39 | return command.ErrorMsg{ 40 | From: "GetSinfo", 41 | ErrHelp: "sinfo JSON failed to parse, note your slurm version and open an issue with us here: https://github.com/CLIP-HPC/SlurmCommander/issues/new/choose", 42 | OrigErr: err, 43 | } 44 | } 45 | 46 | return siJson 47 | 48 | } 49 | 50 | func TimedGetSinfo(l *log.Logger) tea.Cmd { 51 | l.Printf("TimedGetSinfo() start, tick: %d\n", cc.GetTick()) 52 | return tea.Tick(cc.GetTick()*time.Second, GetSinfo) 53 | } 54 | 55 | func QuickGetSinfo(l *log.Logger) tea.Cmd { 56 | l.Printf("QuickGetSinfo() start") 57 | return tea.Tick(0*time.Second, GetSinfo) 58 | } 59 | -------------------------------------------------------------------------------- /internal/model/tabs/clustertab/clustertabkeys.go: -------------------------------------------------------------------------------- 1 | package clustertab 2 | 3 | import ( 4 | "github.com/charmbracelet/bubbles/key" 5 | "github.com/CLIP-HPC/SlurmCommander/internal/keybindings" 6 | ) 7 | 8 | type Keys []*key.Binding 9 | 10 | var KeyMap = Keys{ 11 | &keybindings.DefaultKeyMap.Up, 12 | &keybindings.DefaultKeyMap.Down, 13 | &keybindings.DefaultKeyMap.PageUp, 14 | &keybindings.DefaultKeyMap.PageDown, 15 | &keybindings.DefaultKeyMap.Slash, 16 | &keybindings.DefaultKeyMap.Stats, 17 | &keybindings.DefaultKeyMap.Count, 18 | } 19 | 20 | func (ky *Keys) SetupKeys() { 21 | for _, k := range *ky { 22 | k.SetEnabled(true) 23 | } 24 | } 25 | 26 | func (ky *Keys) DisableKeys() { 27 | for _, k := range *ky 
{ 28 | k.SetEnabled(false) 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /internal/model/tabs/clustertab/clustertabtable.go: -------------------------------------------------------------------------------- 1 | package clustertab 2 | 3 | import ( 4 | "log" 5 | "regexp" 6 | "strconv" 7 | "strings" 8 | 9 | "github.com/CLIP-HPC/SlurmCommander/internal/command" 10 | "github.com/CLIP-HPC/SlurmCommander/internal/slurm" 11 | "github.com/CLIP-HPC/SlurmCommander/internal/table" 12 | ) 13 | 14 | const ( 15 | // Width of the SinfoTable, used in calculating Stats box width. 16 | // Must be adjusted alongside SinfoTabCols changes. 17 | //SinfoTabWidth = 118 18 | SinfoTabWidth = 134 19 | ) 20 | 21 | var SinfoTabCols = []table.Column{ 22 | { 23 | Title: "Name", 24 | Width: 15, 25 | }, 26 | { 27 | Title: "Part.", 28 | Width: 5, 29 | }, 30 | { 31 | Title: "State", 32 | Width: 10, 33 | }, 34 | { 35 | Title: "CPUAvail", 36 | Width: 8, 37 | }, 38 | { 39 | Title: "CPUTotal", 40 | Width: 8, 41 | }, 42 | { 43 | Title: "MEMAvail", 44 | Width: 10, 45 | }, 46 | { 47 | Title: "MEMTotal", 48 | Width: 10, 49 | }, 50 | { 51 | Title: "GPUAvail", 52 | Width: 8, 53 | }, 54 | { 55 | Title: "GPUTotal", 56 | Width: 8, 57 | }, 58 | { 59 | Title: "State FLAGS", 60 | Width: 15, 61 | }, 62 | } 63 | 64 | type SinfoJSON slurm.SinfoJSON 65 | type TableRows []table.Row 66 | 67 | func (siJson *SinfoJSON) FilterSinfoTable(f string, l *log.Logger) (*TableRows, *SinfoJSON, *command.ErrorMsg) { 68 | var ( 69 | siTabRows = TableRows{} 70 | siJsonFiltered = SinfoJSON{} 71 | errMsg *command.ErrorMsg 72 | re *regexp.Regexp 73 | ) 74 | 75 | l.Printf("FilterSinfoTable: rows %d", len(siJson.Nodes)) 76 | re, err := regexp.Compile(f) 77 | if err != nil { 78 | l.Printf("FAIL: compile regexp: %q with err: %s", f, err) 79 | f = "" 80 | re, _ = regexp.Compile(f) 81 | errMsg = &command.ErrorMsg{ 82 | From: "FilterSinfoTable", 83 | ErrHelp: "Regular expression failed to 
compile, please correct it (turn on DEBUG to see details)", 84 | OrigErr: err, 85 | } 86 | } 87 | 88 | for _, v := range siJson.Nodes { 89 | 90 | line := strings.Join([]string{ 91 | *v.Name, 92 | strings.Join(*v.Partitions, ","), 93 | *v.State, 94 | strings.Join(*v.StateFlags, ","), 95 | }, ".") 96 | 97 | if re.MatchString(line) { 98 | // This is how many GPUs are available on the node 99 | gpuAvail := slurm.ParseGRES(*v.Gres) 100 | 101 | // This is how many GPUs are allocated on the node 102 | gpuAlloc := slurm.ParseGRES(*v.GresUsed) 103 | siTabRows = append(siTabRows, table.Row{*v.Name, strings.Join(*v.Partitions, ","), *v.State, strconv.FormatInt(*v.IdleCpus, 10), strconv.Itoa(*v.Cpus), strconv.Itoa(*v.FreeMemory), strconv.Itoa(*v.RealMemory), strconv.Itoa(*gpuAvail - *gpuAlloc), strconv.Itoa(*gpuAvail), strings.Join(*v.StateFlags, ",")}) 104 | siJsonFiltered.Nodes = append(siJsonFiltered.Nodes, v) 105 | } 106 | } 107 | 108 | return &siTabRows, &siJsonFiltered, errMsg 109 | } 110 | 111 | // TODO: not sure what i was thinking, but this we really don't need, just inject in Update() directly to model!? 
112 | var SinfoTabRows = []table.Row{} 113 | -------------------------------------------------------------------------------- /internal/model/tabs/clustertab/clustertabview.go: -------------------------------------------------------------------------------- 1 | package clustertab 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "sort" 7 | "strings" 8 | 9 | "github.com/CLIP-HPC/SlurmCommander/internal/generic" 10 | "github.com/CLIP-HPC/SlurmCommander/internal/slurm" 11 | "github.com/CLIP-HPC/SlurmCommander/internal/styles" 12 | "github.com/charmbracelet/bubbles/progress" 13 | "github.com/charmbracelet/lipgloss" 14 | "github.com/dustin/go-humanize" 15 | ) 16 | 17 | func (ct *ClusterTab) tabCluster() string { 18 | 19 | scr := ct.SinfoTable.View() + "\n" 20 | 21 | return scr 22 | } 23 | 24 | func (ct *ClusterTab) tabClusterBars(l *log.Logger) string { 25 | var ( 26 | scr string = "" 27 | cpuPerc float64 = 0 28 | cpuUsed int64 = 0 29 | cpuAvail int = 0 30 | memPerc float64 = 0 31 | memUsed int64 = 0 32 | memAvail int = 0 33 | //gpuPerc float64 = 0 34 | gpuUsed slurm.GresMap = make(slurm.GresMap) 35 | gpuAvail slurm.GresMap = make(slurm.GresMap) 36 | gpuPerc map[string]float64 = make(map[string]float64) 37 | gpuList string 38 | gpuSlice []string = make([]string, 0) 39 | ) 40 | 41 | sel := ct.SinfoTable.Cursor() 42 | l.Printf("ClusterTab Selected: %d\n", sel) 43 | l.Printf("ClusterTab len results: %d\n", len(ct.SinfoFiltered.Nodes)) 44 | ct.CpuBar = progress.New(progress.WithGradient("#277BC0", "#FFCB42")) 45 | ct.MemBar = progress.New(progress.WithGradient("#277BC0", "#FFCB42")) 46 | ct.GpuBar = progress.New(progress.WithGradient("#277BC0", "#FFCB42")) 47 | if len(ct.SinfoFiltered.Nodes) > 0 && sel != -1 { 48 | cpuUsed = *ct.SinfoFiltered.Nodes[sel].AllocCpus 49 | cpuAvail = *ct.SinfoFiltered.Nodes[sel].Cpus 50 | cpuPerc = float64(cpuUsed) / float64(cpuAvail) 51 | memUsed = *ct.SinfoFiltered.Nodes[sel].AllocMemory 52 | memAvail = *ct.SinfoFiltered.Nodes[sel].RealMemory 53 | 
memPerc = float64(memUsed) / float64(memAvail) 54 | 55 | gpuAvail = *slurm.ParseGRESAll(*ct.SinfoFiltered.Nodes[sel].Gres) 56 | gpuUsed = *slurm.ParseGRESAll(*ct.SinfoFiltered.Nodes[sel].GresUsed) 57 | if len(gpuAvail) > 0 { 58 | for k, _ := range gpuAvail { 59 | gpuPerc[k] = float64(gpuUsed[k]) / float64(gpuAvail[k]) 60 | } 61 | } 62 | } 63 | cpur := lipgloss.JoinVertical(lipgloss.Left, fmt.Sprintf("CPU used/total: %d/%d", cpuUsed, cpuAvail), ct.CpuBar.ViewAs(cpuPerc)) 64 | memr := lipgloss.JoinVertical(lipgloss.Left, fmt.Sprintf("MEM used/total: %d/%d", memUsed, memAvail), ct.MemBar.ViewAs(memPerc)) 65 | scr += lipgloss.JoinVertical(lipgloss.Top, cpur, memr) 66 | 67 | for k := range gpuAvail { 68 | gpuSlice = append(gpuSlice, k) 69 | } 70 | sort.Strings(gpuSlice) 71 | 72 | if len(gpuAvail) > 0 { 73 | for _, k := range gpuSlice { 74 | // TODO: this adds one additional newline at the top bringing gpus down... find the fix 75 | //gpuList = lipgloss.JoinVertical(lipgloss.Left, gpuList, fmt.Sprintf("GPU %s used/total: %d/%d", k, gpuUsed[k], gpuAvail[k]), ct.GpuBar.ViewAs(gpuPerc[k])) 76 | gpuList += fmt.Sprintf("GPU %q used/total: %d/%d\n", k, gpuUsed[k], gpuAvail[k]) + ct.GpuBar.ViewAs(gpuPerc[k]) + "\n" 77 | } 78 | scr = lipgloss.JoinHorizontal(lipgloss.Top, scr, fmt.Sprintf("%4s", ""), gpuList[:len(gpuList)-1]) 79 | } 80 | scr += "\n\n" 81 | return scr 82 | } 83 | 84 | func (ct *ClusterTab) ClusterTabStats(l *log.Logger) string { 85 | var str string 86 | 87 | l.Printf("JobClusterTabStats called\n") 88 | 89 | sel := ct.SinfoTable.Cursor() 90 | //str += styles.StatsSeparatorTitle.Render(fmt.Sprintf("%-30s", "Nodes states (filtered):")) 91 | str += styles.StatsSeparatorTitle.Render(fmt.Sprintf("%-30s", "Nodes states (filtered):")) 92 | str += "\n" 93 | 94 | if len(ct.SinfoFiltered.Nodes) > 0 { 95 | //str += generic.GenCountStrVert(m.JobClusterTab.Stats.StateCnt, m.Log) 96 | str += generic.GenCountStrVert(ct.Stats.StateSimpleCnt, l) 97 | } 98 | 99 | str += 
styles.StatsSeparatorTitle.Render(fmt.Sprintf("%-30s", "Selected node:"))

	if len(ct.SinfoFiltered.Nodes) > 0 && sel != -1 {
		str += "\n"
		str += fmt.Sprintf("%-15s: %s\n", "Arch", *ct.SinfoFiltered.Nodes[sel].Architecture)
		str += fmt.Sprintf("%-15s: %s\n", "Features", *ct.SinfoFiltered.Nodes[sel].ActiveFeatures)
		str += fmt.Sprintf("%-15s: %s\n", "TRES", *ct.SinfoFiltered.Nodes[sel].Tres)
		// TresUsed can be nil; print an empty value instead of dereferencing.
		if ct.SinfoFiltered.Nodes[sel].TresUsed != nil {
			str += fmt.Sprintf("%-15s: %s\n", "TRES Used", *ct.SinfoFiltered.Nodes[sel].TresUsed)
		} else {
			str += fmt.Sprintf("%-15s: %s\n", "TRES Used", "")
		}
		str += fmt.Sprintf("%-15s: %s\n", "GRES", *ct.SinfoFiltered.Nodes[sel].Gres)
		str += fmt.Sprintf("%-15s: %s\n", "GRES Used", *ct.SinfoFiltered.Nodes[sel].GresUsed)
		str += fmt.Sprintf("%-15s: %s\n", "Partitions", strings.Join(*ct.SinfoFiltered.Nodes[sel].Partitions, ","))
	}
	return str
}

// getClusterCounts renders the four breakdown boxes (CPUs, Memory and GPUs
// per partition, plus nodes per state) joined horizontally.
func (ct *ClusterTab) getClusterCounts() string {
	var (
		ret string
		cpp string
		mpp string
		gpp string
		nps string
	)

	fmtStrCpu := "%-8s : %4d / %4d %2.0f%%\n"
	fmtStrMem := "%-8s : %s / %s %2.0f%%\n"
	fmtStrGpu := "%-8s : %4d / %4d %2.0f%%\n"
	fmtStrNPS := "%-15s : %4d\n"
	fmtTitle := "%-30s"

	// pct guards the zero-total case (a partition with none of the resource),
	// which previously rendered as NaN%% for CPUs and Memory; the GPU loop
	// already had this guard, so all three are now consistent.
	pct := func(count, total uint) float32 {
		if total == 0 {
			return 0
		}
		return float32(count) / float32(total) * 100
	}

	cpp += styles.TextYellowOnBlue.Render(fmt.Sprintf(fmtTitle, "CPUs per Partition (used/total)"))
	cpp += "\n"
	for _, v := range ct.Breakdowns.CpuPerPart {
		cpp += fmt.Sprintf(fmtStrCpu, v.Name, v.Count, v.Total, pct(v.Count, v.Total))
	}

	mpp += styles.TextYellowOnBlue.Render(fmt.Sprintf(fmtTitle, "Mem per Partition (used/total)"))
	mpp += "\n"
	for _, v := range ct.Breakdowns.MemPerPart {
		// Counts are MiB; convert to bytes for humanize.
		mpp += fmt.Sprintf(fmtStrMem, v.Name, humanize.Bytes(uint64(v.Count)*1024*1024), humanize.Bytes(uint64(v.Total)*1024*1024), pct(v.Count, v.Total))
	}

	gpp += styles.TextYellowOnBlue.Render(fmt.Sprintf(fmtTitle, "GPUs per Partition (used/total)"))
	gpp += "\n"
	for _, v := range ct.Breakdowns.GpuPerPart {
		gpp += fmt.Sprintf(fmtStrGpu, v.Name, v.Count, v.Total, pct(v.Count, v.Total))
	}

	nps += styles.TextYellowOnBlue.Render(fmt.Sprintf("%-30s", "Nodes per State"))
	nps += "\n"
	for _, v := range ct.Breakdowns.NodesPerState {
		nps += fmt.Sprintf(fmtStrNPS, v.Name, v.Count)
	}

	cpp = styles.CountsBox.Render(cpp)
	mpp = styles.CountsBox.Render(mpp)
	gpp = styles.CountsBox.Render(gpp)
	nps = styles.CountsBox.Render(nps)

	ret = lipgloss.JoinHorizontal(lipgloss.Top, cpp, mpp, gpp, nps)

	return ret
}

// View assembles the Cluster tab screen: header (filter summary and usage
// bars), the node table, an optional footer (filter input or counts boxes)
// and, when enabled, the stats sidebar joined to the right of the table.
func (ct *ClusterTab) View(l *log.Logger) string {
	var (
		Header       strings.Builder
		MainWindow   strings.Builder
		FooterWindow strings.Builder
	)

	// Top Main
	Header.WriteString(fmt.Sprintf("Filter: %10.20s\tItems: %d\n\n", ct.Filter.Value(), len(ct.SinfoFiltered.Nodes)))
	Header.WriteString(ct.tabClusterBars(l))

	// Table is always there
	MainWindow.WriteString(ct.tabCluster())

	// Attach below table whatever is turned on
	switch {
	case ct.FilterOn:
		// filter
		FooterWindow.WriteString("\n")
		FooterWindow.WriteString("Filter value (search in joined: Name + Partition + State + StateFlags!):\n")
		FooterWindow.WriteString(fmt.Sprintf("%s\n", ct.Filter.View()))
		FooterWindow.WriteString("(Enter to apply, Esc to clear filter and abort, Regular expressions supported.\n")
		FooterWindow.WriteString(" Syntax details: https://golang.org/s/re2syntax)\n")
	case ct.CountsOn:
		FooterWindow.WriteString("\n")
		FooterWindow.WriteString(styles.JobInfoBox.Render(ct.getClusterCounts()))

| default: 199 | FooterWindow.WriteString("\n") 200 | //MainWindow.WriteString(generic.GenCountStr(ct.Stats.StateCnt, l)) 201 | } 202 | 203 | // Lastly, if stats are on, horizontally join them to main 204 | switch { 205 | case ct.StatsOn: 206 | X := MainWindow.String() 207 | MainWindow.Reset() 208 | // TODO: make this Width() somewhere else (e.g. Update() on WindowSizeMsg) 209 | // Table Width == 118 chars, so .Width(m.winW-118) 210 | //MainWindow.WriteString(lipgloss.JoinHorizontal(lipgloss.Top, X, styles.StatsBoxStyle.Width(50).Render(ct.ClusterTabStats(l)))) 211 | l.Printf("CTB Width = %d\n", styles.ClusterTabStats.GetWidth()) 212 | MainWindow.WriteString(lipgloss.JoinHorizontal(lipgloss.Top, X, styles.ClusterTabStats.Render(ct.ClusterTabStats(l)))) 213 | } 214 | 215 | return Header.String() + MainWindow.String() + FooterWindow.String() 216 | } 217 | -------------------------------------------------------------------------------- /internal/model/tabs/jobdetailstab/jobdetailstab.go: -------------------------------------------------------------------------------- 1 | package jobdetailstab 2 | 3 | import ( 4 | "github.com/charmbracelet/bubbles/viewport" 5 | "github.com/CLIP-HPC/SlurmCommander/internal/slurm" 6 | ) 7 | 8 | type JobDetailsTab struct { 9 | SelJobID string 10 | SelJobIDNew int 11 | ViewPort viewport.Model 12 | slurm.SacctSingleJobHist 13 | } 14 | -------------------------------------------------------------------------------- /internal/model/tabs/jobdetailstab/jobdetailstabkeys.go: -------------------------------------------------------------------------------- 1 | package jobdetailstab 2 | 3 | import ( 4 | "github.com/charmbracelet/bubbles/key" 5 | "github.com/CLIP-HPC/SlurmCommander/internal/keybindings" 6 | ) 7 | 8 | type Keys []*key.Binding 9 | 10 | var KeyMap = Keys{ 11 | &keybindings.DefaultKeyMap.Up, 12 | &keybindings.DefaultKeyMap.Down, 13 | &keybindings.DefaultKeyMap.PageUp, 14 | &keybindings.DefaultKeyMap.PageDown, 15 | } 16 | 17 | func (ky 
*Keys) SetupKeys() { 18 | for _, k := range *ky { 19 | k.SetEnabled(true) 20 | } 21 | } 22 | 23 | func (ky *Keys) DisableKeys() { 24 | for _, k := range *ky { 25 | k.SetEnabled(false) 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /internal/model/tabs/jobdetailstab/jobdetailstabview.go: -------------------------------------------------------------------------------- 1 | package jobdetailstab 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "strconv" 7 | "strings" 8 | "time" 9 | 10 | "github.com/CLIP-HPC/SlurmCommander/internal/model/tabs/jobhisttab" 11 | "github.com/CLIP-HPC/SlurmCommander/internal/styles" 12 | ) 13 | 14 | func (jd *JobDetailsTab) tabJobDetails(jh *jobhisttab.JobHistTab, l *log.Logger) (scr string) { 15 | 16 | var ( 17 | runT time.Duration 18 | waitT time.Duration 19 | ) 20 | 21 | // race between View() call and command.SingleJobGetSacct(jd.SelJobID) call 22 | switch { 23 | //case jd.SelJobID == "": 24 | // return "Select a job from the Job History tab.\n" 25 | case jd.SelJobIDNew == -1: 26 | return "Select a job from the Job History tab.\n" 27 | //case len(m.SacctSingleJobHist.Jobs) == 0: 28 | // return fmt.Sprintf("Waiting for job %s info...\n", jd.SelJobID) 29 | case len(jh.SacctHistFiltered.Jobs) == 0: 30 | //return fmt.Sprintf("Waiting for job %s info...\n", jd.SelJobID) 31 | return "Select a job from the Job History tab.\n" 32 | } 33 | 34 | //width := m.Globals.winW - 10 35 | 36 | //job := m.SacctSingleJobHist.Jobs[0] 37 | // NEW: 38 | job := jh.SacctHistFiltered.Jobs[jd.SelJobIDNew] 39 | 40 | l.Printf("Job Details req %#v ,got: %#v\n", jd.SelJobID, job.JobId) 41 | 42 | // TODO: consider moving this to a viewport... 
43 | 44 | fmtStr := "%-20s : %-60s\n" 45 | fmtStrX := "%-20s : %-60s" 46 | 47 | head := "" 48 | waitT = time.Unix(int64(*job.Time.Start), 0).Sub(time.Unix(int64(*job.Time.Submission), 0)) 49 | // If job is RUNNING, use Elapsed instead of Sub (because End=0) 50 | if *job.State.Current == "RUNNING" { 51 | runT = time.Duration(int64(*job.Time.Elapsed) * int64(time.Second)) 52 | } else { 53 | runT = time.Unix(int64(*job.Time.End), 0).Sub(time.Unix(int64(*job.Time.Start), 0)) 54 | } 55 | 56 | head += styles.StatsSeparatorTitle.Render(fmt.Sprintf(fmtStrX, "Job ID", strconv.Itoa(*job.JobId))) 57 | head += "\n" 58 | head += fmt.Sprintf(fmtStr, "Job Name", *job.Name) 59 | head += fmt.Sprintf(fmtStr, "User", *job.User) 60 | head += fmt.Sprintf(fmtStr, "Group", *job.Group) 61 | head += fmt.Sprintf(fmtStr, "Job Account", *job.Account) 62 | head += fmt.Sprintf(fmtStr, "Job Submission", time.Unix(int64(*job.Time.Submission), 0).String()) 63 | head += fmt.Sprintf(fmtStr, "Job Start", time.Unix(int64(*job.Time.Start), 0).String()) 64 | // Running jobs have End==0 65 | if *job.State.Current == "RUNNING" { 66 | head += fmt.Sprintf(fmtStr, "Job End", "RUNNING") 67 | } else { 68 | head += fmt.Sprintf(fmtStr, "Job End", time.Unix(int64(*job.Time.End), 0).String()) 69 | } 70 | head += fmt.Sprintf(fmtStr, "Job Wait time", waitT.String()) 71 | head += fmt.Sprintf(fmtStr, "Job Run time", runT.String()) 72 | head += fmt.Sprintf(fmtStr, "Partition", *job.Partition) 73 | head += fmt.Sprintf(fmtStr, "Priority", strconv.Itoa(*job.Priority)) 74 | head += fmt.Sprintf(fmtStr, "QoS", *job.Qos) 75 | 76 | scr += styles.JobStepBoxStyle.Width(90).Render(head) 77 | scr += "\n" 78 | 79 | scr += styles.TextYellow.Render(fmt.Sprintf("Steps count: %d", len(*job.Steps))) 80 | 81 | steps := "" 82 | for i, v := range *job.Steps { 83 | 84 | l.Printf("Job Details, step: %d name: %s\n", i, *v.Step.Name) 85 | step := styles.StatsSeparatorTitle.Render(fmt.Sprintf(fmtStrX, "Name", *v.Step.Name)) 86 | step += "\n" 87 
| step += fmt.Sprintf(fmtStr, "Nodes", *v.Nodes.Range) 88 | if *v.State != "COMPLETED" { 89 | step += styles.JobStepExitStatusRed.Render(fmt.Sprintf(fmtStrX, "State", *v.State)) 90 | step += "\n" 91 | } else { 92 | //step += fmt.Sprintf(fmtStr, "State", *v.State) 93 | step += styles.JobStepExitStatusGreen.Render(fmt.Sprintf(fmtStrX, "State", *v.State)) 94 | step += "\n" 95 | } 96 | if *v.ExitCode.Status != "SUCCESS" { 97 | step += styles.JobStepExitStatusRed.Render(fmt.Sprintf(fmtStrX, "ExitStatus", *v.ExitCode.Status)) 98 | step += "\n" 99 | } else { 100 | step += styles.JobStepExitStatusGreen.Render(fmt.Sprintf(fmtStrX, "ExitStatus", *v.ExitCode.Status)) 101 | step += "\n" 102 | } 103 | if *v.ExitCode.Status == "SIGNALED" { 104 | step += styles.JobStepExitStatusRed.Render(fmt.Sprintf(fmtStrX, "Signal ID", strconv.Itoa(*v.ExitCode.Signal.SignalId))) 105 | step += "\n" 106 | step += styles.JobStepExitStatusRed.Render(fmt.Sprintf(fmtStrX, "SignalName", *v.ExitCode.Signal.Name)) 107 | step += "\n" 108 | } 109 | if v.KillRequestUser != nil { 110 | step += fmt.Sprintf(fmtStr, "KillReqUser", *v.KillRequestUser) 111 | } 112 | step += fmt.Sprintf(fmtStr, "Tasks", strconv.Itoa(*v.Tasks.Count)) 113 | 114 | // TODO: TRES part needs quite some love... 
115 | tres := "" 116 | tresAlloc := "" 117 | 118 | //tresReqMin := "" 119 | //tresReqMax := "" 120 | //tresReqAvg := "" 121 | //tresReqTotal := "" 122 | //tresConMax := "" 123 | //tresConMin := "" 124 | // TRES: allocated 125 | tresAlloc += "\nALLOCATED:\n" 126 | l.Printf("Dumping step allocation: %#v\n", *v.Tres.Allocated) 127 | l.Printf("ALLOCATED:\n") 128 | for i, t := range *v.Tres.Allocated { 129 | if t.Count != nil { 130 | l.Printf("Dumping type %d : %s - %d\n", i, *t.Type, *t.Count) 131 | tresAlloc += "* " 132 | if *t.Type == "gres" { 133 | // TODO: 134 | //fmtStr := "%-20s : %-60s\n" 135 | tresAlloc += fmt.Sprintf(fmtStr, *t.Type, strings.Join([]string{*t.Name, strconv.Itoa(*t.Count)}, ":")) 136 | } else { 137 | // TODO: 138 | tresAlloc += fmt.Sprintf(fmtStr, *t.Type, strconv.Itoa(*t.Count)) 139 | } 140 | } 141 | } 142 | //// REQUESTED:MIN 143 | //tresReqMin += "REQUESTED:Min:\n" 144 | //l.Printf("REQ:Min\n") 145 | //for i, t := range *v.Tres.Requested.Min { 146 | // if t.Count != nil { 147 | // l.Printf("Dumping type %d : %s - %d\n", i, *t.Type, *t.Count) 148 | // tresReqMin += " " 149 | // tresReqMin += fmt.Sprintf(fmtStr, *t.Type, strconv.Itoa(*t.Count)) 150 | // } 151 | //} 152 | //// REQUESTED:MAX 153 | //l.Printf("REQ:Max\n") 154 | //tresReqMax += "REQUESTED:Max:\n" 155 | //for i, t := range *v.Tres.Requested.Min { 156 | // if t.Count != nil { 157 | // l.Printf("Dumping type %d : %s - %d\n", i, *t.Type, *t.Count) 158 | // tresReqMax += " " 159 | // tresReqMax += fmt.Sprintf(fmtStr, *t.Type, strconv.Itoa(*t.Count)) 160 | // } 161 | //} 162 | //// REQUESTED:AVG 163 | //l.Printf("REQ:Avg\n") 164 | //tresReqAvg += "REQUESTED:Avg:\n" 165 | //for i, t := range *v.Tres.Requested.Average { 166 | // if t.Count != nil { 167 | // l.Printf("Dumping type %d : %s - %d\n", i, *t.Type, *t.Count) 168 | // tresReqAvg += fmt.Sprintf(fmtStr, *t.Type, strconv.Itoa(*t.Count)) 169 | // } 170 | //} 171 | //// REQUESTED:TOT 172 | //tresReqAvg += "REQUESTED:Tot:\n" 173 | 
//l.Printf("REQ:Tot\n") 174 | //for i, t := range *v.Tres.Requested.Total { 175 | // if t.Count != nil { 176 | // l.Printf("Dumping type %d : %s - %d\n", i, *t.Type, *t.Count) 177 | // tresReqTotal += fmt.Sprintf(fmtStr, *t.Type, strconv.Itoa(*t.Count)) 178 | // } 179 | //} 180 | //// Consumed:Min 181 | //tresConMin += "CONSUMED:Min:\n" 182 | //l.Printf("CONS:Min\n") 183 | //for i, t := range *v.Tres.Consumed.Min { 184 | // if t.Count != nil { 185 | // l.Printf("Dumping type %d : %s - %d\n", i, *t.Type, *t.Count) 186 | // tresConMin += fmt.Sprintf(fmtStr, *t.Type, strconv.Itoa(*t.Count)) 187 | // } 188 | //} 189 | //// Consumed:Max 190 | //tresConMax += "CONSUMED:Max:\n" 191 | //l.Printf("CONS:Max\n") 192 | //for i, t := range *v.Tres.Consumed.Max { 193 | // if t.Count != nil { 194 | // l.Printf("Dumping type %d : %s - %d\n", i, *t.Type, *t.Count) 195 | // tresConMax += fmt.Sprintf(fmtStr, *t.Type, strconv.Itoa(*t.Count)) 196 | // } 197 | //} 198 | //tres = lipgloss.JoinHorizontal(lipgloss.Top, styles.TresBox.Render(tresAlloc), styles.TresBox.Width(40).Render(tresConMax)) 199 | 200 | // For now, show just allocated, later rework this whole part 201 | tres = styles.TresBox.Render(tresAlloc) 202 | 203 | step += tres 204 | 205 | // when the step is finished, append it to steps string 206 | steps += "\n" + styles.JobStepBoxStyle.Render(step) 207 | } 208 | scr += steps 209 | 210 | return scr 211 | } 212 | 213 | func (jd *JobDetailsTab) SetViewportContent(jh *jobhisttab.JobHistTab, l *log.Logger) string { 214 | var ( 215 | MainWindow strings.Builder 216 | ) 217 | 218 | jd.ViewPort.SetContent(jd.tabJobDetails(jh, l)) 219 | 220 | return MainWindow.String() 221 | } 222 | 223 | func (jd *JobDetailsTab) View(jh *jobhisttab.JobHistTab, l *log.Logger) string { 224 | var ( 225 | MainWindow strings.Builder 226 | ) 227 | 228 | MainWindow.WriteString(jd.ViewPort.View()) 229 | 230 | return MainWindow.String() 231 | } 232 | 
-------------------------------------------------------------------------------- /internal/model/tabs/jobfromtemplate/jobfromtemplate.go: -------------------------------------------------------------------------------- 1 | package jobfromtemplate 2 | 3 | import ( 4 | "bufio" 5 | "log" 6 | "os" 7 | "strconv" 8 | "strings" 9 | "time" 10 | 11 | "github.com/charmbracelet/bubbles/textarea" 12 | tea "github.com/charmbracelet/bubbletea" 13 | "github.com/CLIP-HPC/SlurmCommander/internal/defaults" 14 | "github.com/CLIP-HPC/SlurmCommander/internal/table" 15 | ) 16 | 17 | type JobFromTemplateTab struct { 18 | TemplatesTable table.Model 19 | TemplatesList TemplatesListRows 20 | TemplateEditor textarea.Model 21 | NewJobScript string 22 | EditTemplate bool 23 | } 24 | 25 | type EditTemplate bool 26 | 27 | func EditorOn() tea.Cmd { 28 | return func() tea.Msg { 29 | return EditTemplate(true) 30 | } 31 | } 32 | 33 | var TemplatesListCols = []table.Column{ 34 | { 35 | Title: "Name", 36 | Width: 20, 37 | }, 38 | { 39 | Title: "Description", 40 | Width: 40, 41 | }, 42 | { 43 | Title: "Path", 44 | Width: 100, 45 | }, 46 | } 47 | 48 | type TemplateText string 49 | 50 | func GetTemplate(name string, l *log.Logger) tea.Cmd { 51 | l.Printf("GetTemplate: open file: %s\n", name) 52 | return func() tea.Msg { 53 | 54 | t, e := os.ReadFile(name) 55 | if e != nil { 56 | l.Printf("GetTemplate ERROR: open(%s): %s", name, e) 57 | 58 | } 59 | return TemplateText(t) 60 | } 61 | } 62 | 63 | type TemplatesListRows []table.Row 64 | 65 | func GetTemplateList(paths []string, l *log.Logger) tea.Cmd { 66 | 67 | return func() tea.Msg { 68 | var tlr TemplatesListRows 69 | for _, p := range paths { 70 | l.Printf("GetTemplateList reading dir: %s\n", p) 71 | files, err := os.ReadDir(p) 72 | if err != nil { 73 | l.Printf("GetTemplateList ERROR: %s\n", err) 74 | } 75 | for _, f := range files { 76 | l.Printf("GetTemplateList INFO files: %s %s\n", p, f.Name()) 77 | // if suffix=".desc" then read content and use as 
description 78 | if strings.HasSuffix(f.Name(), ".sbatch") { 79 | sbatchPath := p + "/" + f.Name() 80 | descPath := p + "/" + strings.TrimSuffix(f.Name(), defaults.TemplatesSuffix) + defaults.TemplatesDescSuffix 81 | fd, err := os.Open(descPath) 82 | if err != nil { 83 | // handle error and put no desc in table 84 | l.Printf("GetTemplateList FAIL open desc file: %s\n", err) 85 | tlr = append(tlr, table.Row{f.Name(), "", sbatchPath}) 86 | } else { 87 | l.Printf("GetTemplateList INFO open desc file: %s\n", descPath) 88 | s := bufio.NewScanner(fd) 89 | if s.Scan() { 90 | tlr = append(tlr, table.Row{f.Name(), s.Text(), sbatchPath}) 91 | } else { 92 | l.Printf("GetTemplateList ERR scanning desc file: %s : %s\n", descPath, s.Err()) 93 | // then we put no description 94 | tlr = append(tlr, table.Row{f.Name(), "", sbatchPath}) 95 | } 96 | } 97 | } 98 | } 99 | 100 | } 101 | return tlr 102 | } 103 | 104 | } 105 | 106 | func SaveToFile(name string, content string, l *log.Logger) (string, error) { 107 | var ( 108 | ofName string 109 | ) 110 | ofName = strings.TrimSuffix(name, ".sbatch") 111 | ofName += "-" + strconv.FormatInt(time.Now().Unix(), 10) 112 | ofName += ".sbatch" 113 | l.Printf("SaveToFile INFO: OutputFileName %s\n", ofName) 114 | if err := os.WriteFile(ofName, []byte(content), 0644); err != nil { 115 | l.Printf("SaveToFile ERROR: File %s: %s\n", ofName, err) 116 | return "", err 117 | } 118 | return ofName, nil 119 | } 120 | -------------------------------------------------------------------------------- /internal/model/tabs/jobfromtemplate/jobfromtemplatekeys.go: -------------------------------------------------------------------------------- 1 | package jobfromtemplate 2 | 3 | import ( 4 | "github.com/CLIP-HPC/SlurmCommander/internal/keybindings" 5 | "github.com/charmbracelet/bubbles/key" 6 | ) 7 | 8 | type Keys []*key.Binding 9 | 10 | var KeyMap = Keys{ 11 | &keybindings.DefaultKeyMap.Up, 12 | &keybindings.DefaultKeyMap.Down, 13 | 
&keybindings.DefaultKeyMap.Enter, 14 | } 15 | 16 | var EditorKeyMap = Keys{ 17 | &keybindings.DefaultKeyMap.SaveSubmitJob, 18 | &keybindings.DefaultKeyMap.Escape, 19 | } 20 | 21 | func (ky *Keys) SetupKeys() { 22 | for _, k := range *ky { 23 | k.SetEnabled(true) 24 | } 25 | } 26 | 27 | func (ky *Keys) DisableKeys() { 28 | for _, k := range *ky { 29 | k.SetEnabled(false) 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /internal/model/tabs/jobfromtemplate/jobfromtemplateview.go: -------------------------------------------------------------------------------- 1 | package jobfromtemplate 2 | 3 | import ( 4 | "log" 5 | "strings" 6 | 7 | "github.com/CLIP-HPC/SlurmCommander/internal/styles" 8 | ) 9 | 10 | func (jft *JobFromTemplateTab) tabJobFromTemplate() string { 11 | 12 | if jft.EditTemplate { 13 | return jft.TemplateEditor.View() 14 | } else { 15 | if len(jft.TemplatesList) == 0 { 16 | return styles.NotFound.Render("\nNo templates found!\n") 17 | } else { 18 | return jft.TemplatesTable.View() 19 | } 20 | } 21 | } 22 | 23 | func (jft *JobFromTemplateTab) View(l *log.Logger) string { 24 | var ( 25 | MainWindow strings.Builder 26 | ) 27 | 28 | MainWindow.WriteString(jft.tabJobFromTemplate()) 29 | 30 | return MainWindow.String() 31 | } 32 | -------------------------------------------------------------------------------- /internal/model/tabs/jobhisttab/jobhisttab.go: -------------------------------------------------------------------------------- 1 | package jobhisttab 2 | 3 | import ( 4 | "log" 5 | "time" 6 | "strconv" 7 | 8 | "github.com/charmbracelet/bubbles/textinput" 9 | "github.com/CLIP-HPC/SlurmCommander/internal/generic" 10 | "github.com/CLIP-HPC/SlurmCommander/internal/stats" 11 | "github.com/CLIP-HPC/SlurmCommander/internal/table" 12 | ) 13 | 14 | type JobHistTab struct { 15 | StatsOn bool 16 | CountsOn bool 17 | FilterOn bool 18 | UserInputsOn bool // allow user to add/modify parameters for slurm commands 19 | 
HistFetched bool // signals View() if sacct call is finished, to print "waiting for..." message 20 | HistFetchFail bool // if sacct call times out/errors, this is set to true 21 | JobHistStart string 22 | JobHistEnd string 23 | JobHistTimeout uint 24 | SacctTable table.Model 25 | SacctHist SacctJSON 26 | SacctHistFiltered SacctJSON 27 | Filter textinput.Model 28 | UserInputs generic.UserInputs 29 | Stats 30 | Breakdowns 31 | } 32 | 33 | type Stats struct { 34 | StateCnt map[string]uint 35 | AvgWait time.Duration 36 | MinWait time.Duration 37 | MaxWait time.Duration 38 | MedWait time.Duration 39 | AvgRun time.Duration 40 | MinRun time.Duration 41 | MaxRun time.Duration 42 | MedRun time.Duration 43 | SDWait int 44 | } 45 | 46 | type Breakdowns struct { 47 | Top5user generic.CountItemSlice 48 | Top5acc generic.CountItemSlice 49 | JobPerQos generic.CountItemSlice 50 | JobPerPart generic.CountItemSlice 51 | } 52 | 53 | func NewUserInputs(t uint, starttime string, endtime string) generic.UserInputs { 54 | var tmp_s string 55 | var tmp_t textinput.Model 56 | 57 | // User Input (for modifying the table/view) 58 | userinput := generic.UserInputs { 59 | FocusIndex: 0, 60 | ParamTexts: make([]string, 3), 61 | Params: make([]textinput.Model, 3), 62 | } 63 | 64 | // TODO: we should probably think of encapsulating this 65 | for i := range userinput.Params { 66 | tmp_t = textinput.New() 67 | 68 | // FIXME we need to generalise this 69 | switch i { 70 | case 0: 71 | tmp_t.Placeholder = "Timeout (s)" 72 | tmp_t.SetValue(strconv.FormatInt(int64(t), 10)) 73 | tmp_t.Focus() 74 | tmp_t.CharLimit = 30 75 | tmp_t.Width = 30 76 | tmp_s = "Timeout" 77 | 78 | case 1: 79 | tmp_t.Placeholder = "Starttime" 80 | tmp_t.SetValue(starttime) 81 | tmp_t.CharLimit = 50 82 | tmp_t.Width = 50 83 | tmp_s = "Starttime" 84 | 85 | case 2: 86 | tmp_t.Placeholder = "Endtime" 87 | tmp_t.SetValue(endtime) 88 | tmp_t.CharLimit = 50 89 | tmp_t.Width = 50 90 | tmp_s = "Endtime" 91 | 92 | } 93 | 94 | 
userinput.Params[i] = tmp_t 95 | userinput.ParamTexts[i] = tmp_s 96 | } 97 | 98 | return userinput 99 | } 100 | 101 | func (t *JobHistTab) AdjTableHeight(h int, l *log.Logger) { 102 | l.Printf("FixTableHeight(%d) from %d\n", h, t.SacctTable.Height()) 103 | if t.CountsOn || t.FilterOn || t.UserInputsOn { 104 | t.SacctTable.SetHeight(h - 31) 105 | } else { 106 | t.SacctTable.SetHeight(h - 16) 107 | } 108 | l.Printf("FixTableHeight to %d\n", t.SacctTable.Height()) 109 | } 110 | 111 | func (t *JobHistTab) GetStatsFiltered(l *log.Logger) { 112 | top5user := generic.CountItemMap{} 113 | top5acc := generic.CountItemMap{} 114 | jpq := generic.CountItemMap{} 115 | jpp := generic.CountItemMap{} 116 | 117 | t.Stats.StateCnt = map[string]uint{} 118 | tmp := []time.Duration{} // waiting times 119 | tmpRun := []time.Duration{} // running times 120 | t.AvgWait = 0 121 | t.MedWait = 0 122 | 123 | l.Printf("GetStatsFiltered start on %d rows\n", len(t.SacctHistFiltered.Jobs)) 124 | 125 | for _, v := range t.SacctHistFiltered.Jobs { 126 | t.Stats.StateCnt[*v.State.Current]++ 127 | //l.Printf("TIME: submit=%d, start=%d, end=%d\n", *v.Time.Submission, *v.Time.Start, *v.Time.End) 128 | switch *v.State.Current { 129 | case "PENDING": 130 | // no *v.Time.Start 131 | tmp = append(tmp, time.Since(time.Unix(int64(*v.Time.Submission), 0))) 132 | case "RUNNING": 133 | // no *v.Time.End 134 | tmpRun = append(tmpRun, time.Since(time.Unix(int64(*v.Time.Start), 0))) 135 | default: 136 | tmp = append(tmp, time.Unix(int64(*v.Time.Start), 0).Sub(time.Unix(int64(*v.Time.Submission), 0))) 137 | tmpRun = append(tmpRun, time.Unix(int64(*v.Time.End), 0).Sub(time.Unix(int64(*v.Time.Start), 0))) 138 | 139 | } 140 | // Breakdowns: 141 | if _, ok := top5acc[*v.Account]; !ok { 142 | top5acc[*v.Account] = &generic.CountItem{} 143 | } 144 | if _, ok := top5user[*v.User]; !ok { 145 | top5user[*v.User] = &generic.CountItem{} 146 | } 147 | if _, ok := jpp[*v.Partition]; !ok { 148 | jpp[*v.Partition] = 
&generic.CountItem{} 149 | } 150 | if _, ok := jpq[*v.Qos]; !ok { 151 | jpq[*v.Qos] = &generic.CountItem{} 152 | } 153 | top5acc[*v.Account].Count++ 154 | top5user[*v.User].Count++ 155 | jpp[*v.Partition].Count++ 156 | jpq[*v.Qos].Count++ 157 | } 158 | 159 | // sort & filter breakdowns 160 | t.Breakdowns.Top5user = generic.Top5(generic.SortItemMapBySel("Count", &top5user)) 161 | t.Breakdowns.Top5acc = generic.Top5(generic.SortItemMapBySel("Count", &top5acc)) 162 | t.Breakdowns.JobPerPart = generic.SortItemMapBySel("Count", &jpp) 163 | t.Breakdowns.JobPerQos = generic.SortItemMapBySel("Count", &jpq) 164 | 165 | t.MedWait, t.MinWait, t.MaxWait = stats.Median(tmp) 166 | t.MedRun, t.MinRun, t.MaxRun = stats.Median(tmpRun) 167 | t.AvgWait = stats.Avg(tmp) 168 | t.AvgRun = stats.Avg(tmpRun) 169 | 170 | l.Printf("GetStatsFiltered end\n") 171 | } 172 | -------------------------------------------------------------------------------- /internal/model/tabs/jobhisttab/jobhisttabcommands.go: -------------------------------------------------------------------------------- 1 | package jobhisttab 2 | 3 | import ( 4 | "context" 5 | "strings" 6 | "encoding/json" 7 | "log" 8 | "os/exec" 9 | "time" 10 | 11 | "github.com/CLIP-HPC/SlurmCommander/internal/command" 12 | "github.com/CLIP-HPC/SlurmCommander/internal/config" 13 | tea "github.com/charmbracelet/bubbletea" 14 | ) 15 | 16 | var ( 17 | cc config.ConfigContainer 18 | SacctHistCmdSwitches = []string{"-n", "--json"} 19 | ) 20 | 21 | func NewCmdCC(config config.ConfigContainer) { 22 | cc = config 23 | } 24 | 25 | type JobHistTabMsg struct { 26 | HistFetchFail bool 27 | SacctJSON 28 | } 29 | 30 | func GetSacctHist(uaccs string, start string, end string, t uint, l *log.Logger) tea.Cmd { 31 | return func() tea.Msg { 32 | var ( 33 | jht JobHistTabMsg 34 | ) 35 | 36 | l.Printf("GetSacctHist(%q) start: %s, end: %s, timeout: %d\n", uaccs, start, end, t) 37 | 38 | // setup context with user set timeout in seconds 39 | ctx, cancel := 
context.WithTimeout(context.Background(), time.Duration(t)*time.Second) 40 | defer cancel() 41 | 42 | // prepare command 43 | cmd := cc.Binpaths["sacct"] 44 | switches := append(SacctHistCmdSwitches, "-A", uaccs) 45 | if strings.TrimSpace(start) != "" { 46 | switches = append(switches, "-S", start) 47 | } 48 | if strings.TrimSpace(end) != "" { 49 | switches = append(switches, "-E", end) 50 | } 51 | 52 | l.Printf("EXEC: %q %q\n", cmd, switches) 53 | out, err := exec.CommandContext(ctx, cmd, switches...).Output() 54 | if err != nil { 55 | l.Printf("Error exec sacct: %q\n", err) 56 | // set error, return. 57 | // TODO: see how to fit this with the new commands-return-command.ErrorMsg pattern 58 | jht.HistFetchFail = true 59 | return jht 60 | } 61 | l.Printf("EXEC returned: %d bytes\n", len(out)) 62 | 63 | err = json.Unmarshal(out, &jht.SacctJSON) 64 | if err != nil { 65 | l.Printf("Error unmarshall: %q\n", err) 66 | return command.ErrorMsg{ 67 | From: "GetSacctHist", 68 | ErrHelp: "sacct JSON failed to parse, note your slurm version and open an issue with us here: https://github.com/CLIP-HPC/SlurmCommander/issues/new/choose", 69 | OrigErr: err, 70 | } 71 | } 72 | 73 | jht.HistFetchFail = false 74 | l.Printf("Unmarshalled %d jobs from hist\n", len(jht.SacctJSON.Jobs)) 75 | 76 | return jht 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /internal/model/tabs/jobhisttab/jobhisttabkeys.go: -------------------------------------------------------------------------------- 1 | package jobhisttab 2 | 3 | import ( 4 | "github.com/charmbracelet/bubbles/key" 5 | "github.com/CLIP-HPC/SlurmCommander/internal/keybindings" 6 | ) 7 | 8 | type Keys []*key.Binding 9 | 10 | var KeyMap = Keys{ 11 | &keybindings.DefaultKeyMap.Up, 12 | &keybindings.DefaultKeyMap.Down, 13 | &keybindings.DefaultKeyMap.PageUp, 14 | &keybindings.DefaultKeyMap.PageDown, 15 | &keybindings.DefaultKeyMap.Slash, 16 | &keybindings.DefaultKeyMap.Refresh, 17 | 
&keybindings.DefaultKeyMap.TimeRange, 18 | &keybindings.DefaultKeyMap.Enter, 19 | &keybindings.DefaultKeyMap.Stats, 20 | &keybindings.DefaultKeyMap.Count, 21 | } 22 | 23 | func (ky *Keys) SetupKeys() { 24 | for _, k := range *ky { 25 | k.SetEnabled(true) 26 | } 27 | } 28 | 29 | func (ky *Keys) DisableKeys() { 30 | for _, k := range *ky { 31 | k.SetEnabled(false) 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /internal/model/tabs/jobhisttab/jobhisttabtable.go: -------------------------------------------------------------------------------- 1 | package jobhisttab 2 | 3 | import ( 4 | "log" 5 | "regexp" 6 | "strconv" 7 | "strings" 8 | 9 | "github.com/CLIP-HPC/SlurmCommander/internal/command" 10 | "github.com/CLIP-HPC/SlurmCommander/internal/slurm" 11 | "github.com/CLIP-HPC/SlurmCommander/internal/table" 12 | ) 13 | 14 | var SacctTabCols = []table.Column{ 15 | { 16 | Title: "Job ID", 17 | Width: 10, 18 | }, 19 | { 20 | Title: "Job Name", 21 | Width: 35, 22 | }, 23 | { 24 | Title: "Part.", 25 | Width: 5, 26 | }, 27 | { 28 | Title: "QoS", 29 | Width: 10, 30 | }, 31 | { 32 | Title: "Account", 33 | Width: 10, 34 | }, 35 | { 36 | Title: "User", 37 | Width: 15, 38 | }, 39 | { 40 | Title: "Nodes", 41 | Width: 20, 42 | }, 43 | { 44 | Title: "State", 45 | Width: 10, 46 | }, 47 | } 48 | 49 | type SacctJSON slurm.SacctJSON 50 | type TableRows []table.Row 51 | 52 | func (saList *SacctJSON) FilterSacctTable(f string, l *log.Logger) (*TableRows, *SacctJSON, *command.ErrorMsg) { 53 | var ( 54 | saTabRows = TableRows{} 55 | sacctHistFiltered = SacctJSON{} 56 | errMsg *command.ErrorMsg 57 | re *regexp.Regexp 58 | ) 59 | 60 | l.Printf("FilterSacctTable: rows %d", len(saList.Jobs)) 61 | re, err := regexp.Compile(f) 62 | if err != nil { 63 | l.Printf("FAIL: compile regexp: %q with err: %s", f, err) 64 | f = "" 65 | re, _ = regexp.Compile(f) 66 | errMsg = &command.ErrorMsg{ 67 | From: "FilterSacctTable", 68 | ErrHelp: "Regular expression 
failed to compile, please correct it (turn on DEBUG to see details)", 69 | OrigErr: err, 70 | } 71 | } 72 | 73 | for _, v := range saList.Jobs { 74 | 75 | // https://github.com/CLIP-HPC/SlurmCommander/issues/9 76 | // Entry without JobId, log&discard 77 | switch { 78 | case v.JobId == nil: 79 | l.Printf("FilterSacctTable: Found job with no JobId field, skipping...\n") 80 | continue 81 | } 82 | 83 | line := strings.Join([]string{ 84 | strconv.Itoa(*v.JobId), 85 | *v.Name, 86 | *v.Qos, 87 | *v.Account, 88 | *v.User, 89 | *v.State.Current, 90 | }, ".") 91 | 92 | if re.MatchString(line) { 93 | saTabRows = append(saTabRows, table.Row{strconv.Itoa(*v.JobId), *v.Name, *v.Partition, *v.Qos, *v.Account, *v.User, *v.Nodes, *v.State.Current}) 94 | sacctHistFiltered.Jobs = append(sacctHistFiltered.Jobs, v) 95 | } 96 | } 97 | 98 | return &saTabRows, &sacctHistFiltered, errMsg 99 | } 100 | -------------------------------------------------------------------------------- /internal/model/tabs/jobhisttab/jobhisttabview.go: -------------------------------------------------------------------------------- 1 | package jobhisttab 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "strings" 7 | 8 | "github.com/charmbracelet/lipgloss" 9 | "github.com/CLIP-HPC/SlurmCommander/internal/generic" 10 | "github.com/CLIP-HPC/SlurmCommander/internal/styles" 11 | ) 12 | 13 | func (jh *JobHistTab) tabJobHist() string { 14 | 15 | return jh.SacctTable.View() + "\n" 16 | } 17 | 18 | func (jh *JobHistTab) JobHistTabStats(l *log.Logger) string { 19 | 20 | l.Printf("JobHistTabStats called\n") 21 | 22 | str := styles.StatsSeparatorTitle.Render(fmt.Sprintf("%-30s", "Historical job states (filtered):")) 23 | str += "\n" 24 | str += generic.GenCountStrVert(jh.Stats.StateCnt, l) 25 | 26 | str += styles.StatsSeparatorTitle.Render(fmt.Sprintf("%-30s", "Waiting times (finished jobs):")) 27 | str += "\n" 28 | str += fmt.Sprintf("%-10s : %s\n", " ", "dd-hh:mm:ss") 29 | str += fmt.Sprintf("%-10s : %s\n", "MinWait", 
generic.HumanizeDuration(jh.Stats.MinWait, l)) 30 | str += fmt.Sprintf("%-10s : %s\n", "AvgWait", generic.HumanizeDuration(jh.Stats.AvgWait, l)) 31 | str += fmt.Sprintf("%-10s : %s\n", "MedWait", generic.HumanizeDuration(jh.Stats.MedWait, l)) 32 | str += fmt.Sprintf("%-10s : %s\n", "MaxWait", generic.HumanizeDuration(jh.Stats.MaxWait, l)) 33 | 34 | str += "\n" 35 | str += styles.StatsSeparatorTitle.Render(fmt.Sprintf("%-30s", "Run times (finished jobs):")) 36 | str += "\n" 37 | str += fmt.Sprintf("%-10s : %s\n", " ", "dd-hh:mm:ss") 38 | str += fmt.Sprintf("%-10s : %s\n", "MinRun", generic.HumanizeDuration(jh.Stats.MinRun, l)) 39 | str += fmt.Sprintf("%-10s : %s\n", "AvgRun", generic.HumanizeDuration(jh.Stats.AvgRun, l)) 40 | str += fmt.Sprintf("%-10s : %s\n", "MedRun", generic.HumanizeDuration(jh.Stats.MedRun, l)) 41 | str += fmt.Sprintf("%-10s : %s\n", "MaxRun", generic.HumanizeDuration(jh.Stats.MaxRun, l)) 42 | 43 | return str 44 | } 45 | 46 | func (jh *JobHistTab) getJobHistCounts() string { 47 | var ( 48 | ret string 49 | top5u string 50 | top5a string 51 | jpp string 52 | jpq string 53 | ) 54 | 55 | fmtStr := "%-20s : %6d\n" 56 | fmtTitle := "%-29s" 57 | 58 | top5u += styles.TextYellowOnBlue.Render(fmt.Sprintf(fmtTitle, "Top 5 User")) 59 | top5u += "\n" 60 | for _, v := range jh.Breakdowns.Top5user { 61 | top5u += fmt.Sprintf(fmtStr, v.Name, v.Count) 62 | } 63 | 64 | top5a += styles.TextYellowOnBlue.Render(fmt.Sprintf(fmtTitle, "Top 5 Accounts")) 65 | top5a += "\n" 66 | for _, v := range jh.Breakdowns.Top5acc { 67 | top5a += fmt.Sprintf(fmtStr, v.Name, v.Count) 68 | } 69 | 70 | jpp += styles.TextYellowOnBlue.Render(fmt.Sprintf(fmtTitle, "Jobs per Partition")) 71 | jpp += "\n" 72 | for _, v := range jh.Breakdowns.JobPerPart { 73 | jpp += fmt.Sprintf(fmtStr, v.Name, v.Count) 74 | } 75 | 76 | jpq += styles.TextYellowOnBlue.Render(fmt.Sprintf(fmtTitle, "Jobs per QoS")) 77 | jpq += "\n" 78 | for _, v := range jh.Breakdowns.JobPerQos { 79 | jpq += 
fmt.Sprintf(fmtStr, v.Name, v.Count) 80 | } 81 | 82 | top5u = styles.CountsBox.Render(top5u) 83 | top5a = styles.CountsBox.Render(top5a) 84 | jpq = styles.CountsBox.Render(jpq) 85 | jpp = styles.CountsBox.Render(jpp) 86 | 87 | ret = lipgloss.JoinHorizontal(lipgloss.Top, top5u, top5a, jpp, jpq) 88 | 89 | return ret 90 | } 91 | 92 | func (jh *JobHistTab) View(l *log.Logger) string { 93 | var ( 94 | Header strings.Builder 95 | MainWindow strings.Builder 96 | ) 97 | 98 | // If sacct timed out/errored, instruct the user to reduce fetch period from default 7 days 99 | l.Printf("HistFetch: %t HistFetchFail: %t\n", jh.HistFetched, jh.HistFetchFail) 100 | if jh.HistFetchFail { 101 | Header.WriteString(fmt.Sprintf("Fetching jobs history failed! Maybe the timeout was too short (%d seconds)? See the returned error message:\n", jh.JobHistTimeout)) 102 | Header.WriteString("You can modify the time ranges or timeout using the 'time-ranges' menu\n") 103 | } 104 | 105 | // Check if history is here, if not, return "Waiting for sacct..." 
106 | if !jh.HistFetchFail && !jh.HistFetched { 107 | Header.WriteString(fmt.Sprintf("Waiting for job history...(%d seconds timeout)\n", jh.JobHistTimeout)) 108 | return Header.String() 109 | } 110 | 111 | if !jh.HistFetchFail { 112 | // Show parameters to the user 113 | Header.WriteString(fmt.Sprintf("%s Start: %10.20s\tEnd: %10.20s\tTimeout: %d\n", styles.TextYellowOnBlue.Render("Parameters"), jh.JobHistStart, jh.JobHistEnd, jh.JobHistTimeout)) 114 | Header.WriteString(fmt.Sprintf(" %s Query: %10.20s\tItems: %d\n", styles.TextYellowOnBlue.Render("Filter"), jh.Filter.Value(), len(jh.SacctHistFiltered.Jobs))) 115 | Header.WriteString("\n") 116 | 117 | // Top Main 118 | MainWindow.WriteString(jh.tabJobHist()) 119 | } else { 120 | MainWindow.WriteString("\n") 121 | } 122 | 123 | // Next we join table Vertically with: nil || filter || params || counts 124 | switch { 125 | case jh.FilterOn: 126 | MainWindow.WriteString("\n") 127 | MainWindow.WriteString("Filter value (search in joined: JobID + JobName + QoS + AccountName + UserName + JobState):\n") 128 | MainWindow.WriteString(fmt.Sprintf("%s\n", jh.Filter.View())) 129 | MainWindow.WriteString("(Enter to apply, Esc to clear filter and abort, Regular expressions supported, syntax details: https://golang.org/s/re2syntax)\n") 130 | 131 | case jh.UserInputsOn: 132 | MainWindow.WriteString("\n") 133 | MainWindow.WriteString(fmt.Sprintf("Command Parameters:\n")) 134 | for i := range jh.UserInputs.Params { 135 | MainWindow.WriteString(fmt.Sprintf("%s: %s\n", jh.UserInputs.ParamTexts[i], jh.UserInputs.Params[i].View())) 136 | } 137 | MainWindow.WriteString("(Enter to apply, or Esc to clear params and abort)\n") 138 | 139 | case jh.CountsOn: 140 | // Counts on 141 | MainWindow.WriteString("\n") 142 | MainWindow.WriteString(styles.JobInfoBox.Render(jh.getJobHistCounts())) 143 | } 144 | 145 | // Last, if needed we join Stats Horizontally with Main 146 | switch { 147 | case jh.StatsOn: 148 | // table + stats 149 | X := 
MainWindow.String() 150 | MainWindow.Reset() 151 | MainWindow.WriteString(lipgloss.JoinHorizontal(lipgloss.Top, X, styles.StatsBoxStyle.Render(jh.JobHistTabStats(l)))) 152 | } 153 | 154 | return Header.String() + MainWindow.String() 155 | } 156 | -------------------------------------------------------------------------------- /internal/model/tabs/jobtab/jobtab.go: -------------------------------------------------------------------------------- 1 | package jobtab 2 | 3 | import ( 4 | "log" 5 | "time" 6 | 7 | "github.com/charmbracelet/bubbles/list" 8 | "github.com/charmbracelet/bubbles/textinput" 9 | "github.com/CLIP-HPC/SlurmCommander/internal/generic" 10 | "github.com/CLIP-HPC/SlurmCommander/internal/stats" 11 | "github.com/CLIP-HPC/SlurmCommander/internal/table" 12 | ) 13 | 14 | type JobTab struct { 15 | InfoOn bool 16 | CountsOn bool 17 | StatsOn bool 18 | FilterOn bool 19 | SqueueTable table.Model 20 | Squeue SqueueJSON 21 | SqueueFiltered SqueueJSON 22 | Filter textinput.Model 23 | SelectedJob string 24 | SelectedJobState string 25 | MenuOn bool 26 | MenuChoice MenuItem 27 | Menu list.Model 28 | Stats 29 | Breakdowns 30 | } 31 | 32 | type Stats struct { 33 | // TODO: also perhaps: count by user? account? 
34 | StateCnt map[string]uint 35 | AvgWait time.Duration 36 | MinWait time.Duration 37 | MaxWait time.Duration 38 | MedWait time.Duration 39 | AvgRun time.Duration 40 | MinRun time.Duration 41 | MaxRun time.Duration 42 | MedRun time.Duration 43 | } 44 | 45 | type Breakdowns struct { 46 | Top5user generic.CountItemSlice 47 | Top5acc generic.CountItemSlice 48 | JobPerQos generic.CountItemSlice 49 | JobPerPart generic.CountItemSlice 50 | } 51 | 52 | func (t *JobTab) AdjTableHeight(h int, l *log.Logger) { 53 | l.Printf("FixTableHeight(%d) from %d\n", h, t.SqueueTable.Height()) 54 | if t.InfoOn || t.CountsOn || t.FilterOn { 55 | t.SqueueTable.SetHeight(h - 30) 56 | } else { 57 | t.SqueueTable.SetHeight(h - 15) 58 | } 59 | l.Printf("FixTableHeight to %d\n", t.SqueueTable.Height()) 60 | } 61 | 62 | func (t *JobTab) GetStatsFiltered(l *log.Logger) { 63 | 64 | top5user := generic.CountItemMap{} 65 | top5acc := generic.CountItemMap{} 66 | jpq := generic.CountItemMap{} 67 | jpp := generic.CountItemMap{} 68 | 69 | t.Stats.StateCnt = map[string]uint{} 70 | tmp := []time.Duration{} 71 | tmpRun := []time.Duration{} 72 | t.AvgWait = 0 73 | t.MedWait = 0 74 | 75 | l.Printf("GetStatsFiltered start on %d rows\n", len(t.SqueueFiltered.Jobs)) 76 | for _, v := range t.SqueueFiltered.Jobs { 77 | t.Stats.StateCnt[*v.JobState]++ 78 | switch *v.JobState { 79 | case "PENDING": 80 | tmp = append(tmp, time.Since(time.Unix(int64(*v.SubmitTime), 0))) 81 | case "RUNNING": 82 | tmpRun = append(tmpRun, time.Since(time.Unix(int64(*v.StartTime), 0))) 83 | } 84 | 85 | // Breakdowns: 86 | if _, ok := top5acc[*v.Account]; !ok { 87 | top5acc[*v.Account] = &generic.CountItem{} 88 | } 89 | if _, ok := top5user[*v.UserName]; !ok { 90 | top5user[*v.UserName] = &generic.CountItem{} 91 | } 92 | if _, ok := jpp[*v.Partition]; !ok { 93 | jpp[*v.Partition] = &generic.CountItem{} 94 | } 95 | if _, ok := jpq[*v.Qos]; !ok { 96 | jpq[*v.Qos] = &generic.CountItem{} 97 | } 98 | top5acc[*v.Account].Count++ 99 | 
top5user[*v.UserName].Count++ 100 | jpp[*v.Partition].Count++ 101 | jpq[*v.Qos].Count++ 102 | } 103 | 104 | // sort & filter breakdowns 105 | t.Breakdowns.Top5user = generic.Top5(generic.SortItemMapBySel("Count", &top5user)) 106 | t.Breakdowns.Top5acc = generic.Top5(generic.SortItemMapBySel("Count", &top5acc)) 107 | t.Breakdowns.JobPerPart = generic.SortItemMapBySel("Count", &jpp) 108 | t.Breakdowns.JobPerQos = generic.SortItemMapBySel("Count", &jpq) 109 | 110 | //l.Printf("TOP5USER: %#v\n", t.Breakdowns.Top5user) 111 | //l.Printf("TOP5ACC: %#v\n", t.Breakdowns.Top5acc) 112 | //l.Printf("JobPerQos: %#v\n", t.Breakdowns.JobPerQos) 113 | //l.Printf("JobPerPart: %#v\n", t.Breakdowns.JobPerPart) 114 | 115 | t.MedWait, t.MinWait, t.MaxWait = stats.Median(tmp) 116 | t.MedRun, t.MinRun, t.MaxRun = stats.Median(tmpRun) 117 | t.AvgWait = stats.Avg(tmp) 118 | t.AvgRun = stats.Avg(tmpRun) 119 | 120 | l.Printf("GetStatsFiltered end\n") 121 | } 122 | -------------------------------------------------------------------------------- /internal/model/tabs/jobtab/jobtabcommands.go: -------------------------------------------------------------------------------- 1 | package jobtab 2 | 3 | import ( 4 | "encoding/json" 5 | "log" 6 | "os/exec" 7 | "time" 8 | 9 | "github.com/CLIP-HPC/SlurmCommander/internal/command" 10 | "github.com/CLIP-HPC/SlurmCommander/internal/config" 11 | tea "github.com/charmbracelet/bubbletea" 12 | ) 13 | 14 | var ( 15 | cc config.ConfigContainer 16 | SqueueCmdSwitches = []string{"-a", "--json"} 17 | ) 18 | 19 | func NewCmdCC(config config.ConfigContainer) { 20 | cc = config 21 | } 22 | 23 | // Calls `squeue` to get job information for Jobs Tab 24 | func GetSqueue(t time.Time) tea.Msg { 25 | 26 | var sqJson SqueueJSON 27 | 28 | cmd := cc.Binpaths["squeue"] 29 | out, err := exec.Command(cmd, SqueueCmdSwitches...).Output() 30 | if err != nil { 31 | return command.ErrorMsg{ 32 | From: "GetSqueue", 33 | ErrHelp: "Failed to run squeue command, check your scom.conf and 
set the correct paths there.", 34 | OrigErr: err, 35 | } 36 | } 37 | 38 | err = json.Unmarshal(out, &sqJson) 39 | if err != nil { 40 | return command.ErrorMsg{ 41 | From: "GetSqueue", 42 | ErrHelp: "squeue JSON failed to parse, note your slurm version and open an issue with us here: https://github.com/CLIP-HPC/SlurmCommander/issues/new/choose", 43 | OrigErr: err, 44 | } 45 | } 46 | 47 | return sqJson 48 | } 49 | 50 | func TimedGetSqueue(l *log.Logger) tea.Cmd { 51 | l.Printf("TimedGetSqueue() start, tick: %d\n", cc.GetTick()) 52 | return tea.Tick(cc.GetTick()*time.Second, GetSqueue) 53 | } 54 | 55 | func QuickGetSqueue(l *log.Logger) tea.Cmd { 56 | l.Printf("QuickGetSqueue() start\n") 57 | return tea.Tick(0*time.Second, GetSqueue) 58 | } 59 | -------------------------------------------------------------------------------- /internal/model/tabs/jobtab/jobtabkeys.go: -------------------------------------------------------------------------------- 1 | package jobtab 2 | 3 | import ( 4 | "github.com/charmbracelet/bubbles/key" 5 | "github.com/CLIP-HPC/SlurmCommander/internal/keybindings" 6 | ) 7 | 8 | type Keys []*key.Binding 9 | 10 | var KeyMap = Keys{ 11 | &keybindings.DefaultKeyMap.Up, 12 | &keybindings.DefaultKeyMap.Down, 13 | &keybindings.DefaultKeyMap.PageUp, 14 | &keybindings.DefaultKeyMap.PageDown, 15 | &keybindings.DefaultKeyMap.Slash, 16 | &keybindings.DefaultKeyMap.Info, 17 | &keybindings.DefaultKeyMap.Enter, 18 | &keybindings.DefaultKeyMap.Stats, 19 | &keybindings.DefaultKeyMap.Count, 20 | } 21 | 22 | func (ky *Keys) SetupKeys() { 23 | for _, k := range *ky { 24 | k.SetEnabled(true) 25 | } 26 | } 27 | 28 | func (ky *Keys) DisableKeys() { 29 | for _, k := range *ky { 30 | k.SetEnabled(false) 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /internal/model/tabs/jobtab/jobtabmenu.go: -------------------------------------------------------------------------------- 1 | package jobtab 2 | 3 | import ( 4 | "log" 5 | 
6 | "github.com/charmbracelet/bubbles/list" 7 | tea "github.com/charmbracelet/bubbletea" 8 | "github.com/CLIP-HPC/SlurmCommander/internal/command" 9 | "github.com/CLIP-HPC/SlurmCommander/internal/styles" 10 | ) 11 | 12 | type JobMenuOptions map[string]MenuOptions 13 | 14 | type MenuOptions []list.Item 15 | type MenuItem struct { 16 | action string 17 | description string 18 | } 19 | 20 | var MenuList = JobMenuOptions{ 21 | "PENDING": MenuOptions{ 22 | MenuItem{ 23 | action: "INFO", 24 | description: "Show job information", 25 | }, 26 | MenuItem{ 27 | action: "CANCEL", 28 | description: "Cancel the selected job", 29 | }, 30 | MenuItem{ 31 | action: "HOLD", 32 | description: "Prevent a job from starting", 33 | }, 34 | }, 35 | "RUNNING": MenuOptions{ 36 | MenuItem{ 37 | action: "INFO", 38 | description: "Show job information", 39 | }, 40 | MenuItem{ 41 | action: "CANCEL", 42 | description: "Cancel the selected job", 43 | }, 44 | MenuItem{ 45 | action: "SSH", 46 | description: "Ssh connect to job batch node", 47 | }, 48 | MenuItem{ 49 | action: "REQUEUE", 50 | description: "Stop the job and send it back to Queue", 51 | }, 52 | }, 53 | } 54 | 55 | func (i MenuItem) GetAction() string { 56 | return i.action 57 | } 58 | func (i MenuItem) FilterValue() string { 59 | return "" 60 | } 61 | func (i MenuItem) Title() string { 62 | return i.action 63 | } 64 | func (i MenuItem) Description() string { 65 | return i.description 66 | } 67 | 68 | func (m *MenuItem) ExecMenuItem(jobID string, node string, l *log.Logger) tea.Cmd { 69 | 70 | l.Printf("ExecMenuItem() jobID=%s node=%s m.action=%s\n", jobID, node, m.action) 71 | 72 | // TODO: move related commands to jobtab package 73 | switch m.action { 74 | case "CANCEL": 75 | return command.CallScancel(jobID, l) 76 | case "HOLD": 77 | return command.CallScontrolHold(jobID, l) 78 | case "REQUEUE": 79 | return command.CallScontrolRequeue(jobID, l) 80 | case "SSH": 81 | return command.CallSsh(node, l) 82 | } 83 | 84 | return nil 85 | } 86 
| 87 | func NewMenu(selJobState string, l *log.Logger) list.Model { 88 | var lm list.Model = list.Model{} 89 | 90 | menuOpts := MenuList[selJobState] 91 | l.Printf("MENU Options%#v\n", MenuList[selJobState]) 92 | 93 | defDel := list.NewDefaultDelegate() 94 | defStyles := list.NewDefaultItemStyles() 95 | defStyles.NormalTitle = styles.MenuNormalTitle 96 | defStyles.SelectedTitle = styles.MenuSelectedTitle 97 | //defStyles.NormalDesc = styles.MenuNormalDesc 98 | defStyles.SelectedDesc = styles.MenuSelectedDesc 99 | defDel.Styles = defStyles 100 | lm = list.New(menuOpts, defDel, 10, 10) 101 | lm.Title = "Job actions" 102 | lm.SetShowStatusBar(true) 103 | lm.SetFilteringEnabled(false) 104 | lm.SetShowHelp(false) 105 | lm.SetShowPagination(true) 106 | lm.SetSize(30, 17) 107 | lm.Styles.Title = styles.MenuTitleStyle 108 | 109 | return lm 110 | } 111 | -------------------------------------------------------------------------------- /internal/model/tabs/jobtab/jobtabtable.go: -------------------------------------------------------------------------------- 1 | package jobtab 2 | 3 | import ( 4 | "log" 5 | "regexp" 6 | "strconv" 7 | "strings" 8 | "time" 9 | 10 | "github.com/CLIP-HPC/SlurmCommander/internal/command" 11 | "github.com/CLIP-HPC/SlurmCommander/internal/slurm" 12 | "github.com/CLIP-HPC/SlurmCommander/internal/table" 13 | ) 14 | 15 | var SqueueTabCols = []table.Column{ 16 | { 17 | Title: "Job ID", 18 | Width: 10, 19 | }, 20 | { 21 | Title: "Job Name", 22 | Width: 60, 23 | }, 24 | { 25 | Title: "Account", 26 | Width: 10, 27 | }, 28 | { 29 | Title: "User Name", 30 | Width: 20, 31 | }, 32 | { 33 | Title: "Job State", 34 | Width: 10, 35 | }, 36 | { 37 | Title: "Priority", 38 | Width: 10, 39 | }, 40 | } 41 | 42 | type SqueueJSON slurm.SqueueJSON 43 | type TableRows []table.Row 44 | 45 | func (sqJson *SqueueJSON) FilterSqueueTable(f string, l *log.Logger) (*TableRows, *SqueueJSON, *command.ErrorMsg) { 46 | var ( 47 | sqTabRows = TableRows{} 48 | sqJsonFiltered = 
SqueueJSON{} 49 | errMsg *command.ErrorMsg 50 | re *regexp.Regexp 51 | ) 52 | 53 | l.Printf("Filter SQUEUE start.\n") 54 | re, err := regexp.Compile(f) 55 | if err != nil { 56 | // User entered bad regexp, return error and set empty re (filter.value() will be zeroed in update()) 57 | l.Printf("FAIL: compile regexp: %q with err: %s", f, err) 58 | f = "" 59 | re, _ = regexp.Compile(f) 60 | errMsg = &command.ErrorMsg{ 61 | From: "FilterSqueueTable", 62 | ErrHelp: "Regular expression failed to compile, please correct it (turn on DEBUG to see details)", 63 | OrigErr: err, 64 | } 65 | } 66 | t := time.Now() 67 | 68 | // NEW: Filter: 69 | // 1. join strings from job into one line 70 | // 2. re.MatchString(line) 71 | // Allows doing matches across multiple columns, e.g.: "bash.*(als|gmi)" - "bash jobs BY als or gmi accounts" 72 | for _, v := range sqJson.Jobs { 73 | 74 | // NEW: 1. JOIN 75 | line := strings.Join([]string{ 76 | strconv.Itoa(*v.JobId), 77 | *v.Name, 78 | *v.Account, 79 | *v.UserName, 80 | *v.JobState, 81 | }, ".") 82 | 83 | // NEW: 2. 
MATCH 84 | if re.MatchString(line) { 85 | sqTabRows = append(sqTabRows, table.Row{strconv.Itoa(*v.JobId), *v.Name, *v.Account, *v.UserName, *v.JobState, strconv.Itoa(*v.Priority)}) 86 | sqJsonFiltered.Jobs = append(sqJsonFiltered.Jobs, v) 87 | } 88 | } 89 | l.Printf("Filter SQUEUE end in %.3f seconds\n", time.Since(t).Seconds()) 90 | 91 | return &sqTabRows, &sqJsonFiltered, errMsg 92 | } 93 | -------------------------------------------------------------------------------- /internal/model/tabs/jobtab/jobtabview.go: -------------------------------------------------------------------------------- 1 | package jobtab 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "strconv" 7 | "strings" 8 | "time" 9 | 10 | "github.com/charmbracelet/lipgloss" 11 | "github.com/CLIP-HPC/SlurmCommander/internal/generic" 12 | "github.com/CLIP-HPC/SlurmCommander/internal/styles" 13 | ) 14 | 15 | func (jt *JobTab) tabJobs() string { 16 | 17 | return jt.SqueueTable.View() + "\n" 18 | } 19 | 20 | func (jt *JobTab) getJobInfo(l *log.Logger) string { 21 | var scr strings.Builder 22 | 23 | n := jt.SqueueTable.Cursor() 24 | l.Printf("getJobInfo: cursor at %d table rows: %d\n", n, len(jt.SqueueFiltered.Jobs)) 25 | if len(jt.SqueueFiltered.Jobs) == 0 || n == -1 { 26 | return "Select a job" 27 | } 28 | 29 | fmtStr := "%-15s : %-30s\n" 30 | fmtStrLast := "%-15s : %-30s" 31 | //ibFmt := "Job Name: %s\nJob Command: %s\nOutput: %s\nError: %s\n" 32 | infoBoxLeft := fmt.Sprintf(fmtStr, "Partition", *jt.SqueueFiltered.Jobs[n].Partition) 33 | infoBoxLeft += fmt.Sprintf(fmtStr, "QoS", *jt.SqueueFiltered.Jobs[n].Qos) 34 | infoBoxLeft += fmt.Sprintf(fmtStr, "TRES", *jt.SqueueFiltered.Jobs[n].TresReqStr) 35 | infoBoxLeft += fmt.Sprintf(fmtStr, "Batch Host", *jt.SqueueFiltered.Jobs[n].BatchHost) 36 | // DONE: slurm 21 vs 22, in 22, JobResources is not set for pending jobs? include a test for that? 
investigate 37 | if jt.SqueueFiltered.Jobs[n].JobResources != nil { 38 | if jt.SqueueFiltered.Jobs[n].JobResources.Nodes != nil { 39 | infoBoxLeft += fmt.Sprintf(fmtStrLast, "AllocNodes", *jt.SqueueFiltered.Jobs[n].JobResources.Nodes) 40 | } else { 41 | infoBoxLeft += fmt.Sprintf(fmtStrLast, "AllocNodes", "none") 42 | 43 | } 44 | } 45 | 46 | infoBoxRight := fmt.Sprintf(fmtStr, "Array Job ID", strconv.Itoa(*jt.SqueueFiltered.Jobs[n].ArrayJobId)) 47 | if jt.SqueueFiltered.Jobs[n].ArrayTaskId != nil { 48 | infoBoxRight += fmt.Sprintf(fmtStr, "Array Task ID", strconv.Itoa(*jt.SqueueFiltered.Jobs[n].ArrayTaskId)) 49 | } else { 50 | infoBoxRight += fmt.Sprintf(fmtStr, "Array Task ID", "NoTaskID") 51 | } 52 | infoBoxRight += fmt.Sprintf(fmtStr, "Gres Details", strings.Join(*jt.SqueueFiltered.Jobs[n].GresDetail, ",")) 53 | infoBoxRight += fmt.Sprintf(fmtStr, "Features", *jt.SqueueFiltered.Jobs[n].Features) 54 | infoBoxRight += fmt.Sprintf(fmtStrLast, "wckey", *jt.SqueueFiltered.Jobs[n].Wckey) 55 | 56 | infoBoxMiddle := fmt.Sprintf(fmtStr, "Submit", time.Unix(*jt.SqueueFiltered.Jobs[n].SubmitTime, 0)) 57 | if *jt.SqueueFiltered.Jobs[n].StartTime != 0 { 58 | infoBoxMiddle += fmt.Sprintf(fmtStr, "Start /expected", time.Unix(*jt.SqueueFiltered.Jobs[n].StartTime, 0)) 59 | } else { 60 | infoBoxMiddle += fmt.Sprintf(fmtStr, "Start", "unknown") 61 | } 62 | // placeholder lines 63 | infoBoxMiddle += "\n" 64 | infoBoxMiddle += "\n" 65 | // EO placeholder lines 66 | infoBoxMiddle += fmt.Sprintf(fmtStrLast, "State reason", *jt.SqueueFiltered.Jobs[n].StateReason) 67 | 68 | infoBoxWide := fmt.Sprintf(fmtStr, "Job Name", *jt.SqueueFiltered.Jobs[n].Name) 69 | infoBoxWide += fmt.Sprintf(fmtStr, "Command", *jt.SqueueFiltered.Jobs[n].Command) 70 | infoBoxWide += fmt.Sprintf(fmtStr, "StdOut", *jt.SqueueFiltered.Jobs[n].StandardOutput) 71 | infoBoxWide += fmt.Sprintf(fmtStr, "StdErr", *jt.SqueueFiltered.Jobs[n].StandardError) 72 | infoBoxWide += fmt.Sprintf(fmtStrLast, "Working Dir", 
*jt.SqueueFiltered.Jobs[n].CurrentWorkingDirectory) 73 | 74 | // 8 for borders (~10 extra) 75 | //w := ((m.Globals.winW - 10) / 3) * 3 76 | //s := styles.JobInfoInBox.Copy().Width(w / 3).Height(5) 77 | ////top := lipgloss.JoinHorizontal(lipgloss.Top, styles.JobInfoInBox.Render(infoBoxLeft), styles.JobInfoInBox.Render(infoBoxMiddle), styles.JobInfoInBox.Render(infoBoxRight)) 78 | // TODO: use builder here 79 | top := lipgloss.JoinHorizontal(lipgloss.Top, styles.JobInfoInBox.Render(infoBoxLeft), styles.JobInfoInBox.Render(infoBoxMiddle), styles.JobInfoInBox.Render(infoBoxRight)) 80 | //s = styles.JobInfoInBox.Copy().Width(w + 4) 81 | scr.WriteString(lipgloss.JoinVertical(lipgloss.Left, top, styles.JobInfoInBottomBox.Render(infoBoxWide))) 82 | 83 | //return infoBox 84 | return scr.String() 85 | } 86 | 87 | func (jt *JobTab) getJobCounts() string { 88 | var ( 89 | ret string 90 | top5u string 91 | top5a string 92 | jpp string 93 | jpq string 94 | ) 95 | 96 | fmtStr := "%-20s : %6d\n" 97 | fmtTitle := "%-29s" 98 | 99 | top5u += styles.TextYellowOnBlue.Render(fmt.Sprintf(fmtTitle, "Top 5 User")) 100 | top5u += "\n" 101 | for _, v := range jt.Breakdowns.Top5user { 102 | top5u += fmt.Sprintf(fmtStr, v.Name, v.Count) 103 | } 104 | 105 | top5a += styles.TextYellowOnBlue.Render(fmt.Sprintf(fmtTitle, "Top 5 Accounts")) 106 | top5a += "\n" 107 | for _, v := range jt.Breakdowns.Top5acc { 108 | top5a += fmt.Sprintf(fmtStr, v.Name, v.Count) 109 | } 110 | 111 | jpp += styles.TextYellowOnBlue.Render(fmt.Sprintf(fmtTitle, "Jobs per Partition")) 112 | jpp += "\n" 113 | for _, v := range jt.Breakdowns.JobPerPart { 114 | jpp += fmt.Sprintf(fmtStr, v.Name, v.Count) 115 | } 116 | 117 | jpq += styles.TextYellowOnBlue.Render(fmt.Sprintf(fmtTitle, "Jobs per QoS")) 118 | jpq += "\n" 119 | for _, v := range jt.Breakdowns.JobPerQos { 120 | jpq += fmt.Sprintf(fmtStr, v.Name, v.Count) 121 | } 122 | 123 | top5u = styles.CountsBox.Render(top5u) 124 | top5a = styles.CountsBox.Render(top5a) 125 | jpq 
= styles.CountsBox.Render(jpq) 126 | jpp = styles.CountsBox.Render(jpp) 127 | 128 | ret = lipgloss.JoinHorizontal(lipgloss.Top, top5u, top5a, jpp, jpq) 129 | 130 | return ret 131 | } 132 | 133 | func (jt *JobTab) JobTabStats(l *log.Logger) string { 134 | 135 | l.Printf("JobTabStats called\n") 136 | 137 | str := styles.StatsSeparatorTitle.Render(fmt.Sprintf("%-30s", "Job states (filtered):")) 138 | str += "\n" 139 | 140 | str += generic.GenCountStrVert(jt.Stats.StateCnt, l) 141 | 142 | str += styles.StatsSeparatorTitle.Render(fmt.Sprintf("%-30s", "Pending jobs:")) 143 | str += "\n" 144 | str += fmt.Sprintf("%-10s : %s\n", " ", "dd-hh:mm:ss") 145 | str += fmt.Sprintf("%-10s : %s\n", "MinWait", generic.HumanizeDuration(jt.Stats.MinWait, l)) 146 | str += fmt.Sprintf("%-10s : %s\n", "AvgWait", generic.HumanizeDuration(jt.Stats.AvgWait, l)) 147 | str += fmt.Sprintf("%-10s : %s\n", "MedWait", generic.HumanizeDuration(jt.Stats.MedWait, l)) 148 | str += fmt.Sprintf("%-10s : %s\n", "MaxWait", generic.HumanizeDuration(jt.Stats.MaxWait, l)) 149 | 150 | str += "\n" 151 | str += styles.StatsSeparatorTitle.Render(fmt.Sprintf("%-30s", "Running jobs:")) 152 | str += "\n" 153 | str += fmt.Sprintf("%-10s : %s\n", " ", "dd-hh:mm:ss") 154 | str += fmt.Sprintf("%-10s : %s\n", "MinRun", generic.HumanizeDuration(jt.Stats.MinRun, l)) 155 | str += fmt.Sprintf("%-10s : %s\n", "AvgRun", generic.HumanizeDuration(jt.Stats.AvgRun, l)) 156 | str += fmt.Sprintf("%-10s : %s\n", "MedRun", generic.HumanizeDuration(jt.Stats.MedRun, l)) 157 | str += fmt.Sprintf("%-10s : %s\n", "MaxRun", generic.HumanizeDuration(jt.Stats.MaxRun, l)) 158 | 159 | return str 160 | } 161 | 162 | func (jt *JobTab) View(l *log.Logger) string { 163 | 164 | var ( 165 | Header strings.Builder 166 | MainWindow strings.Builder 167 | ) 168 | 169 | l.Printf("IN JobTab.View()") 170 | 171 | // Header 172 | //Header.WriteString("\n") 173 | Header.WriteString(fmt.Sprintf("Filter: %10.30s\tItems: %d\n", jt.Filter.Value(), 
len(jt.SqueueFiltered.Jobs))) 174 | Header.WriteString("\n") 175 | 176 | // Mid Main: table || table+stats || table+menu 177 | 178 | // Case Info OFF 179 | if !jt.InfoOn { 180 | // Table always here 181 | MainWindow.WriteString(jt.tabJobs()) 182 | 183 | // Below table stich Filter || Counts 184 | switch { 185 | case jt.FilterOn: 186 | // filter 187 | MainWindow.WriteString("\n") 188 | MainWindow.WriteString("Filter value (Search in joined: JobID + JobName + Account + UserName + JobState):\n") 189 | MainWindow.WriteString(fmt.Sprintf("%s\n", jt.Filter.View())) 190 | MainWindow.WriteString("(Enter to apply, Esc to clear filter and abort, Regular expressions supported, syntax details: https://golang.org/s/re2syntax)\n") 191 | case jt.CountsOn: 192 | // Counts on 193 | MainWindow.WriteString("\n") 194 | MainWindow.WriteString(styles.JobInfoBox.Render(jt.getJobCounts())) 195 | } 196 | 197 | // Then join all that Horizontally with Menu || Stats 198 | switch { 199 | case jt.MenuOn: 200 | X := MainWindow.String() 201 | MainWindow.Reset() 202 | MainWindow.WriteString(lipgloss.JoinHorizontal(lipgloss.Top, X, styles.MenuBoxStyle.Render(jt.Menu.View()))) 203 | l.Printf("\nITEMS LIST: %#v\n", jt.Menu.Items()) 204 | case jt.StatsOn: 205 | X := MainWindow.String() 206 | MainWindow.Reset() 207 | MainWindow.WriteString(lipgloss.JoinHorizontal(lipgloss.Top, X, styles.MenuBoxStyle.Render(jt.JobTabStats(l)))) 208 | } 209 | } else { 210 | // Case Info ON 211 | 212 | // First join Horizontally Table with Menu || Stats 213 | switch { 214 | case jt.MenuOn: 215 | // table + menu 216 | MainWindow.WriteString(lipgloss.JoinHorizontal(lipgloss.Top, jt.tabJobs(), styles.MenuBoxStyle.Render(jt.Menu.View()))) 217 | l.Printf("\nITEMS LIST: %#v\n", jt.Menu.Items()) 218 | case jt.StatsOn: 219 | // table + stats 220 | MainWindow.WriteString(lipgloss.JoinHorizontal(lipgloss.Top, jt.tabJobs(), styles.MenuBoxStyle.Render(jt.JobTabStats(l)))) 221 | default: 222 | // table 223 | 
MainWindow.WriteString(jt.tabJobs()) 224 | } 225 | 226 | // Then stich the Filter || Info || Counts below 227 | switch { 228 | case jt.FilterOn: 229 | // filter 230 | MainWindow.WriteString("\n") 231 | MainWindow.WriteString("Filter value (Search in joined: JobID + JobName + Account + UserName + JobState):\n") 232 | MainWindow.WriteString(fmt.Sprintf("%s\n", jt.Filter.View())) 233 | MainWindow.WriteString("(Enter to apply, Esc to clear filter and abort, Regular expressions supported, syntax details: https://golang.org/s/re2syntax)\n") 234 | case jt.InfoOn: 235 | // info 236 | MainWindow.WriteString("\n") 237 | MainWindow.WriteString(styles.JobInfoBox.Render(jt.getJobInfo(l))) 238 | case jt.CountsOn: 239 | // Counts on 240 | MainWindow.WriteString("\n") 241 | MainWindow.WriteString(styles.JobInfoBox.Render(jt.getJobCounts())) 242 | } 243 | } 244 | 245 | return Header.String() + MainWindow.String() 246 | } 247 | -------------------------------------------------------------------------------- /internal/model/update.go: -------------------------------------------------------------------------------- 1 | package model 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "strconv" 7 | "strings" 8 | 9 | "github.com/CLIP-HPC/SlurmCommander/internal/command" 10 | "github.com/CLIP-HPC/SlurmCommander/internal/keybindings" 11 | "github.com/CLIP-HPC/SlurmCommander/internal/model/tabs/clustertab" 12 | "github.com/CLIP-HPC/SlurmCommander/internal/model/tabs/jobfromtemplate" 13 | "github.com/CLIP-HPC/SlurmCommander/internal/model/tabs/jobhisttab" 14 | "github.com/CLIP-HPC/SlurmCommander/internal/model/tabs/jobtab" 15 | "github.com/CLIP-HPC/SlurmCommander/internal/slurm" 16 | "github.com/CLIP-HPC/SlurmCommander/internal/generic" 17 | "github.com/CLIP-HPC/SlurmCommander/internal/styles" 18 | "github.com/CLIP-HPC/SlurmCommander/internal/table" 19 | "github.com/charmbracelet/bubbles/key" 20 | "github.com/charmbracelet/bubbles/textinput" 21 | "github.com/charmbracelet/bubbles/textarea" 22 | tea 
"github.com/charmbracelet/bubbletea" 23 | ) 24 | 25 | type errMsg error 26 | 27 | type activeTabType interface { 28 | AdjTableHeight(int, *log.Logger) 29 | } 30 | 31 | func (m Model) Update(msg tea.Msg) (tea.Model, tea.Cmd) { 32 | 33 | var ( 34 | brk bool = false 35 | activeTab activeTabType 36 | activeTable *table.Model 37 | activeFilter *textinput.Model 38 | activeFilterOn *bool 39 | activeUserInputs *generic.UserInputs 40 | activeUserInputsOn *bool 41 | activeJDViewport bool 42 | ) 43 | 44 | // This shortens the testing for table movement keys 45 | switch m.ActiveTab { 46 | case tabJobs: 47 | activeTab = &m.JobTab 48 | activeTable = &m.JobTab.SqueueTable 49 | activeFilter = &m.JobTab.Filter 50 | activeFilterOn = &m.JobTab.FilterOn 51 | case tabJobHist: 52 | activeTab = &m.JobHistTab 53 | activeTable = &m.JobHistTab.SacctTable 54 | activeFilter = &m.JobHistTab.Filter 55 | activeFilterOn = &m.JobHistTab.FilterOn 56 | activeUserInputs = &m.JobHistTab.UserInputs 57 | activeUserInputsOn = &m.JobHistTab.UserInputsOn 58 | case tabJobDetails: 59 | // here we're in the special situation, we need to pass on keys to viewport 60 | activeJDViewport = true 61 | case tabJobFromTemplate: 62 | activeTable = &m.JobFromTemplateTab.TemplatesTable 63 | case tabCluster: 64 | activeTab = &m.ClusterTab 65 | activeTable = &m.ClusterTab.SinfoTable 66 | activeFilter = &m.ClusterTab.Filter 67 | activeFilterOn = &m.ClusterTab.FilterOn 68 | } 69 | 70 | // Filter is turned on, take care of this first 71 | // TODO: revisit this for filtering on multiple tabs 72 | switch { 73 | case activeJDViewport: 74 | // catch only up/down keys, leave the rest to fallthrough 75 | switch msg := msg.(type) { 76 | case tea.KeyMsg: 77 | switch { 78 | case key.Matches(msg, keybindings.DefaultKeyMap.Up), 79 | key.Matches(msg, keybindings.DefaultKeyMap.Down), 80 | key.Matches(msg, keybindings.DefaultKeyMap.PageUp), 81 | key.Matches(msg, keybindings.DefaultKeyMap.PageDown): 82 | m.Log.Printf("VIEWPORT: up/down 
msg\n") 83 | var cmd tea.Cmd 84 | m.JobDetailsTab.ViewPort, cmd = m.JobDetailsTab.ViewPort.Update(msg) 85 | return m, cmd 86 | } 87 | } 88 | 89 | case activeFilterOn != nil && *activeFilterOn: 90 | m.Log.Printf("Filter is ON") 91 | switch msg := msg.(type) { 92 | 93 | case tea.KeyMsg: 94 | switch msg.Type { 95 | // TODO: when filter is set/cleared, trigger refresh with new filtered data 96 | case tea.KeyEnter: 97 | // finish & apply entering filter 98 | *activeFilterOn = false 99 | brk = true 100 | 101 | case tea.KeyEsc: 102 | // abort entering filter 103 | *activeFilterOn = false 104 | activeFilter.SetValue("") 105 | brk = true 106 | } 107 | 108 | if brk { 109 | // TODO: this is a "fix" for crashing-after-filter when Cursor() goes beyond list end 110 | // TODO: don't feel good about this... what if list is empty? no good. revisit 111 | // NOTE: This doesn't do what i image it should, cursor remains -1 when table is empty situation? 112 | // Explanation in clamp function: https://github.com/charmbracelet/bubbles/blob/13f52d678d315676568a656b5211b8a24a54a885/table/table.go#L296 113 | activeTable.SetCursor(0) 114 | activeTab.AdjTableHeight(m.winH, m.Log) 115 | //m.Log.Printf("ActiveTable = %v\n", activeTable) 116 | m.Log.Printf("Update: Filter set, setcursor(0), activetable.Cursor==%d\n", activeTable.Cursor()) 117 | switch m.ActiveTab { 118 | case tabJobs: 119 | rows, sqf, err := m.JobTab.Squeue.FilterSqueueTable(m.JobTab.Filter.Value(), m.Log) 120 | if err != nil { 121 | m.Globals.ErrorHelp = err.ErrHelp 122 | m.Globals.ErrorMsg = err.OrigErr 123 | m.JobTab.Filter.SetValue("") 124 | } else { 125 | m.JobTab.SqueueTable.SetRows(*rows) 126 | m.JobTab.SqueueFiltered = *sqf 127 | m.JobTab.GetStatsFiltered(m.Log) 128 | } 129 | return m, nil 130 | 131 | case tabJobHist: 132 | rows, saf, err := m.JobHistTab.SacctHist.FilterSacctTable(m.JobHistTab.Filter.Value(), m.Log) 133 | if err != nil { 134 | m.Globals.ErrorHelp = err.ErrHelp 135 | m.Globals.ErrorMsg = err.OrigErr 136 | 
m.JobHistTab.Filter.SetValue("") 137 | } else { 138 | m.JobHistTab.SacctTable.SetRows(*rows) 139 | m.JobHistTab.SacctHistFiltered = *saf 140 | m.JobHistTab.GetStatsFiltered(m.Log) 141 | } 142 | return m, nil 143 | 144 | case tabCluster: 145 | m.ClusterTab.GetStatsFiltered(m.Log) 146 | return m, clustertab.QuickGetSinfo(m.Log) 147 | 148 | default: 149 | return m, nil 150 | } 151 | } 152 | } 153 | 154 | tmp, cmd := activeFilter.Update(msg) 155 | *activeFilter = tmp 156 | return m, cmd 157 | 158 | case activeUserInputsOn != nil && *activeUserInputsOn: 159 | m.Log.Printf("UserInputs is ON") 160 | switch msg := msg.(type) { 161 | 162 | case tea.KeyMsg: 163 | switch msg.Type { 164 | // TODO: when filter is set/cleared, trigger refresh with new filtered data 165 | case tea.KeyEnter: 166 | // finish & apply entering filter 167 | *activeUserInputsOn = false 168 | brk = true 169 | 170 | case tea.KeyEsc: 171 | // abort entering filter 172 | *activeUserInputsOn = false 173 | brk = true 174 | 175 | case tea.KeyUp, tea.KeyDown, tea.KeyTab: 176 | s := msg.String() 177 | m.JobHistTab.UserInputs.Params[m.JobHistTab.UserInputs.FocusIndex].Blur() 178 | 179 | if s == "up" { 180 | m.JobHistTab.UserInputs.FocusIndex-- 181 | } else { 182 | m.JobHistTab.UserInputs.FocusIndex++ 183 | } 184 | 185 | if m.JobHistTab.UserInputs.FocusIndex >= len(m.JobHistTab.UserInputs.Params) { 186 | m.JobHistTab.UserInputs.FocusIndex = 0 187 | } else if m.JobHistTab.UserInputs.FocusIndex < 0 { 188 | m.JobHistTab.UserInputs.FocusIndex = len(m.JobHistTab.UserInputs.Params)-1 189 | } 190 | 191 | m.JobHistTab.UserInputs.Params[m.JobHistTab.UserInputs.FocusIndex].Focus() 192 | } 193 | 194 | if brk { 195 | // TODO: this is a "fix" for crashing-after-filter when Cursor() goes beyond list end 196 | // TODO: don't feel good about this... what if list is empty? no good. revisit 197 | // NOTE: This doesn't do what i image it should, cursor remains -1 when table is empty situation? 
198 | // Explanation in clamp function: https://github.com/charmbracelet/bubbles/blob/13f52d678d315676568a656b5211b8a24a54a885/table/table.go#L296 199 | activeTable.SetCursor(0) 200 | activeTab.AdjTableHeight(m.winH, m.Log) 201 | m.Log.Printf("Update: Param set, setcursor(0), activetable.Cursor==%d\n", activeTable.Cursor()) 202 | switch m.ActiveTab { 203 | case tabJobHist: 204 | chngd := false 205 | t, _ := strconv.ParseUint(m.JobHistTab.UserInputs.Params[0].Value(), 10, 32) 206 | 207 | if m.JobHistTab.UserInputs.Params[1].Value() != m.JobHistTab.JobHistStart { 208 | m.JobHistTab.JobHistStart = m.JobHistTab.UserInputs.Params[1].Value() 209 | chngd = true 210 | } 211 | if m.JobHistTab.UserInputs.Params[2].Value() != m.JobHistTab.JobHistEnd { 212 | m.JobHistTab.JobHistEnd = m.JobHistTab.UserInputs.Params[2].Value() 213 | chngd = true 214 | } 215 | if uint(t) != m.JobHistTab.JobHistTimeout { 216 | m.JobHistTab.JobHistTimeout = uint(t) 217 | chngd = true 218 | } 219 | 220 | if chngd { 221 | m.Log.Println ("Refreshing JobHist View") 222 | m.JobHistTab.HistFetched = false 223 | return m, jobhisttab.GetSacctHist(strings.Join(m.Globals.UAccounts, ","), 224 | m.JobHistTab.JobHistStart, 225 | m.JobHistTab.JobHistEnd, 226 | m.JobHistTab.JobHistTimeout, 227 | m.Log) 228 | } 229 | 230 | default: 231 | return m, nil 232 | } 233 | } 234 | } 235 | 236 | var tmp textinput.Model 237 | var cmd tea.Cmd 238 | for i := range m.JobHistTab.UserInputs.Params { 239 | tmp, cmd = activeUserInputs.Params[i].Update(msg) 240 | *&activeUserInputs.Params[i] = tmp 241 | } 242 | return m, cmd 243 | 244 | case m.JobTab.MenuOn: 245 | m.Log.Printf("Update: In Menu\n") 246 | switch msg := msg.(type) { 247 | case tea.WindowSizeMsg: 248 | m.JobTab.Menu.SetWidth(msg.Width) 249 | return m, nil 250 | 251 | case tea.KeyMsg: 252 | switch keypress := msg.String(); keypress { 253 | case "esc": 254 | m.JobTab.MenuOn = false 255 | return m, nil 256 | case "ctrl+c": 257 | //m.quitting = true 258 | m.JobTab.MenuOn = 
false 259 | //return m, tea.Quit 260 | return m, nil 261 | 262 | case "enter": 263 | m.JobTab.MenuOn = false 264 | i, ok := m.JobTab.Menu.SelectedItem().(jobtab.MenuItem) 265 | if ok { 266 | m.JobTab.MenuChoice = jobtab.MenuItem(i) 267 | if m.JobTab.MenuChoice.GetAction() == "INFO" { 268 | // TODO: IF Stats==ON AND NxM, turn it of, can't have both on below NxM 269 | m.JobTab.InfoOn = true 270 | if m.JobTab.StatsOn && m.Globals.winH < 60 { 271 | m.Log.Printf("Toggle InfoBox: Height %d too low (<60). Turn OFF Stats\n", m.Globals.winH) 272 | // We have to turn off stats otherwise screen will break at this Height! 273 | m.JobTab.StatsOn = false 274 | // TODO: send a message via ErrMsg 275 | } 276 | } 277 | // host is needed for ssh command 278 | activeTab.AdjTableHeight(m.winH, m.Log) 279 | host := m.JobTab.SqueueFiltered.Jobs[m.JobTab.SqueueTable.Cursor()].BatchHost 280 | retCmd := m.JobTab.MenuChoice.ExecMenuItem(m.JobTab.SelectedJob, *host, m.Log) 281 | return m, retCmd 282 | } 283 | //return m, tea.Quit 284 | return m, nil 285 | } 286 | } 287 | 288 | var cmd tea.Cmd 289 | m.JobTab.Menu, cmd = m.JobTab.Menu.Update(msg) 290 | return m, cmd 291 | 292 | case m.EditTemplate: 293 | // TODO: move this code to a function/method 294 | var cmds []tea.Cmd 295 | var cmd tea.Cmd 296 | 297 | m.Log.Printf("Update: In EditTemplate: %#v\n", msg) 298 | switch msg := msg.(type) { 299 | case tea.KeyMsg: 300 | m.Log.Printf("Update: m.EditTemplate case tea.KeyMsg\n") 301 | switch msg.Type { 302 | case tea.KeyEsc: 303 | m.EditTemplate = false 304 | jobfromtemplate.EditorKeyMap.DisableKeys() 305 | tabKeys[m.ActiveTab].SetupKeys() 306 | //if m.TemplateEditor.Focused() { 307 | // m.TemplateEditor.Blur() 308 | //} else { 309 | // m.EditTemplate = false 310 | //} 311 | 312 | case tea.KeyCtrlS: 313 | // TODO: 314 | // 1. Exit editor 315 | // 2. Save content to file 316 | // 3. Notify user about generated filename from 2. 317 | // 4. 
Submit job 318 | m.Log.Printf("EditTemplate: Ctrl+s pressed\n") 319 | m.EditTemplate = false 320 | jobfromtemplate.EditorKeyMap.DisableKeys() 321 | tabKeys[m.ActiveTab].SetupKeys() 322 | name, err := jobfromtemplate.SaveToFile(m.JobFromTemplateTab.TemplatesTable.SelectedRow()[0], m.JobFromTemplateTab.TemplateEditor.Value(), m.Log) 323 | if err != nil { 324 | m.Log.Printf("ERROR saving to file!\n") 325 | return m, nil 326 | } 327 | return m, command.CallSbatch(name, m.Log) 328 | 329 | case tea.KeyCtrlC: 330 | return m, tea.Quit 331 | 332 | default: 333 | if !m.TemplateEditor.Focused() { 334 | cmd = m.TemplateEditor.Focus() 335 | cmds = append(cmds, cmd) 336 | } 337 | } 338 | 339 | // We handle errors just like any other message 340 | case errMsg: 341 | //m.err = msg 342 | return m, nil 343 | } 344 | 345 | m.TemplateEditor, cmd = m.TemplateEditor.Update(msg) 346 | cmds = append(cmds, cmd) 347 | return m, tea.Batch(cmds...) 348 | 349 | } 350 | 351 | switch msg := msg.(type) { 352 | 353 | // TODO: https://pkg.go.dev/github.com/charmbracelet/bubbletea#WindowSizeMsg 354 | // ToDo: 355 | // prevent updates for non-selected tabs 356 | 357 | // ERROR msg 358 | case command.ErrorMsg: 359 | m.Log.Printf("ERROR msg, from: %s\n", msg.From) 360 | m.Log.Printf("ERROR msg, original error: %q\n", msg.OrigErr) 361 | m.Globals.ErrorMsg = msg.OrigErr 362 | m.Globals.ErrorHelp = msg.ErrHelp 363 | // cases when this is BAD and we can't continue 364 | switch msg.From { 365 | case "GetUserName", "GetUserAssoc": 366 | return m, tea.Quit 367 | } 368 | return m, nil 369 | 370 | // Ssh finished 371 | case command.SshCompleted: 372 | m.Log.Printf("Got SshCompleted msg, value: %#v\n", msg) 373 | return m, nil 374 | 375 | // UAccounts fetched 376 | case command.UserAssoc: 377 | m.Log.Printf("Got UserAssoc msg, value: %#v\n", msg) 378 | // TODO: consider changing this to string and do a join(",") to be ready to pass around 379 | m.Globals.UAccounts = append(m.Globals.UAccounts, msg...) 
380 | m.Log.Printf("Appended UserAssoc msg go Globals, value now: %#v\n", m.Globals.UAccounts) 381 | // Now we trigger a sacctHist 382 | return m, jobhisttab.GetSacctHist(strings.Join(m.Globals.UAccounts, ","), 383 | m.JobHistTab.JobHistStart, 384 | m.JobHistTab.JobHistEnd, 385 | m.JobHistTab.JobHistTimeout, 386 | m.Log) 387 | 388 | // UserName fetched 389 | case command.UserName: 390 | m.Log.Printf("Got UserNAme msg, save %q to Globals.\n", msg) 391 | m.Globals.UserName = string(msg) 392 | // now, call GetUserAssoc() 393 | return m, command.GetUserAssoc(m.Globals.UserName, m.Log) 394 | 395 | // Shold executed 396 | case command.SBatchSent: 397 | m.Log.Printf("Got SBatchSent msg on file %q\n", msg.JobFile) 398 | return m, nil 399 | 400 | // Shold executed 401 | case command.SHoldSent: 402 | m.Log.Printf("Got SHoldSent msg on job %q\n", msg.Jobid) 403 | return m, jobtab.TimedGetSqueue(m.Log) 404 | 405 | // Scancel executed 406 | case command.ScancelSent: 407 | m.Log.Printf("Got ScancelSent msg on job %q\n", msg.Jobid) 408 | return m, jobtab.TimedGetSqueue(m.Log) 409 | 410 | // Srequeue executed 411 | case command.SRequeueSent: 412 | m.Log.Printf("Got SRequeueSent msg on job %q\n", msg.Jobid) 413 | return m, jobtab.TimedGetSqueue(m.Log) 414 | 415 | // Get initial job template list 416 | case jobfromtemplate.TemplatesListRows: 417 | m.Log.Printf("Update: Got TemplatesListRows msg: %#v\n", msg) 418 | if msg != nil { 419 | // if it's not empty, append to table 420 | m.JobFromTemplateTab.TemplatesTable.SetRows(msg) 421 | m.JobFromTemplateTab.TemplatesList = msg 422 | } 423 | return m, nil 424 | 425 | // getting initial template text 426 | case jobfromtemplate.TemplateText: 427 | m.Log.Printf("Update: Got TemplateText msg: %#v\n", msg) 428 | // HERE: we initialize the new textarea editor and flip the EditTemplate switch to ON 429 | tabKeys[m.ActiveTab].DisableKeys() 430 | jobfromtemplate.EditorKeyMap.SetupKeys() 431 | m.EditTemplate = true 432 | m.TemplateEditor = 
textarea.New() 433 | m.TemplateEditor.SetWidth(m.winW - 15) 434 | m.TemplateEditor.SetHeight(m.winH - 15) 435 | m.TemplateEditor.SetValue(string(msg)) 436 | m.TemplateEditor.Focus() 437 | m.TemplateEditor.CharLimit = 0 438 | return m, jobfromtemplate.EditorOn() 439 | 440 | // Windows resize 441 | case tea.WindowSizeMsg: 442 | m.Log.Printf("Update: got WindowSizeMsg: %d %d\n", msg.Width, msg.Height) 443 | // TODO: if W<195 || H<60 we can't really run without breaking view, so quit and inform user 444 | // 187x44 == 13" MacBook Font 14 iTerm (HUGE letters!) 445 | if msg.Height < 43 || msg.Width < 185 { 446 | m.Log.Printf("FATAL: Window too small to run without breaking view. Have %dx%d. Need at least 185x43.\n", msg.Width, msg.Height) 447 | m.Globals.SizeErr = fmt.Sprintf("FATAL: Window too small to run without breaking view. Have %dx%d. Need at least 185x43.\nIncrease your terminal window and/or decrease font size.", msg.Width, msg.Height) 448 | return m, tea.Quit 449 | } 450 | m.winW = msg.Width 451 | m.winH = msg.Height 452 | // TODO: set also maxheight/width here on change? 
453 | styles.MainWindow = styles.MainWindow.Height(m.winH - 10) 454 | styles.MainWindow = styles.MainWindow.Width(m.winW - 15) 455 | styles.HelpWindow = styles.HelpWindow.Width(m.winW) 456 | styles.JobStepBoxStyle = styles.JobStepBoxStyle.Width(m.winW - 20) 457 | // InfoBox 458 | w := ((m.Globals.winW - 25) / 3) * 3 459 | styles.JobInfoInBox = styles.JobInfoInBox.Width(w / 3).Height(5) 460 | styles.JobInfoInBottomBox = styles.JobInfoInBottomBox.Width(w + 4).Height(5) 461 | 462 | // Adjust ALL tables 463 | m.JobTab.AdjTableHeight(m.winH, m.Log) 464 | m.JobHistTab.AdjTableHeight(m.winH, m.Log) 465 | m.ClusterTab.AdjTableHeight(m.winH, m.Log) 466 | 467 | // Fix jobdetails viewport 468 | m.JobDetailsTab.ViewPort.Width = m.winW - 15 469 | m.JobDetailsTab.ViewPort.Height = m.winH - 15 470 | 471 | // Adjust StatBoxes 472 | m.Log.Printf("CTB Width = %d\n", styles.ClusterTabStats.GetWidth()) 473 | styles.ClusterTabStats = styles.ClusterTabStats.Width(m.winW - clustertab.SinfoTabWidth) 474 | m.Log.Printf("CTB Width = %d\n", styles.ClusterTabStats.GetWidth()) 475 | 476 | // JobTab update 477 | case jobtab.SqueueJSON: 478 | m.Log.Printf("U(): got SqueueJSON\n") 479 | if len(msg.Jobs) != 0 { 480 | m.Squeue = msg 481 | 482 | // TODO: 483 | // fix: if after filtering m.table.Cursor|SelectedRow > lines in table, Info crashes trying to fetch nonexistent row 484 | rows, sqf, err := msg.FilterSqueueTable(m.JobTab.Filter.Value(), m.Log) 485 | if err != nil { 486 | m.Globals.ErrorHelp = err.ErrHelp 487 | m.Globals.ErrorMsg = err.OrigErr 488 | m.JobTab.Filter.SetValue("") 489 | } else { 490 | m.JobTab.SqueueTable.SetRows(*rows) 491 | m.JobTab.SqueueFiltered = *sqf 492 | m.JobTab.GetStatsFiltered(m.Log) 493 | } 494 | } 495 | m.UpdateCnt++ 496 | // if active window != this, don't trigger new refresh 497 | if m.ActiveTab == tabJobs { 498 | return m, jobtab.TimedGetSqueue(m.Log) 499 | } else { 500 | return m, nil 501 | } 502 | 503 | // Cluster tab update 504 | case clustertab.SinfoJSON: 505 
| m.Log.Printf("U(): got SinfoJSON\n") 506 | if len(msg.Nodes) != 0 { 507 | m.Sinfo = msg 508 | rows, sif, err := msg.FilterSinfoTable(m.ClusterTab.Filter.Value(), m.Log) 509 | if err != nil { 510 | m.Globals.ErrorHelp = err.ErrHelp 511 | m.Globals.ErrorMsg = err.OrigErr 512 | m.ClusterTab.Filter.SetValue("") 513 | } else { 514 | m.ClusterTab.SinfoTable.SetRows(*rows) 515 | m.ClusterTab.SinfoFiltered = *sif 516 | m.ClusterTab.GetStatsFiltered(m.Log) 517 | } 518 | } 519 | m.UpdateCnt++ 520 | // if active window != this, don't trigger new refresh 521 | if m.ActiveTab == tabCluster { 522 | return m, clustertab.TimedGetSinfo(m.Log) 523 | } else { 524 | return m, nil 525 | } 526 | 527 | // Job Details tab update 528 | case slurm.SacctSingleJobHist: 529 | m.Log.Printf("Got SacctSingleJobHist\n") 530 | m.JobDetailsTab.SacctSingleJobHist = msg 531 | return m, nil 532 | 533 | // Job History tab update - NEW, with wrapped failure message 534 | case jobhisttab.JobHistTabMsg: 535 | m.Log.Printf("Got SacctJobHist len: %d\n", len(msg.Jobs)) 536 | m.JobHistTab.SacctHist = msg.SacctJSON 537 | m.JobHistTab.HistFetchFail = msg.HistFetchFail 538 | // Filter and create filtered table 539 | rows, saf, err := msg.FilterSacctTable(m.JobHistTab.Filter.Value(), m.Log) 540 | if err != nil { 541 | m.Globals.ErrorHelp = err.ErrHelp 542 | m.Globals.ErrorMsg = err.OrigErr 543 | m.JobHistTab.Filter.SetValue("") 544 | } else { 545 | m.JobHistTab.SacctTable.SetRows(*rows) 546 | m.JobHistTab.SacctHistFiltered = *saf 547 | m.JobHistTab.GetStatsFiltered(m.Log) 548 | } 549 | if !m.JobHistTab.HistFetchFail { 550 | m.JobHistTab.HistFetched = true 551 | } 552 | 553 | return m, nil 554 | 555 | // Keys pressed 556 | case tea.KeyMsg: 557 | switch { 558 | 559 | // Counters 560 | case key.Matches(msg, keybindings.DefaultKeyMap.Count): 561 | // Depends at which tab we're at 562 | m.Log.Printf("Toggle Counters pressed at %d\n", m.ActiveTab) 563 | switch m.ActiveTab { 564 | case tabJobs: 565 | m.JobTab.InfoOn = 
false 566 | toggleSwitch(&m.JobTab.CountsOn) 567 | case tabJobHist: 568 | toggleSwitch(&m.JobHistTab.CountsOn) 569 | case tabCluster: 570 | toggleSwitch(&m.ClusterTab.CountsOn) 571 | } 572 | activeTab.AdjTableHeight(m.winH, m.Log) 573 | return m, nil 574 | 575 | // UP 576 | // TODO: what if it's a list? 577 | case key.Matches(msg, keybindings.DefaultKeyMap.Up): 578 | activeTable.MoveUp(1) 579 | m.lastKey = "up" 580 | 581 | // DOWN 582 | case key.Matches(msg, keybindings.DefaultKeyMap.Down): 583 | activeTable.MoveDown(1) 584 | m.lastKey = "down" 585 | 586 | // PAGE DOWN 587 | case key.Matches(msg, keybindings.DefaultKeyMap.PageDown): 588 | activeTable.MoveDown(activeTable.Height()) 589 | m.lastKey = "pgdown" 590 | 591 | // PAGE UP 592 | case key.Matches(msg, keybindings.DefaultKeyMap.PageUp): 593 | activeTable.MoveUp(activeTable.Height()) 594 | m.lastKey = "pgup" 595 | 596 | // 1..6 Tab Selection keys 597 | case key.Matches(msg, keybindings.DefaultKeyMap.TtabSel): 598 | k, _ := strconv.Atoi(msg.String()) 599 | tabKeys[m.ActiveTab].DisableKeys() 600 | m.ActiveTab = uint(k) - 1 601 | tabKeys[m.ActiveTab].SetupKeys() 602 | m.lastKey = msg.String() 603 | 604 | // clear error states 605 | m.Globals.ErrorHelp = "" 606 | m.Globals.ErrorMsg = nil 607 | 608 | switch m.ActiveTab { 609 | case tabJobs: 610 | return m, jobtab.TimedGetSqueue(m.Log) 611 | case tabCluster: 612 | return m, clustertab.TimedGetSinfo(m.Log) 613 | default: 614 | return m, nil 615 | } 616 | 617 | // TAB 618 | case key.Matches(msg, keybindings.DefaultKeyMap.Tab): 619 | tabKeys[m.ActiveTab].DisableKeys() 620 | // switch tab 621 | m.ActiveTab = (m.ActiveTab + 1) % uint(len(tabs)) 622 | // setup keys 623 | tabKeys[m.ActiveTab].SetupKeys() 624 | m.lastKey = "tab" 625 | 626 | // clear error states 627 | m.Globals.ErrorHelp = "" 628 | m.Globals.ErrorMsg = nil 629 | 630 | switch m.ActiveTab { 631 | case tabJobs: 632 | return m, jobtab.TimedGetSqueue(m.Log) 633 | case tabCluster: 634 | return m, 
clustertab.TimedGetSinfo(m.Log) 635 | default: 636 | return m, nil 637 | } 638 | 639 | // Shift+TAB 640 | case key.Matches(msg, keybindings.DefaultKeyMap.ShiftTab): 641 | tabKeys[m.ActiveTab].DisableKeys() 642 | // switch tab 643 | if m.ActiveTab == 0 { 644 | m.ActiveTab = uint(len(tabs) - 1) 645 | } else { 646 | m.ActiveTab -= 1 647 | } 648 | // setup keys 649 | tabKeys[m.ActiveTab].SetupKeys() 650 | m.lastKey = "tab" 651 | 652 | // clear error states 653 | m.Globals.ErrorHelp = "" 654 | m.Globals.ErrorMsg = nil 655 | 656 | switch m.ActiveTab { 657 | case tabJobs: 658 | return m, jobtab.TimedGetSqueue(m.Log) 659 | case tabCluster: 660 | return m, clustertab.TimedGetSinfo(m.Log) 661 | default: 662 | return m, nil 663 | } 664 | 665 | // SLASH 666 | case key.Matches(msg, keybindings.DefaultKeyMap.Slash): 667 | m.Log.Printf("Filter key pressed\n") 668 | switch m.ActiveTab { 669 | case tabJobs: 670 | m.JobTab.FilterOn = true 671 | case tabJobHist: 672 | m.JobHistTab.FilterOn = true 673 | case tabCluster: 674 | m.ClusterTab.FilterOn = true 675 | } 676 | activeTab.AdjTableHeight(m.winH, m.Log) 677 | return m, nil 678 | 679 | // t 680 | case key.Matches(msg, keybindings.DefaultKeyMap.TimeRange): 681 | m.Log.Printf("time-range key pressed\n") 682 | switch m.ActiveTab { 683 | case tabJobHist: 684 | m.JobHistTab.UserInputsOn = true 685 | } 686 | activeTab.AdjTableHeight(m.winH, m.Log) 687 | return m, nil 688 | 689 | // ENTER 690 | case key.Matches(msg, keybindings.DefaultKeyMap.Enter): 691 | switch m.ActiveTab { 692 | 693 | // Job Queue tab: Open Job menu 694 | case tabJobs: 695 | // Check if there is anything in the filtered table and if cursor is on a valid item 696 | n := m.JobTab.SqueueTable.Cursor() 697 | m.Log.Printf("Update ENTER key @ jobqueue table\n") 698 | if n == -1 || len(m.JobTab.SqueueFiltered.Jobs) == 0 { 699 | m.Log.Printf("Update ENTER key @ jobqueue table, no jobs selected/empty table\n") 700 | return m, nil 701 | } 702 | // IF Info==ON AND NxM, turn it 
of, can't have both on below NxM 703 | if m.JobTab.InfoOn && m.Globals.winH < 60 { 704 | m.Log.Printf("Toggle MenuBox: Height %d too low (<60). Turn OFF Info\n", m.Globals.winH) 705 | m.JobTab.InfoOn = false 706 | } 707 | // If yes, turn on menu 708 | m.JobTab.MenuOn = true 709 | m.JobTab.SelectedJob = m.JobTab.SqueueTable.SelectedRow()[0] 710 | m.JobTab.SelectedJobState = m.JobTab.SqueueTable.SelectedRow()[4] 711 | // Create new menu 712 | m.JobTab.Menu = jobtab.NewMenu(m.JobTab.SelectedJobState, m.Log) 713 | return m, nil 714 | 715 | // Job History tab: Select Job from history and open its Details tab 716 | case tabJobHist: 717 | n := m.JobHistTab.SacctTable.Cursor() 718 | m.Log.Printf("Update ENTER key @ jobhist table, cursor=%d, len=%d\n", n, len(m.JobHistTab.SacctHistFiltered.Jobs)) 719 | if n == -1 || len(m.JobHistTab.SacctHistFiltered.Jobs) == 0 { 720 | m.Log.Printf("Update ENTER key @ jobhist table, no jobs selected/empty table\n") 721 | return m, nil 722 | } 723 | tabKeys[m.ActiveTab].DisableKeys() 724 | m.ActiveTab = tabJobDetails 725 | tabKeys[m.ActiveTab].SetupKeys() 726 | m.JobDetailsTab.SelJobIDNew = n 727 | // clear error states 728 | m.Globals.ErrorHelp = "" 729 | m.Globals.ErrorMsg = nil 730 | 731 | // new job selected, fill out viewport 732 | m.JobDetailsTab.SetViewportContent(&m.JobHistTab, m.Log) 733 | return m, nil 734 | 735 | // Job from Template tab: Open template for editing 736 | case tabJobFromTemplate: 737 | m.Log.Printf("Update ENTER key @ jobfromtemplate table\n") 738 | // return & handle editing there 739 | if len(m.JobFromTemplateTab.TemplatesList) != 0 { 740 | return m, jobfromtemplate.GetTemplate(m.JobFromTemplateTab.TemplatesTable.SelectedRow()[2], m.Log) 741 | } else { 742 | return m, nil 743 | } 744 | } 745 | 746 | // Refresh the View 747 | case key.Matches(msg, keybindings.DefaultKeyMap.Refresh): 748 | switch m.ActiveTab { 749 | case tabJobHist: 750 | m.Log.Println ("Refreshing JobHist View") 751 | m.JobHistTab.HistFetched = 
false 752 | return m, jobhisttab.GetSacctHist(strings.Join(m.Globals.UAccounts, ","), 753 | m.JobHistTab.JobHistStart, 754 | m.JobHistTab.JobHistEnd, 755 | m.JobHistTab.JobHistTimeout, 756 | m.Log) 757 | } 758 | return m, nil 759 | 760 | // Info - toggle on/off 761 | case key.Matches(msg, keybindings.DefaultKeyMap.Info): 762 | m.Log.Println("Toggle InfoBox") 763 | 764 | // TODO: IF Stats==ON AND NxM, turn it of, can't have both on below NxM 765 | if m.JobTab.StatsOn && m.Globals.winH < 60 { 766 | m.Log.Printf("Toggle InfoBox: Height %d too low (<60). Turn OFF Stats\n", m.Globals.winH) 767 | // We have to turn off stats otherwise screen will break at this Height! 768 | m.JobTab.StatsOn = false 769 | // TODO: send a message via ErrMsg 770 | } 771 | 772 | m.JobTab.CountsOn = false 773 | toggleSwitch(&m.JobTab.InfoOn) 774 | m.JobTab.AdjTableHeight(m.Globals.winH, m.Log) 775 | return m, nil 776 | 777 | // Stats - toggle on/off 778 | case key.Matches(msg, keybindings.DefaultKeyMap.Stats): 779 | switch m.ActiveTab { 780 | case tabJobs: 781 | m.Log.Printf("JobTab toggle from: %v\n", m.JobTab.StatsOn) 782 | toggleSwitch(&m.JobTab.StatsOn) 783 | // IF Info==ON AND NxM, turn it of, can't have both on below NxM 784 | if m.JobTab.InfoOn && m.Globals.winH < 60 { 785 | m.Log.Printf("Toggle StatsBox: Height %d too low (<60). 
Turn OFF Info\n", m.Globals.winH) 786 | m.JobTab.InfoOn = false 787 | } 788 | m.Log.Printf("JobTab toggle to: %v\n", m.JobTab.StatsOn) 789 | case tabJobHist: 790 | m.Log.Printf("JobHistTab toggle from: %v\n", m.JobHistTab.StatsOn) 791 | toggleSwitch(&m.JobHistTab.StatsOn) 792 | m.Log.Printf("JobHistTab toggle to: %v\n", m.JobHistTab.StatsOn) 793 | case tabCluster: 794 | m.Log.Printf("JobCluster toggle from: %v\n", m.ClusterTab.StatsOn) 795 | toggleSwitch(&m.ClusterTab.StatsOn) 796 | m.Log.Printf("JobCluster toggle to: %v\n", m.ClusterTab.StatsOn) 797 | } 798 | return m, nil 799 | 800 | // QUIT 801 | case key.Matches(msg, keybindings.DefaultKeyMap.Quit): 802 | m.Log.Printf("Quit key pressed\n") 803 | return m, tea.Quit 804 | } 805 | } 806 | 807 | return m, nil 808 | } 809 | 810 | func toggleSwitch(b *bool) { 811 | if *b { 812 | *b = false 813 | } else { 814 | *b = true 815 | } 816 | } 817 | -------------------------------------------------------------------------------- /internal/model/view.go: -------------------------------------------------------------------------------- 1 | package model 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | 7 | "github.com/CLIP-HPC/SlurmCommander/internal/keybindings" 8 | "github.com/CLIP-HPC/SlurmCommander/internal/styles" 9 | "github.com/CLIP-HPC/SlurmCommander/internal/version" 10 | "github.com/charmbracelet/lipgloss" 11 | ) 12 | 13 | // genTabs() generates top tabs 14 | func (m Model) genTabs() string { 15 | 16 | var doc strings.Builder 17 | 18 | tlist := make([]string, len(tabs)) 19 | for i, v := range tabs { 20 | if i == int(m.ActiveTab) { 21 | tlist = append(tlist, styles.TabActiveTab.Render(v)) 22 | } else { 23 | tlist = append(tlist, styles.Tab.Render(v)) 24 | } 25 | } 26 | row := lipgloss.JoinHorizontal(lipgloss.Top, tlist...) 
27 | 28 | //gap := tabGap.Render(strings.Repeat(" ", max(0, width-lipgloss.Width(row)-2))) 29 | gap := styles.TabGap.Render(strings.Repeat(" ", max(0, m.winW-lipgloss.Width(row)-2))) 30 | row = lipgloss.JoinHorizontal(lipgloss.Bottom, row, gap) 31 | doc.WriteString(row + "\n") 32 | 33 | return doc.String() 34 | } 35 | 36 | func max(a, b int) int { 37 | if a > b { 38 | return a 39 | } 40 | return b 41 | } 42 | 43 | func (m Model) tabAbout() string { 44 | 45 | s := "Version: " + version.BuildVersion + "\n" 46 | s += "Commit : " + version.BuildCommit + "\n" 47 | 48 | s += ` 49 | 50 | A special thank you goes to our code-crafters, bug-hunters, idea-pitchers: 51 | (in order of appearance) 52 | 53 | Petar Jager 54 | Seren Ümit 55 | Kilian Cavalotti 56 | Killian Murphy 57 | Hans-Nikolai Vießmann 58 | github.com/reedacus25 59 | ` 60 | 61 | return s 62 | } 63 | 64 | func (m *Model) genTabHelp() string { 65 | var th string 66 | switch m.ActiveTab { 67 | case tabJobs: 68 | th = "List of jobs in the queue" 69 | case tabJobHist: 70 | th = "List of jobs from all user associated accounts" 71 | case tabJobDetails: 72 | th = "Job details, select a job from Job History tab" 73 | case tabJobFromTemplate: 74 | th = "Edit and submit one of the job templates" 75 | case tabCluster: 76 | th = "List and status of cluster nodes" 77 | default: 78 | th = "SlurmCommander" 79 | } 80 | return th + "\n" 81 | } 82 | 83 | func (m Model) View() string { 84 | 85 | var ( 86 | header strings.Builder 87 | MainWindow strings.Builder 88 | ) 89 | 90 | // HEADER / TABS 91 | header.WriteString(m.genTabs()) 92 | header.WriteString(m.genTabHelp()) 93 | 94 | if m.Debug { 95 | // One debug line 96 | header.WriteString(fmt.Sprintf("%s Width: %d Height: %d ErrorMsg: %s\n", styles.TextRed.Render("DEBUG ON:"), m.Globals.winW, m.Globals.winH, m.Globals.ErrorMsg)) 97 | } 98 | 99 | if m.Globals.ErrorHelp != "" { 100 | m.Log.Println("Got error") 101 | header.WriteString(styles.ErrorHelp.Render(fmt.Sprintf("ERROR: %s", 
m.Globals.ErrorHelp))) 102 | } else { 103 | m.Log.Println("Got NO error") 104 | } 105 | 106 | // PICK and RENDER ACTIVE TAB 107 | switch m.ActiveTab { 108 | case tabJobs: 109 | m.Log.Printf("CALL JobTab.View()\n") 110 | MainWindow.WriteString(m.JobTab.View(m.Log)) 111 | 112 | case tabJobHist: 113 | m.Log.Printf("CALL JobHistTab.View()\n") 114 | MainWindow.WriteString(m.JobHistTab.View(m.Log)) 115 | 116 | case tabJobDetails: 117 | m.Log.Printf("CALL JobDetailsTab.View()\n") 118 | MainWindow.WriteString(m.JobDetailsTab.View(&m.JobHistTab, m.Log)) 119 | 120 | case tabJobFromTemplate: 121 | m.Log.Printf("CALL JobFromTemplate.View()\n") 122 | MainWindow.WriteString(m.JobFromTemplateTab.View(m.Log)) 123 | 124 | case tabCluster: 125 | m.Log.Printf("CALL ClusterTab.View()\n") 126 | MainWindow.WriteString(m.ClusterTab.View(m.Log)) 127 | 128 | case tabAbout: 129 | MainWindow.WriteString(m.tabAbout()) 130 | // TODO: default 131 | } 132 | 133 | return lipgloss.JoinVertical(lipgloss.Left, header.String(), styles.MainWindow.Render(MainWindow.String()), styles.HelpWindow.Render(m.Help.View(keybindings.DefaultKeyMap))) 134 | } 135 | -------------------------------------------------------------------------------- /internal/openapi/openapi.go: -------------------------------------------------------------------------------- 1 | package openapi 2 | 3 | // TODO: come up with a workflow to maintain and (auto)-import currated list of 4 | // openapi_version.json files and/or openapi.gen.go for different slurm versions 5 | // 6 | 7 | //go:generate oapi-codegen --old-config-style --package=openapi --generate=types -alias-types -o ./openapi.gen.go ./openapi_0.0.39_master.json 8 | -------------------------------------------------------------------------------- /internal/openapidb/openapi_db.gen.go: -------------------------------------------------------------------------------- 1 | // Package openapidb provides primitives to interact with the openapi HTTP API. 
//
// Code generated by github.com/deepmap/oapi-codegen version v1.11.0 DO NOT EDIT.
package openapidb

// Keys identifying the API's security-scheme scopes (emitted by the generator;
// presumably consumed by oapi-codegen client/server plumbing — confirm before reuse).
const (
	TokenScopes = "token.Scopes"
	UserScopes  = "user.Scopes"
)

// Account description
type Dbv0037Account struct {
	// List of assigned associations
	Associations *[]Dbv0037AssociationShortInfo `json:"associations,omitempty"`

	// List of assigned coordinators
	Coordinators *[]Dbv0037CoordinatorInfo `json:"coordinators,omitempty"`

	// Description of account
	Description *string `json:"description,omitempty"`

	// List of properties of account
	Flags *[]string `json:"flags,omitempty"`

	// Name of account
	Name *string `json:"name,omitempty"`

	// Assigned organization of account
	Organization *string `json:"organization,omitempty"`
}

// Dbv0037AccountInfo defines model for dbv0.0.37_account_info.
type Dbv0037AccountInfo struct {
	// List of accounts
	Accounts *[]Dbv0037Account `json:"accounts,omitempty"`

	// Slurm errors
	Errors *[]Dbv0037Error `json:"errors,omitempty"`
}

// Dbv0037AccountResponse defines model for dbv0.0.37_account_response.
type Dbv0037AccountResponse struct {
	// Slurm errors
	Errors *[]Dbv0037Error `json:"errors,omitempty"`
}

// Association description
type Dbv0037Association struct {
	// Assigned account
	Account *string `json:"account,omitempty"`

	// Assigned cluster
	Cluster *string `json:"cluster,omitempty"`

	// Default settings
	Default *struct {
		// Default QOS
		Qos *string `json:"qos,omitempty"`
	} `json:"default,omitempty"`

	// List of properties of association
	Flags *[]string `json:"flags,omitempty"`

	// is default association
	// NOTE(review): integer-coded flag in the upstream spec (presumably 0/1), kept as *int by the generator.
	IsDefault *int `json:"is_default,omitempty"`

	// Max settings
	Max *struct {
		// Max jobs settings
		Jobs *struct {
			// Max TRES for job accruing priority
			Accruing *int `json:"accruing,omitempty"`

			// Max TRES for active total jobs
			Active *int `json:"active,omitempty"`

			// Max jobs per settings
			Per *struct {
				// Max wallclock per job
				WallClock *int `json:"wall_clock,omitempty"`
			} `json:"per,omitempty"`

			// Max TRES for job total submitted
			Total *int `json:"total,omitempty"`
		} `json:"jobs,omitempty"`

		// Max per settings
		Per *struct {
			// Max per accounting settings
			Account *struct {
				// Max wallclock per account
				WallClock *int `json:"wall_clock,omitempty"`
			} `json:"account,omitempty"`
		} `json:"per,omitempty"`

		// Max TRES settings
		Tres *struct {
			// Max TRES per group
			Group *struct {
				// TRES list of attributes
				Active *Dbv0037TresList `json:"active,omitempty"`

				// TRES list of attributes
				Minutes *Dbv0037TresList `json:"minutes,omitempty"`
			} `json:"group,omitempty"`

			// Max TRES minutes settings
			Minutes *struct {
				// Max TRES minutes per settings
				Per *struct {
					// TRES list of attributes
					Job *Dbv0037TresList `json:"job,omitempty"`
				} `json:"per,omitempty"`

				// TRES list of attributes
				Total *Dbv0037TresList `json:"total,omitempty"`
			} `json:"minutes,omitempty"`

			// Max TRES per settings
			Per *struct {
				// TRES list of attributes
				Job *Dbv0037TresList `json:"job,omitempty"`

				// TRES list of attributes
				Node *Dbv0037TresList `json:"node,omitempty"`
			} `json:"per,omitempty"`

			// TRES list of attributes
			Total *Dbv0037TresList `json:"total,omitempty"`
		} `json:"tres,omitempty"`
	} `json:"max,omitempty"`

	// Min settings
	Min *struct {
		// Min priority threshold
		PriorityThreshold *int `json:"priority_threshold,omitempty"`
	} `json:"min,omitempty"`

	// Parent account name
	ParentAccount *string `json:"parent_account,omitempty"`

	// Assigned partition
	Partition *string `json:"partition,omitempty"`

	// Assigned priority
	Priority *int `json:"priority,omitempty"`

	// Assigned QOS
	Qos *[]string `json:"qos,omitempty"`

	// Raw fairshare shares
	SharesRaw *int `json:"shares_raw,omitempty"`

	// Association usage
	Usage *struct {
		// Jobs accuring priority
		AccrueJobCount *int `json:"accrue_job_count,omitempty"`

		// Effective normalized usage
		EffectiveNormalizedUsage *float32 `json:"effective_normalized_usage,omitempty"`

		// Fairshare factor
		FairshareFactor *float32 `json:"fairshare_factor,omitempty"`

		// Fairshare level
		FairshareLevel *float32 `json:"fairshare_level,omitempty"`

		// Fairshare shares
		FairshareShares *int `json:"fairshare_shares,omitempty"`

		// Group used wallclock time (s)
		GroupUsedWallclock *float32 `json:"group_used_wallclock,omitempty"`

		// Total jobs submitted
		JobCount *int `json:"job_count,omitempty"`

		// Currently active jobs
		// NOTE(review): comment/field mismatch (field is normalized_priority) — appears to be inherited from the generator/spec; confirm against the OpenAPI json.
		NormalizedPriority *int `json:"normalized_priority,omitempty"`

		// Normalized shares
		NormalizedShares *float32 `json:"normalized_shares,omitempty"`

		// Raw usage
		RawUsage *int `json:"raw_usage,omitempty"`
	} `json:"usage,omitempty"`

	// Assigned user
	User *string `json:"user,omitempty"`
}

// Dbv0037AssociationShortInfo defines model for dbv0.0.37_association_short_info.
type Dbv0037AssociationShortInfo struct {
	// Account name
	Account *string `json:"account,omitempty"`

	// Cluster name
	Cluster *string `json:"cluster,omitempty"`

	// Partition name (optional)
	Partition *string `json:"partition,omitempty"`

	// User name
	User *string `json:"user,omitempty"`
}

// Dbv0037AssociationsInfo defines model for dbv0.0.37_associations_info.
type Dbv0037AssociationsInfo struct {
	// Array of associations
	Associations *[]Dbv0037Association `json:"associations,omitempty"`

	// Slurm errors
	Errors *[]Dbv0037Error `json:"errors,omitempty"`
}

// Dbv0037ClusterInfo defines model for dbv0.0.37_cluster_info.
type Dbv0037ClusterInfo struct {
	// Information about associations
	Associations *struct {
		Root *Dbv0037AssociationShortInfo `json:"root,omitempty"`
	} `json:"associations,omitempty"`

	// Information about controller
	Controller *struct {
		// Hostname
		Host *string `json:"host,omitempty"`

		// Port number
		Port *int `json:"port,omitempty"`
	} `json:"controller,omitempty"`

	// List of properties of cluster
	Flags *[]string `json:"flags,omitempty"`

	// Cluster name
	Name *string `json:"name,omitempty"`

	// Assigned nodes
	Nodes *string `json:"nodes,omitempty"`

	// Number rpc version
	RpcVersion *int `json:"rpc_version,omitempty"`

	// Configured select plugin
	SelectPlugin *string `json:"select_plugin,omitempty"`

	// List of TRES in cluster
	// NOTE(review): element type Dbv0037ResponseTres carries only an Errors list — looks like a spec oddity; confirm against the OpenAPI json.
	Tres *[]Dbv0037ResponseTres `json:"tres,omitempty"`
}

// Dbv0037ConfigInfo defines model for dbv0.0.37_config_info.
type Dbv0037ConfigInfo struct {
	// Array of accounts
	Accounts *[]Dbv0037Account `json:"accounts,omitempty"`

	// Array of associations
	Associations *[]Dbv0037Association `json:"associations,omitempty"`

	// Slurm errors
	Errors *[]Dbv0037Error `json:"errors,omitempty"`

	// Array of qos
	Qos *[]Dbv0037Qos `json:"qos,omitempty"`

	// Array of TRES
	Tres *[]Dbv0037TresList `json:"tres,omitempty"`

	// Array of users
	Users *[]Dbv0037User `json:"users,omitempty"`

	// Array of wckeys
	Wckeys *[]Dbv0037Wckey `json:"wckeys,omitempty"`
}

// Dbv0037ConfigResponse defines model for dbv0.0.37_config_response.
type Dbv0037ConfigResponse struct {
	// Slurm errors
	Errors *[]Dbv0037Error `json:"errors,omitempty"`
}

// Dbv0037CoordinatorInfo defines model for dbv0.0.37_coordinator_info.
type Dbv0037CoordinatorInfo struct {
	// If user is coordinator of this account directly or coordinator status was inheirted from a higher account in the tree
	// (sic: "inheirted" — wording likely comes from the upstream OpenAPI spec; do not hand-edit generated text)
	Direct *int `json:"direct,omitempty"`

	// Name of user
	Name *string `json:"name,omitempty"`
}

// Dbv0037Diag defines model for dbv0.0.37_diag.
type Dbv0037Diag struct {
	// Slurm errors
	Errors *[]Dbv0037Error `json:"errors,omitempty"`

	// dictionary of Slurmdb statistics
	Statistics *struct {
		RPCs *[]struct {
			// Number of RPCs
			Count *int `json:"count,omitempty"`

			// RPC type
			Rpc *string `json:"rpc,omitempty"`

			// Time values
			Time *struct {
				// Average time spent processing this RPC type
				Average *int `json:"average,omitempty"`

				// Total time spent processing this RPC type
				Total *int `json:"total,omitempty"`
			} `json:"time,omitempty"`
		} `json:"RPCs,omitempty"`
		Rollups *[]struct {
			// Timestamp of last cycle
			LastCycle *int `json:"last_cycle,omitempty"`

			// Timestamp of last rollup
			LastRun *int `json:"last_run,omitempty"`

			// Max time of all cycles
			MaxCycle *int `json:"max_cycle,omitempty"`

			// Average time (s) of cycle
			MeanCycles *int `json:"mean_cycles,omitempty"`

			// Total time (s) spent doing rollup
			TotalTime *int `json:"total_time,omitempty"`

			// Type of rollup
			Type *string `json:"type,omitempty"`
		} `json:"rollups,omitempty"`

		// Unix timestamp of start time
		TimeStart *int `json:"time_start,omitempty"`
		Users *[]struct {
			// Number of RPCs
			Count *int `json:"count,omitempty"`

			// Time values
			Time *struct {
				// Average time spent processing each user RPC
				Average *int `json:"average,omitempty"`

				// Total time spent processing each user RPC
				Total *int `json:"total,omitempty"`
			} `json:"time,omitempty"`

			// User name
			User *string `json:"user,omitempty"`
		} `json:"users,omitempty"`
	} `json:"statistics,omitempty"`
}

// Dbv0037Error defines model for dbv0.0.37_error.
type Dbv0037Error struct {
	// Error number
	Errno *int `json:"errno,omitempty"`

	// Error message
	Error *string `json:"error,omitempty"`
}

// Single job description
type Dbv0037Job struct {
	// Account charged by job
	Account *string `json:"account,omitempty"`

	// Nodes allocated to job
	AllocationNodes *int `json:"allocation_nodes,omitempty"`

	// Array properties (optional)
	Array *struct {
		// Job id of array
		JobId *int `json:"job_id,omitempty"`

		// Limits on array settings
		Limits *struct {
			// Limits on array settings
			Max *struct {
				// Limits on array settings
				Running *struct {
					// Max running tasks in array at any one time
					Tasks *int `json:"tasks,omitempty"`
				} `json:"running,omitempty"`
			} `json:"max,omitempty"`
		} `json:"limits,omitempty"`

		// Array task
		Task *string `json:"task,omitempty"`

		// Array task id
		TaskId *int `json:"task_id,omitempty"`
	} `json:"array,omitempty"`
	Association *Dbv0037AssociationShortInfo `json:"association,omitempty"`

	// Assigned cluster
	Cluster *string `json:"cluster,omitempty"`

	// Job comments by type
	Comment *struct {
		// Administrator set comment
		Administrator *string `json:"administrator,omitempty"`

		// Job comment
		Job *string `json:"job,omitempty"`

		// System set comment
		System *string `json:"system,omitempty"`
	} `json:"comment,omitempty"`

	// Constraints on job
	Constraints *string `json:"constraints,omitempty"`
	DerivedExitCode *Dbv0037JobExitCode `json:"derived_exit_code,omitempty"`
	ExitCode *Dbv0037JobExitCode `json:"exit_code,omitempty"`

	// List of properties of job
	Flags *[]string `json:"flags,omitempty"`

	// User's group to run job
	Group *string `json:"group,omitempty"`

	// Heterogeneous Job details (optional)
	Het *struct {
		// Parent HetJob id
		JobId *int `json:"job_id,omitempty"`

		// Offset of this job to parent
		JobOffset *map[string]interface{} `json:"job_offset,omitempty"`
	} `json:"het,omitempty"`

	// Job id
	JobId *int `json:"job_id,omitempty"`

	// User who requested job killed
	KillRequestUser *string `json:"kill_request_user,omitempty"`

	// Multi-Category Security
	Mcs *struct {
		// Assigned MCS label
		Label *string `json:"label,omitempty"`
	} `json:"mcs,omitempty"`

	// Assigned job name
	Name *string `json:"name,omitempty"`

	// List of nodes allocated for job
	Nodes *string `json:"nodes,omitempty"`

	// Assigned job's partition
	Partition *string `json:"partition,omitempty"`

	// Priority
	Priority *int `json:"priority,omitempty"`

	// Assigned qos name
	Qos *string `json:"qos,omitempty"`

	// Job run requirements
	Required *struct {
		// Required number of CPUs
		CPUs *int `json:"CPUs,omitempty"`

		// Required amount of memory (MiB)
		Memory *int `json:"memory,omitempty"`
	} `json:"required,omitempty"`

	// Reservation usage details
	Reservation *struct {
		// Database id of reservation
		Id *int `json:"id,omitempty"`

		// Name of reservation
		// NOTE(review): typed *int by the generator although it is documented as a name — mirrors the spec; confirm before relying on it.
		Name *int `json:"name,omitempty"`
	} `json:"reservation,omitempty"`

	// State properties of job
	State *struct {
		// Current state of job
		Current *string `json:"current,omitempty"`

		// Last reason job didn't run
		Reason *string `json:"reason,omitempty"`
	} `json:"state,omitempty"`

	// Job step description
	Steps *[]Dbv0037JobStep `json:"steps,omitempty"`

	// Time properties
	Time *struct {
		// Total time elapsed
		Elapsed *int `json:"elapsed,omitempty"`

		// Total time eligible to run
		Eligible *int `json:"eligible,omitempty"`

		// Timestamp of when job ended
		End *int `json:"end,omitempty"`

		// Job wall clock time limit
		Limit *int `json:"limit,omitempty"`

		// Timestamp of when job started
		Start *int `json:"start,omitempty"`

		// Timestamp of when job submitted
		Submission *int `json:"submission,omitempty"`

		// Timestamp of when job last suspended
		Suspended *int `json:"suspended,omitempty"`

		// System time values
		System *struct {
			// Total number of CPU-seconds used by the system on behalf of the process (in kernel mode), in microseconds
			Microseconds *int `json:"microseconds,omitempty"`

			// Total number of CPU-seconds used by the system on behalf of the process (in kernel mode), in seconds
			Seconds *int `json:"seconds,omitempty"`
		} `json:"system,omitempty"`

		// System time values
		Total *struct {
			// Total number of CPU-seconds used by the job, in microseconds
			Microseconds *int `json:"microseconds,omitempty"`

			// Total number of CPU-seconds used by the job, in seconds
			Seconds *int `json:"seconds,omitempty"`
		} `json:"total,omitempty"`

		// User land time values
		User *struct {
			// Total number of CPU-seconds used by the job in user land, in microseconds
			Microseconds *int `json:"microseconds,omitempty"`

			// Total number of CPU-seconds used by the job in user land, in seconds
			Seconds *int `json:"seconds,omitempty"`
		} `json:"user,omitempty"`
	} `json:"time,omitempty"`

	// TRES settings
	Tres *struct {
		// TRES list of attributes
		Allocated *Dbv0037TresList `json:"allocated,omitempty"`

		// TRES list of attributes
		Requested *Dbv0037TresList `json:"requested,omitempty"`
	} `json:"tres,omitempty"`

	// Job user
	User *string `json:"user,omitempty"`

	// Job assigned wckey details
	Wckey *struct {
		// wckey flags
		Flags *[]string `json:"flags,omitempty"`

		// Job assigned wckey
		Wckey *string `json:"wckey,omitempty"`
	} `json:"wckey,omitempty"`

	// Directory where job was initially started
	WorkingDirectory *string `json:"working_directory,omitempty"`
}

// Dbv0037JobExitCode defines model for dbv0.0.37_job_exit_code.
type Dbv0037JobExitCode struct {
	// Return code from parent process
	ReturnCode *int `json:"return_code,omitempty"`

	// Signal details (if signaled)
	Signal *struct {
		// Name of signal received
		Name *string `json:"name,omitempty"`

		// Signal number process received
		SignalId *int `json:"signal_id,omitempty"`
	} `json:"signal,omitempty"`

	// Job exit status
	Status *string `json:"status,omitempty"`
}

// Dbv0037JobInfo defines model for dbv0.0.37_job_info.
type Dbv0037JobInfo struct {
	// Slurm errors
	Errors *[]Dbv0037Error `json:"errors,omitempty"`

	// Array of jobs
	Jobs *[]Dbv0037Job `json:"jobs,omitempty"`
}

// Dbv0037JobStep defines model for dbv0.0.37_job_step.
type Dbv0037JobStep struct {
	// CPU properties
	CPU *struct {
		// CPU governor
		Governor *[]string `json:"governor,omitempty"`

		// CPU frequency requested
		RequestedFrequency *struct {
			// Max CPU frequency
			Max *int `json:"max,omitempty"`

			// Min CPU frequency
			Min *int `json:"min,omitempty"`
		} `json:"requested_frequency,omitempty"`
	} `json:"CPU,omitempty"`
	ExitCode *Dbv0037JobExitCode `json:"exit_code,omitempty"`

	// User who requested job killed
	KillRequestUser *string `json:"kill_request_user,omitempty"`

	// Node details
	Nodes *struct {
		// Total number of nodes in step
		Count *int `json:"count,omitempty"`

		// Nodes in step
		Range *string `json:"range,omitempty"`
	} `json:"nodes,omitempty"`

	// First process PID
	Pid *string `json:"pid,omitempty"`

	// State of job step
	State *string `json:"state,omitempty"`

	// Statistics of job step
	Statistics *struct {
		// Statistics of CPU
		CPU *struct {
			// Actual frequency of CPU during step
			ActualFrequency *int `json:"actual_frequency,omitempty"`
		} `json:"CPU,omitempty"`

		// Statistics of energy
		Energy *struct {
			// Energy consumed during step
			Consumed *int `json:"consumed,omitempty"`
		} `json:"energy,omitempty"`
	} `json:"statistics,omitempty"`

	// Step details
	Step *struct {
		// Heterogeneous job details
		Het *struct {
			// Parent HetJob component id
			Component *int `json:"component,omitempty"`
		} `json:"het,omitempty"`
		// NOTE(review): untyped in the spec, hence *interface{} — shape of the step id varies; confirm against the OpenAPI json before use.
		Id *interface{} `json:"id,omitempty"`

		// Parent job id
		JobId *int `json:"job_id,omitempty"`

		// Step name
		Name *string `json:"name,omitempty"`
	} `json:"step,omitempty"`

	// Task properties
	Task *struct {
		// Task distribution type
		Distribution *string `json:"distribution,omitempty"`
	} `json:"task,omitempty"`

	// Task properties
	Tasks *struct {
		// Number of tasks in step
		Count *int `json:"count,omitempty"`
	} `json:"tasks,omitempty"`

	// Time properties
	Time *struct {
		// Total time elapsed
		Elapsed *int `json:"elapsed,omitempty"`

		// Timestamp of when job ended
		End *int `json:"end,omitempty"`

		// Timestamp of when job started
		Start *int `json:"start,omitempty"`

		// Timestamp of when job last suspended
		Suspended *int `json:"suspended,omitempty"`

		// System time values
		System *struct {
			// Total number of CPU-seconds used by the system on behalf of the process (in kernel mode), in microseconds
			Microseconds *int `json:"microseconds,omitempty"`

			// Total number of CPU-seconds used by the system on behalf of the process (in kernel mode), in seconds
			Seconds *int `json:"seconds,omitempty"`
		} `json:"system,omitempty"`

		// System time values
		Total *struct {
			// Total number of CPU-seconds used by the job, in microseconds
			Microseconds *int `json:"microseconds,omitempty"`

			// Total number of CPU-seconds used by the job, in seconds
			Seconds *int `json:"seconds,omitempty"`
		} `json:"total,omitempty"`

		// User land time values
		User *struct {
			// Total number of CPU-seconds used by the job in user land, in microseconds
			Microseconds *int `json:"microseconds,omitempty"`

			// Total number of CPU-seconds used by the job in user land, in seconds
			Seconds *int `json:"seconds,omitempty"`
		} `json:"user,omitempty"`
	} `json:"time,omitempty"`

	// TRES usage
	Tres *struct {
		// TRES list of attributes
		Allocated *Dbv0037TresList `json:"allocated,omitempty"`

		// TRES requested for job
		Consumed *struct {
			// TRES list of attributes
			Average *Dbv0037TresList `json:"average,omitempty"`

			// TRES list of attributes
			Max *Dbv0037TresList `json:"max,omitempty"`

			// TRES list of attributes
			Min *Dbv0037TresList `json:"min,omitempty"`

			// TRES list of attributes
			Total *Dbv0037TresList `json:"total,omitempty"`
		} `json:"consumed,omitempty"`

		// TRES requested for job
		Requested *struct {
			// TRES list of attributes
			Average *Dbv0037TresList `json:"average,omitempty"`

			// TRES list of attributes
			Max *Dbv0037TresList `json:"max,omitempty"`

			// TRES list of attributes
			Min *Dbv0037TresList `json:"min,omitempty"`

			// TRES list of attributes
			Total *Dbv0037TresList `json:"total,omitempty"`
		} `json:"requested,omitempty"`
	} `json:"tres,omitempty"`
}

// QOS description
type Dbv0037Qos struct {
	// QOS description
	Description *string `json:"description,omitempty"`

	// List of properties of QOS
	Flags *[]string `json:"flags,omitempty"`

	// Database id
	Id *string `json:"id,omitempty"`

	// Assigned limits
	Limits *struct {
		// factor to apply to TRES count for associations using this QOS
		Factor *float32 `json:"factor,omitempty"`

		// Limits on max settings
		Max *struct {
			// Limits on accruing priority
			Accruing *struct {
				// Max accuring priority per setting
				Per *struct {
					// Max accuring priority per account
					Account *int `json:"account,omitempty"`

					// Max accuring priority per user
					User *int `json:"user,omitempty"`
				} `json:"per,omitempty"`
			} `json:"accruing,omitempty"`

			// Limits on jobs settings
			Jobs *struct {
				// Limits on active jobs settings
				ActiveJobs *struct {
					// Limits on active jobs per settings
					Per *struct {
						// Max jobs per account
						Account *int `json:"account,omitempty"`

						// Max jobs per user
						User *int `json:"user,omitempty"`
					} `json:"per,omitempty"`
				} `json:"active_jobs,omitempty"`
			} `json:"jobs,omitempty"`

			// Limits on TRES
			Tres *struct {
				// Max TRES minutes settings
				Minutes *struct {
					// Max TRES minutes per settings
					Per *struct {
						// TRES list of attributes
						Account *Dbv0037TresList `json:"account,omitempty"`

						// TRES list of attributes
						Job *Dbv0037TresList `json:"job,omitempty"`

						// TRES list of attributes
						User *Dbv0037TresList `json:"user,omitempty"`
					} `json:"per,omitempty"`
				} `json:"minutes,omitempty"`

				// Max TRES per settings
				Per *struct {
					// TRES list of attributes
					Account *Dbv0037TresList `json:"account,omitempty"`

					// TRES list of attributes
					Job *Dbv0037TresList `json:"job,omitempty"`

					// TRES list of attributes
					Node *Dbv0037TresList `json:"node,omitempty"`

					// TRES list of attributes
					User *Dbv0037TresList `json:"user,omitempty"`
				} `json:"per,omitempty"`
			} `json:"tres,omitempty"`

			// Limit on wallclock settings
			WallClock *struct {
				// Limit on wallclock per settings
				Per *struct {
					// Max wallclock per job
					Job *int `json:"job,omitempty"`

					// Max wallclock per QOS
					Qos *int `json:"qos,omitempty"`
				} `json:"per,omitempty"`
			} `json:"wall_clock,omitempty"`
		} `json:"max,omitempty"`

		// Min limit settings
		Min *struct {
			// Min priority threshold
			PriorityThreshold *int `json:"priority_threshold,omitempty"`

			// Min tres settings
			Tres *struct {
				// Min tres per settings
				Per *struct {
					// TRES list of attributes
					Job *Dbv0037TresList `json:"job,omitempty"`
				} `json:"per,omitempty"`
			} `json:"tres,omitempty"`
		} `json:"min,omitempty"`
	} `json:"limits,omitempty"`

	// Preemption settings
	Preempt *struct {
		// Grace period (s) before jobs can preempted
		ExemptTime *int `json:"exempt_time,omitempty"`

		// List of preemptable QOS
		List *[]string `json:"list,omitempty"`

		// List of preemption modes
		Mode *[]string `json:"mode,omitempty"`
	} `json:"preempt,omitempty"`

	// QOS priority
	Priority *int `json:"priority,omitempty"`

	// Usage factor
	UsageFactor *float32 `json:"usage_factor,omitempty"`

	// Usage threshold
	UsageThreshold *float32 `json:"usage_threshold,omitempty"`
}

// Dbv0037QosInfo defines model for dbv0.0.37_qos_info.
type Dbv0037QosInfo struct {
	// Slurm errors
	Errors *[]Dbv0037Error `json:"errors,omitempty"`

	// Array of QOS
	Qos *[]Dbv0037Qos `json:"qos,omitempty"`
}

// Dbv0037ResponseAccountDelete defines model for dbv0.0.37_response_account_delete.
type Dbv0037ResponseAccountDelete struct {
	// Slurm errors
	Errors *[]Dbv0037Error `json:"errors,omitempty"`
}

// Dbv0037ResponseAssociationDelete defines model for dbv0.0.37_response_association_delete.
type Dbv0037ResponseAssociationDelete struct {
	// Slurm errors
	Errors *[]Dbv0037Error `json:"errors,omitempty"`
}

// Dbv0037ResponseAssociations defines model for dbv0.0.37_response_associations.
type Dbv0037ResponseAssociations struct {
	// Slurm errors
	Errors *[]Dbv0037Error `json:"errors,omitempty"`
}

// Dbv0037ResponseClusterAdd defines model for dbv0.0.37_response_cluster_add.
905 | type Dbv0037ResponseClusterAdd struct { 906 | // Slurm errors 907 | Errors *[]Dbv0037Error `json:"errors,omitempty"` 908 | } 909 | 910 | // Dbv0037ResponseClusterDelete defines model for dbv0.0.37_response_cluster_delete. 911 | type Dbv0037ResponseClusterDelete struct { 912 | // Slurm errors 913 | Errors *[]Dbv0037Error `json:"errors,omitempty"` 914 | } 915 | 916 | // Dbv0037ResponseQosDelete defines model for dbv0.0.37_response_qos_delete. 917 | type Dbv0037ResponseQosDelete struct { 918 | // Slurm errors 919 | Errors *[]Dbv0037Error `json:"errors,omitempty"` 920 | } 921 | 922 | // Dbv0037ResponseTres defines model for dbv0.0.37_response_tres. 923 | type Dbv0037ResponseTres struct { 924 | // Slurm errors 925 | Errors *[]Dbv0037Error `json:"errors,omitempty"` 926 | } 927 | 928 | // Dbv0037ResponseUserDelete defines model for dbv0.0.37_response_user_delete. 929 | type Dbv0037ResponseUserDelete struct { 930 | // Slurm errors 931 | Errors *[]Dbv0037Error `json:"errors,omitempty"` 932 | } 933 | 934 | // Dbv0037ResponseUserUpdate defines model for dbv0.0.37_response_user_update. 935 | type Dbv0037ResponseUserUpdate struct { 936 | // Slurm errors 937 | Errors *[]Dbv0037Error `json:"errors,omitempty"` 938 | } 939 | 940 | // Dbv0037ResponseWckeyAdd defines model for dbv0.0.37_response_wckey_add. 941 | type Dbv0037ResponseWckeyAdd struct { 942 | // Slurm errors 943 | Errors *[]Dbv0037Error `json:"errors,omitempty"` 944 | } 945 | 946 | // Dbv0037ResponseWckeyDelete defines model for dbv0.0.37_response_wckey_delete. 947 | type Dbv0037ResponseWckeyDelete struct { 948 | // Slurm errors 949 | Errors *[]Dbv0037Error `json:"errors,omitempty"` 950 | } 951 | 952 | // Dbv0037TresInfo defines model for dbv0.0.37_tres_info. 
953 | type Dbv0037TresInfo struct { 954 | // Slurm errors 955 | Errors *[]Dbv0037Error `json:"errors,omitempty"` 956 | 957 | // Array of tres 958 | Tres *[]Dbv0037TresList `json:"tres,omitempty"` 959 | } 960 | 961 | // TRES list of attributes 962 | type Dbv0037TresList = []struct { 963 | // count of TRES 964 | Count *int `json:"count,omitempty"` 965 | 966 | // database id 967 | Id *int `json:"id,omitempty"` 968 | 969 | // TRES name (optional) 970 | Name *string `json:"name,omitempty"` 971 | 972 | // TRES type 973 | Type *string `json:"type,omitempty"` 974 | } 975 | 976 | // User description 977 | type Dbv0037User struct { 978 | // Description of administrator level 979 | AdministratorLevel *string `json:"administrator_level,omitempty"` 980 | 981 | // Assigned associations 982 | Associations *struct { 983 | Root *Dbv0037AssociationShortInfo `json:"root,omitempty"` 984 | } `json:"associations,omitempty"` 985 | 986 | // List of assigned coordinators 987 | Coordinators *[]Dbv0037CoordinatorInfo `json:"coordinators,omitempty"` 988 | 989 | // Default settings 990 | Default *struct { 991 | // Default account name 992 | Account *string `json:"account,omitempty"` 993 | 994 | // Default wckey 995 | Wckey *string `json:"wckey,omitempty"` 996 | } `json:"default,omitempty"` 997 | 998 | // User name 999 | Name *string `json:"name,omitempty"` 1000 | } 1001 | 1002 | // Dbv0037UserInfo defines model for dbv0.0.37_user_info. 1003 | type Dbv0037UserInfo struct { 1004 | // Slurm errors 1005 | Errors *[]Dbv0037Error `json:"errors,omitempty"` 1006 | 1007 | // Array of users 1008 | Users *[]Dbv0037User `json:"users,omitempty"` 1009 | } 1010 | 1011 | // Dbv0037Wckey defines model for dbv0.0.37_wckey. 
1012 | type Dbv0037Wckey struct { 1013 | // List of assigned accounts 1014 | Accounts *[]string `json:"accounts,omitempty"` 1015 | 1016 | // Cluster name 1017 | Cluster *string `json:"cluster,omitempty"` 1018 | 1019 | // List of properties of wckey 1020 | Flags *[]string `json:"flags,omitempty"` 1021 | 1022 | // wckey database unique id 1023 | Id *int `json:"id,omitempty"` 1024 | 1025 | // wckey name 1026 | Name *string `json:"name,omitempty"` 1027 | 1028 | // wckey user 1029 | User *string `json:"user,omitempty"` 1030 | } 1031 | 1032 | // Dbv0037WckeyInfo defines model for dbv0.0.37_wckey_info. 1033 | type Dbv0037WckeyInfo struct { 1034 | // Slurm errors 1035 | Errors *[]Dbv0037Error `json:"errors,omitempty"` 1036 | 1037 | // List of wckeys 1038 | Wckeys *[]Dbv0037Wckey `json:"wckeys,omitempty"` 1039 | } 1040 | 1041 | // SlurmdbdDeleteAssociationParams defines parameters for SlurmdbdDeleteAssociation. 1042 | type SlurmdbdDeleteAssociationParams struct { 1043 | // Cluster name 1044 | Cluster *string `form:"cluster,omitempty" json:"cluster,omitempty"` 1045 | 1046 | // Account name 1047 | Account string `form:"account" json:"account"` 1048 | 1049 | // User name 1050 | User string `form:"user" json:"user"` 1051 | 1052 | // Partition Name 1053 | Partition *string `form:"partition,omitempty" json:"partition,omitempty"` 1054 | } 1055 | 1056 | // SlurmdbdGetAssociationParams defines parameters for SlurmdbdGetAssociation. 1057 | type SlurmdbdGetAssociationParams struct { 1058 | // Cluster name 1059 | Cluster *string `form:"cluster,omitempty" json:"cluster,omitempty"` 1060 | 1061 | // Account name 1062 | Account *string `form:"account,omitempty" json:"account,omitempty"` 1063 | 1064 | // User name 1065 | User *string `form:"user,omitempty" json:"user,omitempty"` 1066 | 1067 | // Partition Name 1068 | Partition *string `form:"partition,omitempty" json:"partition,omitempty"` 1069 | } 1070 | 1071 | // SlurmdbdGetJobsParams defines parameters for SlurmdbdGetJobs. 
1072 | type SlurmdbdGetJobsParams struct { 1073 | // Filter by submission time 1074 | // Accepted formats: 1075 | // HH:MM[:SS] [AM|PM] 1076 | // MMDD[YY] or MM/DD[/YY] or MM.DD[.YY] 1077 | // MM/DD[/YY]-HH:MM[:SS] 1078 | // YYYY-MM-DD[THH:MM[:SS]] 1079 | SubmitTime *string `form:"submit_time,omitempty" json:"submit_time,omitempty"` 1080 | 1081 | // Filter by start time 1082 | // Accepted formats: 1083 | // HH:MM[:SS] [AM|PM] 1084 | // MMDD[YY] or MM/DD[/YY] or MM.DD[.YY] 1085 | // MM/DD[/YY]-HH:MM[:SS] 1086 | // YYYY-MM-DD[THH:MM[:SS]] 1087 | StartTime *string `form:"start_time,omitempty" json:"start_time,omitempty"` 1088 | 1089 | // Filter by end time 1090 | // Accepted formats: 1091 | // HH:MM[:SS] [AM|PM] 1092 | // MMDD[YY] or MM/DD[/YY] or MM.DD[.YY] 1093 | // MM/DD[/YY]-HH:MM[:SS] 1094 | // YYYY-MM-DD[THH:MM[:SS]] 1095 | EndTime *string `form:"end_time,omitempty" json:"end_time,omitempty"` 1096 | 1097 | // Comma delimited list of accounts to match 1098 | Account *string `form:"account,omitempty" json:"account,omitempty"` 1099 | 1100 | // Comma delimited list of associations to match 1101 | Association *string `form:"association,omitempty" json:"association,omitempty"` 1102 | 1103 | // Comma delimited list of cluster to match 1104 | Cluster *string `form:"cluster,omitempty" json:"cluster,omitempty"` 1105 | 1106 | // Comma delimited list of constraints to match 1107 | Constraints *string `form:"constraints,omitempty" json:"constraints,omitempty"` 1108 | 1109 | // Number of CPUs high range 1110 | CpusMax *string `form:"cpus_max,omitempty" json:"cpus_max,omitempty"` 1111 | 1112 | // Number of CPUs low range 1113 | CpusMin *string `form:"cpus_min,omitempty" json:"cpus_min,omitempty"` 1114 | 1115 | // Report job step information 1116 | SkipSteps *bool `form:"skip_steps,omitempty" json:"skip_steps,omitempty"` 1117 | 1118 | // Disable waiting for result from slurmdbd 1119 | DisableWaitForResult *bool `form:"disable_wait_for_result,omitempty" 
json:"disable_wait_for_result,omitempty"` 1120 | 1121 | // Exit code of job 1122 | ExitCode *string `form:"exit_code,omitempty" json:"exit_code,omitempty"` 1123 | 1124 | // Comma delimited list of formats to match 1125 | Format *string `form:"format,omitempty" json:"format,omitempty"` 1126 | 1127 | // Comma delimited list of groups to match 1128 | Group *string `form:"group,omitempty" json:"group,omitempty"` 1129 | 1130 | // Comma delimited list of job names to match 1131 | JobName *string `form:"job_name,omitempty" json:"job_name,omitempty"` 1132 | 1133 | // Number of nodes high range 1134 | NodesMax *string `form:"nodes_max,omitempty" json:"nodes_max,omitempty"` 1135 | 1136 | // Number of nodes low range 1137 | NodesMin *string `form:"nodes_min,omitempty" json:"nodes_min,omitempty"` 1138 | 1139 | // Comma delimited list of partitions to match 1140 | Partition *string `form:"partition,omitempty" json:"partition,omitempty"` 1141 | 1142 | // Comma delimited list of QOS to match 1143 | Qos *string `form:"qos,omitempty" json:"qos,omitempty"` 1144 | 1145 | // Comma delimited list of job reasons to match 1146 | Reason *string `form:"reason,omitempty" json:"reason,omitempty"` 1147 | 1148 | // Comma delimited list of reservations to match 1149 | Reservation *string `form:"reservation,omitempty" json:"reservation,omitempty"` 1150 | 1151 | // Comma delimited list of states to match 1152 | State *string `form:"state,omitempty" json:"state,omitempty"` 1153 | 1154 | // Comma delimited list of job steps to match 1155 | Step *string `form:"step,omitempty" json:"step,omitempty"` 1156 | 1157 | // Comma delimited list of used nodes to match 1158 | Node *string `form:"node,omitempty" json:"node,omitempty"` 1159 | 1160 | // Comma delimited list of wckeys to match 1161 | Wckey *string `form:"wckey,omitempty" json:"wckey,omitempty"` 1162 | } 1163 | -------------------------------------------------------------------------------- /internal/openapidb/openapidb.go: 
-------------------------------------------------------------------------------- 1 | package openapidb 2 | 3 | // TODO: https://github.com/deepmap/oapi-codegen/issues/542 4 | //go:generate oapi-codegen --old-config-style --package=openapidb --generate=types -alias-types -o ./openapi_db.gen.go ./openapi_0.0.37_21.08.8.json 5 | 6 | // debug 2022/10/13 19:01:02 Error unmarshall: "json: cannot unmarshal number into Go struct field Dbv0037Job.Jobs.allocation_nodes of type string" 7 | // debug 2022/10/13 19:04:06 Error unmarshall: "json: cannot unmarshal number into Go struct field .Jobs.het.job_id of type map[string]interface {}" 8 | 9 | // debug 2022/10/13 19:08:37 Error unmarshall: "json: cannot unmarshal number into Go struct field .Jobs.steps.step.id of type string" 10 | -------------------------------------------------------------------------------- /internal/popupmenu/popupmenu.go: -------------------------------------------------------------------------------- 1 | package popupmenu 2 | 3 | type Popupmenu struct{} 4 | -------------------------------------------------------------------------------- /internal/slurm/sacct.go: -------------------------------------------------------------------------------- 1 | package slurm 2 | 3 | import ( 4 | "github.com/CLIP-HPC/SlurmCommander/internal/openapidb" 5 | ) 6 | 7 | // SacctJobHist struct holds job history. 8 | // Comes from unmarshalling sacct -A -S now-Xdays --json call. 
9 | type SacctJSON struct { 10 | Jobs []openapidb.Dbv0037Job 11 | } 12 | 13 | // This is to distinguish in Update() the return from the jobhisttab and jobdetails tab 14 | type SacctSingleJobHist struct { 15 | Jobs []openapidb.Dbv0037Job 16 | } 17 | -------------------------------------------------------------------------------- /internal/slurm/sinfo.go: -------------------------------------------------------------------------------- 1 | package slurm 2 | 3 | import ( 4 | "regexp" 5 | "strconv" 6 | "strings" 7 | 8 | "github.com/CLIP-HPC/SlurmCommander/internal/openapi" 9 | ) 10 | 11 | type SinfoJSON struct { 12 | Nodes []openapi.V0039Node 13 | } 14 | 15 | var gpuGresPattern = regexp.MustCompile(`gpu:(.*:)?(\d+)\(.*`) 16 | 17 | func ParseGRES(line string) *int { 18 | value := 0 19 | 20 | gres := strings.Split(line, ",") 21 | for _, g := range gres { 22 | if !strings.HasPrefix(g, "gpu:") { 23 | continue 24 | } 25 | 26 | matches := gpuGresPattern.FindStringSubmatch(g) 27 | if len(matches) == 3 { 28 | v, _ := strconv.Atoi(matches[2]) 29 | value += v 30 | } 31 | } 32 | 33 | return &value 34 | } 35 | 36 | type GresMap map[string]int 37 | 38 | func ParseGRESAll(line string) *GresMap { 39 | var gmap GresMap = make(GresMap) 40 | 41 | gres := strings.Split(line, ",") 42 | for _, g := range gres { 43 | if !strings.HasPrefix(g, "gpu:") { 44 | continue 45 | } 46 | 47 | matches := gpuGresPattern.FindStringSubmatch(g) 48 | if len(matches) == 3 { 49 | v, _ := strconv.Atoi(matches[2]) 50 | gmap[strings.Trim(matches[1], ":")] += v 51 | } 52 | } 53 | 54 | return &gmap 55 | } 56 | -------------------------------------------------------------------------------- /internal/slurm/sinfo_test.go: -------------------------------------------------------------------------------- 1 | package slurm_test 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | 7 | "github.com/CLIP-HPC/SlurmCommander/internal/slurm" 8 | ) 9 | 10 | type gresTest []struct { 11 | testName string 12 | input string 13 | expect 
int 14 | expectMap slurm.GresMap 15 | } 16 | 17 | var ( 18 | gresTestTable = gresTest{ 19 | { 20 | testName: "GRES-empty", 21 | input: "", 22 | expect: 0, 23 | expectMap: slurm.GresMap{}, 24 | }, 25 | { 26 | testName: "GRES-junk: asdf123:123:123:123", 27 | input: "asdf123:123:123:123", 28 | expect: 0, 29 | expectMap: slurm.GresMap{}, 30 | }, 31 | { 32 | testName: "GRES-simple: gpu:8(S:0-1)", 33 | input: "gpu:8(S:0-1)", 34 | expect: 8, 35 | expectMap: slurm.GresMap{"": 8}, 36 | }, 37 | { 38 | testName: "GRES: gpu:P100:8(S:0-1)", 39 | input: "gpu:P100:8(S:0-1)", 40 | expect: 8, 41 | expectMap: slurm.GresMap{"P100": 8}, 42 | }, 43 | { 44 | testName: "GRES_USED: gpu:P100:2(IDX:3,7)", 45 | input: "gpu:P100:2(IDX:3,7)", 46 | expect: 2, 47 | expectMap: slurm.GresMap{"P100": 2}, 48 | }, 49 | { 50 | testName: "GRES: gpu:p100:6(S:0),gpu:rtx:2(S:0)", 51 | input: "gpu:p100:6(S:0),gpu:rtx:2(S:0)", 52 | expect: 8, 53 | expectMap: slurm.GresMap{"p100": 6, "rtx": 2}, 54 | }, 55 | { 56 | testName: "GRES_USED: gpu:p100:0(IDX:N/A),gpu:rtx:0(IDX:N/A)", 57 | input: "gpu:p100:0(IDX:N/A),gpu:rtx:0(IDX:N/A)", 58 | expect: 0, 59 | expectMap: slurm.GresMap{"p100": 0, "rtx": 0}, 60 | }, 61 | { 62 | testName: "GRES_USED: gpu:p100:2(IDX:0-1),gpu:rtx:1(IDX:7)", 63 | input: "gpu:p100:2(IDX:0-1),gpu:rtx:1(IDX:7)", 64 | expect: 3, 65 | expectMap: slurm.GresMap{"p100": 2, "rtx": 1}, 66 | }, 67 | } 68 | ) 69 | 70 | func TestParseGRES(t *testing.T) { 71 | for i, v := range gresTestTable { 72 | t.Logf("Running test %d : %q\n", i, v.testName) 73 | rez := *slurm.ParseGRES(v.input) 74 | t.Logf("Expect: %d Got: %d\n", v.expect, rez) 75 | if rez != v.expect { 76 | t.Fatal("FAILED !!!") 77 | } 78 | } 79 | } 80 | 81 | func TestParseGRESAll(t *testing.T) { 82 | for i, v := range gresTestTable { 83 | t.Logf("Running test %d : %q\n", i, v.testName) 84 | rez := *slurm.ParseGRESAll(v.input) 85 | t.Logf("Expect: %#v Got: %#v\n", v.expectMap, rez) 86 | if !reflect.DeepEqual(rez, v.expectMap) { 87 | t.Fatal("FAILED 
!!!") 88 | } 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /internal/slurm/squeue.go: -------------------------------------------------------------------------------- 1 | package slurm 2 | 3 | import ( 4 | "github.com/CLIP-HPC/SlurmCommander/internal/openapi" 5 | ) 6 | 7 | type SqueueJSON struct { 8 | Jobs []openapi.V0039JobResponseProperties 9 | } 10 | -------------------------------------------------------------------------------- /internal/stats/stats.go: -------------------------------------------------------------------------------- 1 | package stats 2 | 3 | import ( 4 | "sort" 5 | "time" 6 | 7 | "gonum.org/v1/gonum/stat" 8 | ) 9 | 10 | // Return med,min,max 11 | func Median(s []time.Duration) (time.Duration, time.Duration, time.Duration) { 12 | var ret time.Duration 13 | 14 | n := len(s) 15 | switch n { 16 | case 0: 17 | return 0, 0, 0 18 | case 1: 19 | return s[0], s[0], s[0] 20 | } 21 | 22 | n -= 1 23 | 24 | sort.Slice(s, func(i, j int) bool { 25 | if s[i] < s[j] { 26 | return true 27 | } else { 28 | return false 29 | } 30 | }) 31 | 32 | if (n+1)%2 == 0 { 33 | ret = (s[n/2] + s[n/2+1]) / 2 34 | } else { 35 | ret = s[(n+1)/2] 36 | } 37 | // n-1? we've already deducted 1? 
38 | //return ret, s[0], s[n-1] 39 | return ret, s[0], s[n] 40 | } 41 | 42 | func Avg(s []time.Duration) time.Duration { 43 | var ( 44 | ret time.Duration 45 | sf []float64 46 | i int 47 | v time.Duration 48 | ) 49 | 50 | if len(s) == 0 { 51 | return time.Duration(0) 52 | } 53 | 54 | sf = make([]float64, len(s)) 55 | for i, v = range s { 56 | sf[i] = float64(v) 57 | } 58 | 59 | ret = time.Duration(stat.Mean(sf, nil)) 60 | return ret 61 | } 62 | 63 | func AvgX(s []time.Duration) time.Duration { 64 | var ret time.Duration 65 | 66 | n := len(s) 67 | if n == 0 { 68 | return 0 69 | } 70 | 71 | for _, v := range s { 72 | ret += v 73 | } 74 | ret = ret / time.Duration(n) 75 | 76 | return ret 77 | } 78 | -------------------------------------------------------------------------------- /internal/styles/styles.go: -------------------------------------------------------------------------------- 1 | package styles 2 | 3 | import ( 4 | "github.com/charmbracelet/lipgloss" 5 | ) 6 | 7 | var ( 8 | // Blue 9 | Blue = lipgloss.Color("#0057b7") 10 | // Yellow 11 | Yellow = lipgloss.Color("#ffd700") 12 | // Red 13 | Red = lipgloss.Color("#cc0000") 14 | //red = lipgloss.Color("#b30000") 15 | //Green = lipgloss.Color("#009900") 16 | Green = lipgloss.Color("#00b300") 17 | 18 | Bluegrey = lipgloss.Color("#c2d1f0") 19 | 20 | // Generic text color styles 21 | TextRed = lipgloss.NewStyle().Foreground(Red) 22 | TextYellow = lipgloss.NewStyle().Foreground(Yellow) 23 | TextGreen = lipgloss.NewStyle().Foreground(Green) 24 | TextBlue = lipgloss.NewStyle().Foreground(Blue) 25 | TextBlueGrey = lipgloss.NewStyle().Foreground(Bluegrey) 26 | TextYellowOnBlue = lipgloss.NewStyle().Foreground(Yellow).Background(Blue).Underline(true) 27 | 28 | // ErrorHelp Box 29 | //ErrorHelp = lipgloss.NewStyle().Foreground(red).Border(lipgloss.RoundedBorder()).BorderForeground(red) 30 | ErrorHelp = lipgloss.NewStyle().Foreground(Red) 31 | 32 | // TABS 33 | Tab = lipgloss.NewStyle(). 34 | Border(TabTabBorder, true). 
35 | BorderForeground(TabColor). 36 | Padding(0, 1) 37 | TabColor = lipgloss.AdaptiveColor{Light: "#0057B7", Dark: "#0057B7"} 38 | TabActiveTab = Tab.Copy().Border(TabActiveTabBorder, true).Foreground(Yellow) 39 | TabActiveTabBorder = lipgloss.ThickBorder() 40 | TabTabBorder = lipgloss.Border{ 41 | Top: "─", 42 | Bottom: "─", 43 | Left: "│", 44 | Right: "│", 45 | TopLeft: "╭", 46 | TopRight: "╮", 47 | BottomLeft: "┴", 48 | BottomRight: "┴", 49 | } 50 | TabGap = Tab.Copy(). 51 | BorderTop(false). 52 | BorderLeft(false). 53 | BorderRight(false) 54 | 55 | // (S)tats Box Style 56 | StatsBoxStyle = lipgloss.NewStyle().Padding(0, 1).BorderStyle(lipgloss.DoubleBorder()).BorderForeground(Blue) 57 | StatsSeparatorTitle = lipgloss.NewStyle().Foreground(Yellow).Background(Blue) 58 | 59 | // JobDetails viewport box 60 | //JDviewportBox = lipgloss.NewStyle().Border(lipgloss.DoubleBorder(), true, false).BorderForeground(Yellow).Padding(1, 1) 61 | JDviewportBox = lipgloss.NewStyle() 62 | 63 | // ClusterTab Stats Box 64 | ClusterTabStats = StatsBoxStyle.Copy() 65 | 66 | //MenuTitleStyle = lipgloss.NewStyle().Background(blue).Foreground(yellow) 67 | MenuBoxStyle = lipgloss.NewStyle().Padding(1, 1).BorderStyle(lipgloss.DoubleBorder()).BorderForeground(Blue) 68 | MenuTitleStyle = lipgloss.NewStyle().Foreground(Yellow) 69 | MenuNormalTitle = lipgloss.NewStyle().Foreground(Blue) 70 | MenuSelectedTitle = lipgloss.NewStyle().Foreground(Yellow).Background(Blue) 71 | MenuNormalDesc = lipgloss.NewStyle().Foreground(Yellow).Background(Blue) 72 | MenuSelectedDesc = lipgloss.NewStyle().Foreground(Yellow) 73 | 74 | CountsBox = lipgloss.NewStyle().Border(lipgloss.RoundedBorder()).Padding(0, 0).BorderForeground(Blue) 75 | 76 | // Main Window area 77 | MainWindow = lipgloss.NewStyle().MaxHeight(80) 78 | HelpWindow = lipgloss.NewStyle().Padding(0, 0).Border(lipgloss.RoundedBorder(), true, false, false).Height(2).MaxHeight(3).BorderForeground(Blue) 79 | 80 | // JobTemplates, template not found 81 | 
NotFound = lipgloss.NewStyle().Foreground(Red) 82 | 83 | // JobQueue tab, infobox 84 | JobInfoBox = lipgloss.NewStyle() 85 | JobInfoInBox = lipgloss.NewStyle().BorderStyle(lipgloss.RoundedBorder()).BorderForeground(Blue).MaxHeight(7) 86 | JobInfoInBottomBox = lipgloss.NewStyle().BorderStyle(lipgloss.RoundedBorder()).BorderForeground(Blue).MaxHeight(7) 87 | 88 | // JobDetails tab 89 | 90 | // Job steps 91 | JobStepBoxStyle = lipgloss.NewStyle().Padding(1, 2).BorderStyle(lipgloss.RoundedBorder()).BorderForeground(Blue) 92 | JobStepExitStatusRed = lipgloss.NewStyle().Foreground(Red) 93 | JobStepExitStatusGreen = lipgloss.NewStyle().Foreground(Green) 94 | 95 | //TresBox = lipgloss.NewStyle().Border(lipgloss.NormalBorder()).BorderForeground(blue).Width(40) 96 | TresBox = lipgloss.NewStyle() 97 | ) 98 | -------------------------------------------------------------------------------- /internal/table/table.go: -------------------------------------------------------------------------------- 1 | package table 2 | 3 | import ( 4 | "strings" 5 | 6 | "github.com/charmbracelet/bubbles/key" 7 | "github.com/charmbracelet/bubbles/viewport" 8 | tea "github.com/charmbracelet/bubbletea" 9 | "github.com/charmbracelet/lipgloss" 10 | "github.com/mattn/go-runewidth" 11 | ) 12 | 13 | // Model defines a state for the table widget. 14 | type Model struct { 15 | KeyMap KeyMap 16 | 17 | cols []Column 18 | rows []Row 19 | cursor int 20 | focus bool 21 | styles Styles 22 | 23 | viewport viewport.Model 24 | renderedLines 25 | } 26 | 27 | type renderedLines struct { 28 | start, end int 29 | } 30 | 31 | // Row represents one line in the table. 32 | type Row []string 33 | 34 | // Column defines the table structure. 35 | type Column struct { 36 | Title string 37 | Width int 38 | } 39 | 40 | // KeyMap defines keybindings. It satisfies to the help.KeyMap interface, which 41 | // is used to render the menu menu. 
type KeyMap struct {
	// LineUp moves the cursor up one row.
	LineUp key.Binding
	// LineDown moves the cursor down one row.
	LineDown key.Binding
	// PageUp moves the cursor up by one full viewport height (see Update).
	PageUp key.Binding
	// PageDown moves the cursor down by one full viewport height.
	PageDown key.Binding
	// HalfPageUp moves the cursor up by half a viewport height.
	HalfPageUp key.Binding
	// HalfPageDown moves the cursor down by half a viewport height.
	HalfPageDown key.Binding
	// GotoTop jumps the cursor to the first row.
	GotoTop key.Binding
	// GotoBottom jumps the cursor to the last row.
	GotoBottom key.Binding
}

// DefaultKeyMap returns a default set of keybindings.
func DefaultKeyMap() KeyMap {
	// Spacebar is bound to page-down, mirroring pager-style (less/more) navigation.
	const spacebar = " "
	return KeyMap{
		LineUp: key.NewBinding(
			key.WithKeys("up", "k"),
			key.WithHelp("↑/k", "up"),
		),
		LineDown: key.NewBinding(
			key.WithKeys("down", "j"),
			key.WithHelp("↓/j", "down"),
		),
		PageUp: key.NewBinding(
			key.WithKeys("b", "pgup"),
			key.WithHelp("b/pgup", "page up"),
		),
		PageDown: key.NewBinding(
			key.WithKeys("f", "pgdown", spacebar),
			key.WithHelp("f/pgdn", "page down"),
		),
		HalfPageUp: key.NewBinding(
			key.WithKeys("u", "ctrl+u"),
			key.WithHelp("u", "½ page up"),
		),
		HalfPageDown: key.NewBinding(
			key.WithKeys("d", "ctrl+d"),
			key.WithHelp("d", "½ page down"),
		),
		GotoTop: key.NewBinding(
			key.WithKeys("home", "g"),
			key.WithHelp("g/home", "go to start"),
		),
		GotoBottom: key.NewBinding(
			key.WithKeys("end", "G"),
			key.WithHelp("G/end", "go to end"),
		),
	}
}

// Styles contains style definitions for this list component. By default, these
// values are generated by DefaultStyles.
type Styles struct {
	// Header styles the column-title row rendered above the viewport.
	Header lipgloss.Style
	// Cell styles every non-selected data cell.
	Cell lipgloss.Style
	// Selected styles the row currently under the cursor.
	Selected lipgloss.Style
}

// DefaultStyles returns a set of default style definitions for this table.
func DefaultStyles() Styles {
	return Styles{
		Selected: lipgloss.NewStyle().Bold(true).Foreground(lipgloss.Color("212")),
		Header:   lipgloss.NewStyle().Bold(true).Padding(0, 1),
		Cell:     lipgloss.NewStyle().Padding(0, 1),
	}
}

// SetStyles sets the table styles.
110 | func (m *Model) SetStyles(s Styles) { 111 | m.styles = s 112 | m.UpdateViewport() 113 | } 114 | 115 | // Option is used to set options in New. For example: 116 | // 117 | // table := New(WithColumns([]Column{{Title: "ID", Width: 10}})) 118 | type Option func(*Model) 119 | 120 | // New creates a new model for the table widget. 121 | func New(opts ...Option) Model { 122 | m := Model{ 123 | cursor: 0, 124 | viewport: viewport.New(0, 20), 125 | 126 | KeyMap: DefaultKeyMap(), 127 | styles: DefaultStyles(), 128 | } 129 | 130 | for _, opt := range opts { 131 | opt(&m) 132 | } 133 | 134 | m.UpdateViewport() 135 | 136 | return m 137 | } 138 | 139 | // WithColumns sets the table columns (headers). 140 | func WithColumns(cols []Column) Option { 141 | return func(m *Model) { 142 | m.cols = cols 143 | } 144 | } 145 | 146 | // WithRows sets the table rows (data). 147 | func WithRows(rows []Row) Option { 148 | return func(m *Model) { 149 | m.rows = rows 150 | } 151 | } 152 | 153 | // WithHeight sets the height of the table. 154 | func WithHeight(h int) Option { 155 | return func(m *Model) { 156 | m.viewport.Height = h 157 | } 158 | } 159 | 160 | // WithWidth sets the width of the table. 161 | func WithWidth(w int) Option { 162 | return func(m *Model) { 163 | m.viewport.Width = w 164 | } 165 | } 166 | 167 | // WithFocused sets the focus state of the table. 168 | func WithFocused(f bool) Option { 169 | return func(m *Model) { 170 | m.focus = f 171 | } 172 | } 173 | 174 | // WithStyles sets the table styles. 175 | func WithStyles(s Styles) Option { 176 | return func(m *Model) { 177 | m.styles = s 178 | } 179 | } 180 | 181 | // WithKeyMap sets the key map. 182 | func WithKeyMap(km KeyMap) Option { 183 | return func(m *Model) { 184 | m.KeyMap = km 185 | } 186 | } 187 | 188 | // Update is the Bubble Tea update loop. 
189 | func (m Model) Update(msg tea.Msg) (Model, tea.Cmd) { 190 | if !m.focus { 191 | return m, nil 192 | } 193 | 194 | var cmds []tea.Cmd 195 | 196 | switch msg := msg.(type) { 197 | case tea.KeyMsg: 198 | switch { 199 | case key.Matches(msg, m.KeyMap.LineUp): 200 | m.MoveUp(1) 201 | case key.Matches(msg, m.KeyMap.LineDown): 202 | m.MoveDown(1) 203 | case key.Matches(msg, m.KeyMap.PageUp): 204 | m.MoveUp(m.viewport.Height) 205 | case key.Matches(msg, m.KeyMap.PageDown): 206 | m.MoveDown(m.viewport.Height) 207 | case key.Matches(msg, m.KeyMap.HalfPageUp): 208 | m.MoveUp(m.viewport.Height / 2) 209 | case key.Matches(msg, m.KeyMap.HalfPageDown): 210 | m.MoveDown(m.viewport.Height / 2) 211 | case key.Matches(msg, m.KeyMap.LineDown): 212 | m.MoveDown(1) 213 | case key.Matches(msg, m.KeyMap.GotoTop): 214 | m.GotoTop() 215 | case key.Matches(msg, m.KeyMap.GotoBottom): 216 | m.GotoBottom() 217 | } 218 | } 219 | 220 | return m, tea.Batch(cmds...) 221 | } 222 | 223 | // Focused returns the focus state of the table. 224 | func (m Model) Focused() bool { 225 | return m.focus 226 | } 227 | 228 | // Focus focusses the table, allowing the user to move around the rows and 229 | // interact. 230 | func (m *Model) Focus() { 231 | m.focus = true 232 | m.UpdateViewport() 233 | } 234 | 235 | // Blur blurs the table, preventing selection or movement. 236 | func (m *Model) Blur() { 237 | m.focus = false 238 | m.UpdateViewport() 239 | } 240 | 241 | // View renders the component. 242 | func (m Model) View() string { 243 | return m.headersView() + "\n" + m.viewport.View() 244 | } 245 | 246 | // UpdateViewport updates the list content based on the previously defined 247 | // columns and rows. 248 | func (m *Model) UpdateViewport() { 249 | renderedRows := make([]string, 0, len(m.rows)) 250 | 251 | // Render only rows from: m.cursor-m.viewport.Height to: m.cursor+m.viewport.Height 252 | // Constant runtime, independent of number of rows in a table. 
253 | // Limits the numer of renderedRows to a maximum of 2*m.viewport.Height 254 | // TODO: bug: m.cursor=0, m.cusor-height==-1, clamp fails causes start to be -1 255 | //log.Printf("cursor: %d height: %d len(rows): %d\n", m.cursor, m.viewport.Height, len(m.rows)) 256 | if m.cursor >= 0 { 257 | m.renderedLines.start = clamp(m.cursor-m.viewport.Height, 0, m.cursor) 258 | } else { 259 | m.renderedLines.start = 0 260 | } 261 | m.renderedLines.end = clamp(m.cursor+m.viewport.Height, m.cursor, len(m.rows)) 262 | //log.Printf("rows: %d start: %d end: %d cursor: %d height: %d yoffset: %d ypos: %d range: %d\n", len(m.rows), m.renderedLines.start, m.renderedLines.end, m.cursor, m.viewport.Height, m.viewport.YOffset, m.viewport.YPosition, m.renderedLines.end-m.renderedLines.start) 263 | //log.Printf("viewport at top: %t bottom: %t", m.viewport.AtTop(), m.viewport.AtBottom()) 264 | for i := m.renderedLines.start; i < m.renderedLines.end; i++ { 265 | renderedRows = append(renderedRows, m.renderRow(i)) 266 | } 267 | 268 | m.viewport.SetContent( 269 | lipgloss.JoinVertical(lipgloss.Left, renderedRows...), 270 | ) 271 | } 272 | 273 | // SelectedRow returns the selected row. 274 | // You can cast it to your own implementation. 275 | func (m Model) SelectedRow() Row { 276 | return m.rows[m.cursor] 277 | } 278 | 279 | // SetRows set a new rows state. 280 | func (m *Model) SetRows(r []Row) { 281 | m.rows = r 282 | m.UpdateViewport() 283 | } 284 | 285 | // SetWidth sets the width of the viewport of the table. 286 | func (m *Model) SetWidth(w int) { 287 | m.viewport.Width = w 288 | m.UpdateViewport() 289 | } 290 | 291 | // SetHeight sets the height of the viewport of the table. 292 | func (m *Model) SetHeight(h int) { 293 | m.viewport.Height = h 294 | m.UpdateViewport() 295 | } 296 | 297 | // Height returns the viewport height of the table. 298 | func (m Model) Height() int { 299 | return m.viewport.Height 300 | } 301 | 302 | // Width returns the viewport width of the table. 
func (m Model) Width() int {
	return m.viewport.Width
}

// Cursor returns the index of the selected row.
func (m Model) Cursor() int {
	return m.cursor
}

// SetCursor sets the cursor position in the table.
//
// NOTE(review): with zero rows, clamp(n, 0, len(m.rows)-1) yields -1;
// UpdateViewport guards against a negative cursor, but SelectedRow would
// panic — confirm callers never select on an empty table.
func (m *Model) SetCursor(n int) {
	m.cursor = clamp(n, 0, len(m.rows)-1)
	m.UpdateViewport()
}

// MoveUp moves the selection up by any number of row.
// It can not go above the first row.
func (m *Model) MoveUp(n int) {
	// Cursor first: clamp to the valid row range so we never pass row 0.
	m.cursor = clamp(m.cursor-n, 0, len(m.rows)-1)
	switch {
	case m.renderedLines.start == 0:
		// Render window already begins at the first row: keep the existing
		// offset, bounded so the cursor remains visible.
		m.viewport.SetYOffset(clamp(m.viewport.YOffset, 0, m.cursor))
		//log.Printf("start reached, offset = %d\n", m.viewport.YOffset)
	case m.renderedLines.start < m.viewport.Height:
		// Window start is about to reach the top of the row list.
		m.viewport.SetYOffset(clamp(m.viewport.YOffset+n, 0, m.cursor))
		//log.Printf("start about to be reached, offset = %d\n", m.viewport.YOffset)
	case m.viewport.YOffset >= 1:
		// NOTE(review): moving up *increases* YOffset here; this appears to
		// compensate for the render window shifting in UpdateViewport, but
		// looks counter-intuitive — verify against page-up behaviour.
		//log.Printf("offset >=1 n=%d new offset = %d\n", n, m.viewport.YOffset)
		m.viewport.YOffset = clamp(m.viewport.YOffset+n, 1, m.viewport.Height)
		//log.Printf("offset >=1 n=%d new offset = %d\n", n, m.viewport.YOffset)
	}
	// Re-render with the updated cursor/window.
	m.UpdateViewport()

}

// MoveDown moves the selection down by any number of row.
// It can not go below the last row.
340 | func (m *Model) MoveDown(n int) { 341 | m.cursor = clamp(m.cursor+n, 0, len(m.rows)-1) 342 | m.UpdateViewport() 343 | 344 | switch { 345 | case m.renderedLines.end == len(m.rows): 346 | //log.Printf("going down, at the end\n") 347 | m.viewport.SetYOffset(clamp(m.viewport.YOffset-n, 1, m.viewport.Height)) 348 | case m.cursor > (m.renderedLines.end-m.renderedLines.start)/2: 349 | //log.Printf("going down, excess yoffset\n") 350 | m.viewport.SetYOffset(clamp(m.viewport.YOffset-n, 1, m.cursor)) 351 | case m.viewport.YOffset > 1: 352 | //log.Printf("going down, yoffset>=1\n") 353 | case m.cursor > m.viewport.YOffset+m.viewport.Height-1: 354 | //log.Printf("going down, last\n") 355 | m.viewport.SetYOffset(clamp(m.viewport.YOffset+1, 0, 1)) 356 | } 357 | } 358 | 359 | // GotoTop moves the selection to the first row. 360 | func (m *Model) GotoTop() { 361 | m.MoveUp(m.cursor) 362 | } 363 | 364 | // GotoBottom moves the selection to the last row. 365 | func (m *Model) GotoBottom() { 366 | m.MoveDown(len(m.rows)) 367 | } 368 | 369 | // FromValues create the table rows from a simple string. It uses `\n` by 370 | // default for getting all the rows and the given separator for the fields on 371 | // each row. 372 | func (m *Model) FromValues(value, separator string) { 373 | rows := []Row{} 374 | for _, line := range strings.Split(value, "\n") { 375 | r := Row{} 376 | for _, field := range strings.Split(line, separator) { 377 | r = append(r, field) 378 | } 379 | rows = append(rows, r) 380 | } 381 | 382 | m.SetRows(rows) 383 | } 384 | 385 | func (m Model) headersView() string { 386 | var s = make([]string, 0, len(m.cols)) 387 | for _, col := range m.cols { 388 | style := lipgloss.NewStyle().Width(col.Width).MaxWidth(col.Width).Inline(true) 389 | renderedCell := style.Render(runewidth.Truncate(col.Title, col.Width, "…")) 390 | s = append(s, m.styles.Header.Render(renderedCell)) 391 | } 392 | return lipgloss.JoinHorizontal(lipgloss.Left, s...) 
393 | } 394 | 395 | func (m *Model) renderRow(rowID int) string { 396 | var s = make([]string, 0, len(m.cols)) 397 | for i, value := range m.rows[rowID] { 398 | style := lipgloss.NewStyle().Width(m.cols[i].Width).MaxWidth(m.cols[i].Width).Inline(true) 399 | renderedCell := m.styles.Cell.Render(style.Render(runewidth.Truncate(value, m.cols[i].Width, "…"))) 400 | s = append(s, renderedCell) 401 | } 402 | 403 | row := lipgloss.JoinHorizontal(lipgloss.Left, s...) 404 | 405 | if rowID == m.cursor { 406 | return m.styles.Selected.Render(row) 407 | } 408 | 409 | return row 410 | } 411 | 412 | func max(a, b int) int { 413 | if a > b { 414 | return a 415 | } 416 | 417 | return b 418 | } 419 | 420 | func min(a, b int) int { 421 | if a < b { 422 | return a 423 | } 424 | 425 | return b 426 | } 427 | 428 | func clamp(v, low, high int) int { 429 | return min(max(v, low), high) 430 | } 431 | -------------------------------------------------------------------------------- /internal/table/table_test.go: -------------------------------------------------------------------------------- 1 | package table 2 | 3 | import "testing" 4 | 5 | func TestFromValues(t *testing.T) { 6 | input := "foo1,bar1\nfoo2,bar2\nfoo3,bar3" 7 | table := New(WithColumns([]Column{{Title: "Foo"}, {Title: "Bar"}})) 8 | table.FromValues(input, ",") 9 | 10 | if len(table.rows) != 3 { 11 | t.Fatalf("expect table to have 3 rows but it has %d", len(table.rows)) 12 | } 13 | 14 | expect := []Row{ 15 | {"foo1", "bar1"}, 16 | {"foo2", "bar2"}, 17 | {"foo3", "bar3"}, 18 | } 19 | if !deepEqual(table.rows, expect) { 20 | t.Fatal("table rows is not equals to the input") 21 | } 22 | } 23 | 24 | func TestFromValuesWithTabSeparator(t *testing.T) { 25 | input := "foo1.\tbar1\nfoo,bar,baz\tbar,2" 26 | table := New(WithColumns([]Column{{Title: "Foo"}, {Title: "Bar"}})) 27 | table.FromValues(input, "\t") 28 | 29 | if len(table.rows) != 2 { 30 | t.Fatalf("expect table to have 2 rows but it has %d", len(table.rows)) 31 | } 32 | 33 | 
expect := []Row{ 34 | {"foo1.", "bar1"}, 35 | {"foo,bar,baz", "bar,2"}, 36 | } 37 | if !deepEqual(table.rows, expect) { 38 | t.Fatal("table rows is not equals to the input") 39 | } 40 | } 41 | 42 | func deepEqual(a, b []Row) bool { 43 | if len(a) != len(b) { 44 | return false 45 | } 46 | for i, r := range a { 47 | for j, f := range r { 48 | if f != b[i][j] { 49 | return false 50 | } 51 | } 52 | } 53 | return true 54 | } 55 | -------------------------------------------------------------------------------- /internal/version/version.go: -------------------------------------------------------------------------------- 1 | package version 2 | 3 | import "fmt" 4 | 5 | var ( 6 | BuildVersion string 7 | BuildCommit string 8 | ) 9 | 10 | func DumpVersion() { 11 | fmt.Printf("----------------------------------------\n") 12 | fmt.Printf("Version: %s\n", BuildVersion) 13 | fmt.Printf("Build commit hash: %s\n", BuildCommit) 14 | fmt.Printf("----------------------------------------\n") 15 | } 16 | -------------------------------------------------------------------------------- /sc-demo.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CLIP-HPC/SlurmCommander/c8722facaf382f5892eb7ba187ad170ac2a3a95f/sc-demo.gif --------------------------------------------------------------------------------