├── .github └── workflows │ ├── build_on_pr.yml │ ├── build_on_tag.yml │ └── go.yml ├── .gitignore ├── BUGS.md ├── LICENSE ├── Makefile ├── README.md ├── VERSION ├── cmd ├── discoslurmbot │ ├── LICENSE │ ├── discoslurmbot.conf │ └── discoslurmbot.go ├── gobler │ ├── conmon.go │ ├── conmon_test.go │ ├── gobler.conf │ ├── gobler.go │ ├── gobler.toml │ ├── monitor.go │ ├── picker.go │ └── sender.go ├── goslmailer │ ├── goslmailer.conf │ ├── goslmailer.conf.annotated_example │ ├── goslmailer.go │ ├── goslmailer.toml.annotated_example │ ├── invocation_context.go │ ├── invocation_context_test.go │ ├── invocation_context_testcases_test.go │ └── telegramTemplate.md ├── matrixslurmbot │ └── matrixslurmbot.go ├── mattermostbot │ ├── mattermost.conf │ └── mattermostbot.go └── tgslurmbot │ └── tgslurmbot.go ├── connectors ├── connectorX │ ├── README.md │ ├── conX.tmpl │ ├── connectorX.go │ ├── connector_data.go │ └── goslmailer.conf ├── discord │ ├── connector_data.go │ └── discord.go ├── mailto │ ├── connector_data.go │ └── mailto.go ├── matrix │ ├── connector_data.go │ ├── matrix.go │ └── template.md ├── mattermost │ ├── connector_data.go │ └── mattermost.go ├── msteams │ ├── connector_data.go │ └── msteams.go ├── slack │ ├── connector_data.go │ └── slack.go └── telegram │ ├── connector_data.go │ └── telegram.go ├── go.mod ├── go.sum ├── images ├── archSketch.png ├── discord.png ├── matrix.png ├── mattermost.png ├── msteams.png ├── slack.png └── telegram.png ├── internal ├── cmdline │ └── cmdline.go ├── config │ ├── config.go │ └── config_test.go ├── connectors │ ├── connectors.go │ └── connectors_test.go ├── logger │ └── logger.go ├── lookup │ └── lookup.go ├── message │ └── message.go ├── renderer │ └── renderer.go ├── slurmjob │ ├── getjobcontext.go │ ├── getjobcontext_test.go │ ├── job_data.go │ ├── sacct.go │ └── sacct_test.go ├── spool │ ├── spool.go │ ├── spool_test.go │ └── spoolfiles.go └── version │ └── version.go ├── templates ├── README.md ├── adaptive_card_template.json ├── matrix_template.md ├── mattermostTemplate.md └── telegramTemplate.html ├── test_data ├── config_test │ ├── gobler.conf │ ├── gobler.toml │ ├── goslmailer.conf │ ├── goslmailer.toml │ └── goslmailer_annotated.toml ├── sacct.txt └── sstat.txt └── test_e2e ├── README.md ├── cases ├── test_00 │ ├── README.md │ ├── conf │ │ ├── gobler.conf │ │ ├── goslmailer.conf │ │ └── tgslurmbot.conf │ ├── sacct │ │ ├── sacct │ │ └── sacct.txt │ ├── slurm_env │ │ └── slurmenv.sh │ └── test.yaml ├── test_01 │ ├── README.md │ ├── conf │ │ ├── adaptive_card_template.json │ │ ├── gobler.conf │ │ └── goslmailer.conf │ ├── sacct │ │ ├── sacct │ │ └── sacct.txt │ ├── slurm_env │ │ └── slurmenv.sh │ └── test.yaml ├── test_02 │ ├── README.md │ ├── conf │ │ ├── adaptive_card_template.json │ │ ├── gobler.conf │ │ └── goslmailer.conf │ ├── sacct │ │ ├── sacct │ │ └── sacct.txt │ ├── slurm_env │ │ └── slurmenv.sh │ └── test.yaml ├── test_03 │ ├── README.md │ ├── conf │ │ ├── adaptive_card_template.json │ │ ├── gobler.conf │ │ └── goslmailer.conf │ ├── results │ │ └── rendered-1052477-petar.jager@imba.oeaw.ac.at-1653378962712164702.json │ ├── sacct │ │ ├── sacct │ │ ├── sacct.txt │ │ ├── sstat │ │ └── sstat.txt │ ├── slurm_env │ │ └── slurmenv.sh │ └── test.yaml ├── test_04 │ ├── README.md │ ├── conf │ │ ├── adaptive_card_template.json │ │ ├── gobler.conf │ │ └── goslmailer.conf │ ├── results │ │ └── rendered-1052477-petar.jager@imba.oeaw.ac.at-1653372112324147944.json │ ├── sacct │ │ ├── sacct │ │ └── sacct.txt │ ├── slurm_env │ │ └── slurmenv.sh │ 
└── test.yaml ├── test_05 │ ├── README.md │ ├── conf │ │ ├── adaptive_card_template.json │ │ ├── gobler.conf │ │ └── goslmailer.conf │ ├── results │ │ └── rendered-1052477-petar.jager@imba.oeaw.ac.at-1653378962712164702.json │ ├── sacct │ │ ├── sacct │ │ ├── sacct_1052477.txt │ │ ├── sstat │ │ └── sstat_1052477.txt │ ├── slurm_env │ │ └── slurmenv.sh │ └── test.yaml └── test_06 │ ├── README.md │ ├── conf │ ├── gobler.toml │ ├── goslmailer.toml │ └── telegramTemplate.html │ ├── sacct │ ├── sacct │ ├── sacct.txt │ ├── sstat │ └── sstat.txt │ ├── slurm_env │ └── slurmenv.sh │ └── test.yaml └── run.yaml /.github/workflows/build_on_pr.yml: -------------------------------------------------------------------------------- 1 | name: Build_on_tag 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - main 7 | 8 | jobs: 9 | 10 | build: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions/checkout@v2 14 | 15 | - name: Set up Go 16 | uses: actions/setup-go@v2 17 | with: 18 | go-version: 1.17 19 | 20 | - name: Prepare SSH for endly 21 | run: | 22 | mkdir ~/.secret 23 | mkdir ~/.ssh 24 | touch ~/.ssh/authorized_keys 25 | chmod 600 ~/.ssh/authorized_keys 26 | ssh-keygen -t ed25519 -f id_rsa -P "" -f ~/.secret/id_rsa 27 | cat ~/.secret/id_rsa.pub >> ~/.ssh/authorized_keys 28 | echo "set enable-bracketed-paste off" >> ~/.inputrc 29 | cat >>~/.ssh/config <> ~/.ssh/authorized_keys 28 | echo "set enable-bracketed-paste off" >> ~/.inputrc 29 | cat >>~/.ssh/config <> ~/.ssh/authorized_keys 25 | cat >>~/.ssh/config < Now building: $$i" 55 | echo "................................................................................" 56 | go build -v -ldflags '-X $(buildVersionVar)=$(version) -X $(buildCommitVar)=$(commit)' $$i; 57 | done; 58 | 59 | install: 60 | mkdir -p $(installdir) $(installconfdir) $(installtempldir) 61 | cp $(bins) $(readme) $(installdir) 62 | cp $(config) $(installconfdir) 63 | cp $(templates) $(installtempldir) 64 | 65 | test_new: 66 | $(foreach dir, $(testdirs), go test -v -count=1 $(dir) || exit $$?;) 67 | 68 | test: 69 | @echo "********************************************************************************" 70 | @echo Testing 71 | @echo "********************************************************************************" 72 | go test -v -cover -count=1 ./... 73 | 74 | endly_linux_$(endly_version).tar.gz: 75 | curl -L -O https://github.com/viant/endly/releases/download/v$(endly_version)/endly_linux_$(endly_version).tar.gz 76 | 77 | test_e2e/endly: 78 | tar -C test_e2e/ -xzf endly_linux_$(endly_version).tar.gz 79 | 80 | get_endly: endly_linux_$(endly_version).tar.gz test_e2e/endly 81 | 82 | test_endly: 83 | cd test_e2e 84 | ./endly 85 | 86 | clean: 87 | rm $(bins) 88 | rm -rf $(installdir) 89 | rm test_e2e/rendered-* 90 | #rm endly_linux_$(endly_version).tar.gz 91 | rm test_e2e/endly 92 | -------------------------------------------------------------------------------- /VERSION: -------------------------------------------------------------------------------- 1 | v2.7.1 2 | -------------------------------------------------------------------------------- /cmd/discoslurmbot/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2015, Bruce Marriner 2 | All rights reserved. 
3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | 7 | * Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | 10 | * Redistributions in binary form must reproduce the above copyright notice, 11 | this list of conditions and the following disclaimer in the documentation 12 | and/or other materials provided with the distribution. 13 | 14 | * Neither the name of discordgo nor the names of its 15 | contributors may be used to endorse or promote products derived from 16 | this software without specific prior written permission. 17 | 18 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | 29 | -------------------------------------------------------------------------------- /cmd/discoslurmbot/discoslurmbot.conf: -------------------------------------------------------------------------------- 1 | { # remember to remove comments from this json example ;) 2 | "logfile": "", # if empty -> stderr, else log to specified file 3 | "connectors": { 4 | "discord": { 5 | "name": "DiscoSlurmBot", # name that is used in the bot welcome message 6 | "triggerString": "showmeslurm", # string (in channel or DM) that triggers the bot to respond with an instructional DM to the user 7 | "token": "PasteBotTokenHere", # place to put the bot token 8 | "messageTemplate": "/path/to/template.md" # template file to use 9 | } 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /cmd/discoslurmbot/discoslurmbot.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "os" 7 | "os/signal" 8 | "syscall" 9 | 10 | "github.com/CLIP-HPC/goslmailer/internal/cmdline" 11 | "github.com/CLIP-HPC/goslmailer/internal/config" 12 | "github.com/CLIP-HPC/goslmailer/internal/logger" 13 | "github.com/CLIP-HPC/goslmailer/internal/version" 14 | "github.com/bwmarrin/discordgo" 15 | ) 16 | 17 | const app = "discoslurmbot" 18 | 19 | type botConfig struct { 20 | config.ConfigContainer 21 | l *log.Logger 22 | } 23 | 24 | // This function will be called (due to AddHandler above) every time a new 25 | // message is created on any channel that the authenticated bot has access to. 26 | // 27 | // It is called whenever a message is created but only when it's sent through a 28 | // server as we did not request IntentsDirectMessages. 
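// messageCreate returns a closure with the botConfig captured, so the handler
// matches the discordgo callback signature while still having access to the
// connector configuration and logger.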
29 | func messageCreate(bc botConfig) func(*discordgo.Session, *discordgo.MessageCreate) { 30 | return func(s *discordgo.Session, m *discordgo.MessageCreate) { 31 | 32 | fmt.Printf("session: %#v\n", s) 33 | fmt.Printf("message: %#v\n", m) 34 | fmt.Printf("message content: %#v\n", m.Content) 35 | fmt.Printf("author: %#v\n", m.Author.ID) 36 | fmt.Printf("user.id: %#v\n", s.State.User.ID) 37 | 38 | // Ignore all messages created by the bot itself 39 | // This isn't required in this specific example but it's a good practice. 40 | if m.Author.ID == s.State.User.ID { 41 | return 42 | } 43 | // In this example, we only care about messages that are "ping". 44 | if m.Content != bc.Connectors["discord"]["triggerString"] { 45 | return 46 | } 47 | 48 | // We create the private channel with the user who sent the message. 49 | channel, err := s.UserChannelCreate(m.Author.ID) 50 | if err != nil { 51 | // If an error occurred, we failed to create the channel. 52 | // 53 | // Some common causes are: 54 | // 1. We don't share a server with the user (not possible here). 55 | // 2. We opened enough DM channels quickly enough for Discord to 56 | // label us as abusing the endpoint, blocking us from opening 57 | // new ones. 58 | bc.l.Println("error creating channel:", err) 59 | s.ChannelMessageSend( 60 | m.ChannelID, 61 | "Something went wrong while sending the DM!", 62 | ) 63 | return 64 | } 65 | // Then we send the message through the channel we created. 66 | msg := fmt.Sprintf("Welcome,\nI am %s,\nplease use this switch in your job submission script in addition to '--mail-type' and i'll get back to you:\n '--mail-user=discord:%s'", bc.Connectors["discord"]["botname"], channel.ID) 67 | _, err = s.ChannelMessageSend(channel.ID, msg) 68 | if err != nil { 69 | // If an error occurred, we failed to send the message. 70 | // 71 | // It may occur either when we do not share a server with the 72 | // user (highly unlikely as we just received a message) or 73 | // the user disabled DM in their settings (more likely). 74 | bc.l.Println("error sending DM message:", err) 75 | s.ChannelMessageSend( 76 | m.ChannelID, 77 | "Failed to send you a DM. "+ 78 | "Did you disable DM in your privacy settings?", 79 | ) 80 | } 81 | } 82 | } 83 | 84 | func main() { 85 | 86 | // parse command line params 87 | cmd, err := cmdline.NewCmdArgs(app) 88 | if err != nil { 89 | log.Fatalf("ERROR: parse command line failed with: %q\n", err) 90 | } 91 | 92 | if *(cmd.Version) { 93 | l := log.New(os.Stderr, app+":", log.Lshortfile|log.Ldate|log.Lmicroseconds) 94 | version.DumpVersion(l) 95 | os.Exit(0) 96 | } 97 | 98 | // read config file 99 | cfg := config.NewConfigContainer() 100 | err = cfg.GetConfig(*(cmd.CfgFile)) 101 | if err != nil { 102 | log.Fatalf("ERROR: getConfig() failed: %s\n", err) 103 | } 104 | 105 | // setup logger 106 | l, err := logger.SetupLogger(cfg.Logfile, "gobler") 107 | if err != nil { 108 | log.Fatalf("setuplogger(%s) failed with: %q\n", cfg.Logfile, err) 109 | } 110 | 111 | l.Println("===================== discoslurmbot start ======================================") 112 | 113 | version.DumpVersion(l) 114 | 115 | if _, ok := cfg.Connectors["discord"]["token"]; !ok { 116 | l.Fatalf("MAIN: fetching config[connectors][discord][token] failed: %s\n", err) 117 | } 118 | 119 | // Create a new Discord session using the provided bot token. 
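// Note: discordgo expects bot tokens to be passed with the "Bot " prefix, as done below.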
120 | dg, err := discordgo.New("Bot " + cfg.Connectors["discord"]["token"]) 121 | if err != nil { 122 | l.Println("error creating Discord session,", err) 123 | return 124 | } 125 | 126 | // Register the messageCreate func as a callback for MessageCreate events. 127 | bc := botConfig{ 128 | *cfg, 129 | l, 130 | } 131 | dg.AddHandler(messageCreate(bc)) 132 | 133 | // In this example, we only care about receiving message events. 134 | // pja: and DMs 135 | dg.Identify.Intents = discordgo.IntentsGuildMessages | discordgo.IntentDirectMessages 136 | 137 | // Open a websocket connection to Discord and begin listening. 138 | err = dg.Open() 139 | if err != nil { 140 | l.Println("error opening connection,", err) 141 | return 142 | } 143 | 144 | // Wait here until CTRL-C or other term signal is received. 145 | l.Println("Bot is now running. Press CTRL-C to exit.") 146 | sc := make(chan os.Signal, 1) 147 | signal.Notify(sc, syscall.SIGINT, syscall.SIGTERM, os.Interrupt, os.Kill) 148 | <-sc 149 | 150 | // Cleanly close down the Discord session. 151 | dg.Close() 152 | 153 | l.Println("===================== discoslurmbot end ========================================") 154 | } 155 | -------------------------------------------------------------------------------- /cmd/gobler/conmon.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "errors" 5 | "log" 6 | "strconv" 7 | "strings" 8 | "sync" 9 | "time" 10 | 11 | "github.com/CLIP-HPC/goslmailer/internal/connectors" 12 | "github.com/CLIP-HPC/goslmailer/internal/spool" 13 | ) 14 | 15 | type conMon struct { 16 | conn string 17 | spoolDir string 18 | monitorT time.Duration 19 | pickerT time.Duration 20 | pickSendBufLen int 21 | numSenders int 22 | maxMsgPU int 23 | } 24 | 25 | const ( 26 | monitorTdefault = 10 27 | pickerTdefault = 2 28 | psBufLenDefault = 1 29 | numSendersDefault = 1 30 | maxMsgPUDefault = 10 31 | ) 32 | 33 | type psGob struct { 34 | fileGob *spool.FileGob 35 | deletedCount uint32 36 | } 37 | 38 | // getConfTime converts config string to time.Duration value. 39 | // If string is suffixed with "ms", return miliseconds, else seconds. 40 | func getConfTime(e string) (time.Duration, error) { 41 | var milis bool = false 42 | 43 | if ms := strings.TrimSuffix(e, "ms"); ms != e { 44 | milis = true 45 | e = ms 46 | } 47 | 48 | T, err := strconv.ParseUint(e, 10, 64) 49 | if err != nil { 50 | return -1 * time.Second, errors.New("problem converting time from config to uint") 51 | } 52 | 53 | if milis { 54 | return time.Duration(T) * time.Millisecond, nil 55 | } else { 56 | return time.Duration(T) * time.Second, nil 57 | } 58 | } 59 | 60 | func NewConMon(con string, conCfg map[string]string, l *log.Logger) (*conMon, error) { 61 | var ( 62 | cm conMon 63 | err error 64 | ) 65 | 66 | cm.conn = con 67 | cm.spoolDir = conCfg["spoolDir"] 68 | 69 | psbl, err := strconv.Atoi(conCfg["psBufLen"]) 70 | if err != nil { 71 | // return nil, errors.New("psBufLen is not integer") 72 | // todo: no need to be so agressive, let's do default... or should we abort so the user knows he made a mistake? 
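// Current behaviour: a missing or non-numeric psBufLen silently falls back to the
// package default; the same pattern is used for numSenders and maxMsgPU below.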
73 | cm.pickSendBufLen = psBufLenDefault 74 | } else { 75 | cm.pickSendBufLen = psbl 76 | } 77 | 78 | ns, err := strconv.Atoi(conCfg["numSenders"]) 79 | if err != nil { 80 | //return nil, errors.New("numSenders is not integer") 81 | cm.numSenders = numSendersDefault 82 | } else { 83 | cm.numSenders = ns 84 | } 85 | 86 | mpu, err := strconv.Atoi(conCfg["maxMsgPU"]) 87 | if err != nil { 88 | //return nil, errors.New("maxNewMsgPU is not integer") 89 | cm.maxMsgPU = maxMsgPUDefault 90 | } else { 91 | cm.maxMsgPU = mpu 92 | } 93 | 94 | // if monitorT is specified... 95 | if e, ok := conCfg["monitorT"]; ok { 96 | cm.monitorT, err = getConfTime(e) 97 | if err != nil { 98 | return nil, err 99 | } 100 | } else { 101 | // nothing specified in config, use default seconds 102 | cm.monitorT = time.Duration(monitorTdefault) * time.Second 103 | } 104 | 105 | // if pickerT is specified... 106 | if e, ok := conCfg["pickerT"]; ok { 107 | cm.pickerT, err = getConfTime(e) 108 | if err != nil { 109 | return nil, err 110 | } 111 | } else { 112 | // nothing specified in config, use default seconds 113 | cm.pickerT = time.Duration(pickerTdefault) * time.Second 114 | } 115 | l.Printf("CM setup: %#v\n", cm) 116 | return &cm, nil 117 | } 118 | 119 | // SpinUp start 3 goroutines: monitor, picker and sender for a connector (each that has "spoolDir" attribute in .conf) 120 | func (cm *conMon) SpinUp(conns connectors.Connectors, wg *sync.WaitGroup, l *log.Logger) error { 121 | 122 | mpChan := make(chan *spool.SpooledGobs, 1) 123 | psChan := make(chan psGob, cm.pickSendBufLen) 124 | psChanFailed := make(chan psGob, cm.pickSendBufLen) 125 | 126 | mon, err := NewMonitor(cm.conn, cm.spoolDir, cm.monitorT) 127 | if err != nil { 128 | l.Printf("Monitor %s inst FAILED\n", cm.conn) 129 | } else { 130 | l.Printf("Monitor %s startup...\n", cm.conn) 131 | wg.Add(1) 132 | go mon.MonitorWorker(mpChan, wg, l) 133 | } 134 | 135 | pickr, err := NewPicker(cm.conn, cm.spoolDir, cm.pickerT, cm.maxMsgPU) 136 | if err != nil { 137 | l.Printf("Picker %s inst FAILED\n", cm.conn) 138 | } else { 139 | l.Printf("Picker %s startup...\n", cm.conn) 140 | wg.Add(1) 141 | go pickr.PickerWorker(mpChan, psChan, psChanFailed, wg, l) 142 | } 143 | 144 | for i := 1; i <= cm.numSenders; i++ { 145 | sendr, err := NewSender(cm.conn, cm.spoolDir, &conns, i) 146 | if err != nil { 147 | l.Printf("Sender %d - %s inst failed\n", i, cm.conn) 148 | } else { 149 | l.Printf("Sender %d - %s startup...\n", i, cm.conn) 150 | wg.Add(1) 151 | go sendr.SenderWorker(psChan, psChanFailed, wg, l) 152 | } 153 | } 154 | 155 | return nil 156 | } 157 | -------------------------------------------------------------------------------- /cmd/gobler/conmon_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "log" 6 | "runtime" 7 | "strconv" 8 | "sync" 9 | "testing" 10 | "time" 11 | 12 | "github.com/CLIP-HPC/goslmailer/internal/config" 13 | "github.com/CLIP-HPC/goslmailer/internal/connectors" 14 | ) 15 | 16 | const con = "msteams" 17 | 18 | type timesTests []struct { 19 | name string 20 | t string 21 | want time.Duration 22 | wanterr bool 23 | } 24 | 25 | func TestGetConfTime(t *testing.T) { 26 | var tt = timesTests{ 27 | { 28 | name: "testEmpty", 29 | t: "", 30 | want: -1 * time.Second, 31 | wanterr: true, 32 | }, 33 | { 34 | name: "test1000ms", 35 | t: "1000ms", 36 | want: 1 * time.Second, 37 | wanterr: false, 38 | }, 39 | { 40 | name: "test1s", 41 | t: "1", 42 | want: 1 * time.Second, 43 | 
wanterr: false, 44 | }, 45 | { 46 | name: "testJunk", 47 | t: "asd", 48 | want: -1 * time.Second, 49 | wanterr: true, 50 | }, 51 | } 52 | 53 | for k, v := range tt { 54 | t.Logf("Running test %d", k) 55 | t.Run(v.name, func(t *testing.T) { 56 | got, err := getConfTime(v.t) 57 | t.Logf("Test %q: GOT: %v WANT: %v WANTERR: %v", v.name, got, v.want, v.wanterr) 58 | switch { 59 | case !v.wanterr && err != nil: 60 | t.Fatalf("FAILED: test %q didn't want error and got one", v.name) 61 | case v.wanterr && err == nil: 62 | t.Fatalf("FAILED: test %q wanted error and got none", v.name) 63 | case v.want != got: 64 | t.Fatalf("FAILED: test %q wanted: %v and got: %v", v.name, v.want, got) 65 | } 66 | }) 67 | } 68 | // todo 69 | } 70 | 71 | func TestConmonGoRoutines(t *testing.T) { 72 | var ( 73 | conns = make(connectors.Connectors) 74 | wg sync.WaitGroup 75 | ) 76 | 77 | wr := bytes.Buffer{} 78 | l := log.New(&wr, "Testing: ", log.Llongfile) 79 | 80 | cfg := config.NewConfigContainer() 81 | err := cfg.GetConfig("../../test_data/config_test/gobler.conf") 82 | if err != nil { 83 | t.Fatalf("MAIN: getConfig(gobconfig) failed: %s", err) 84 | } 85 | ns, err := strconv.Atoi(cfg.Connectors[con]["numSenders"]) 86 | if err != nil { 87 | t.Fatalf("Atoi(numSenders) failed: %s\n", err) 88 | } 89 | expected := 4 + ns 90 | 91 | cm, err := NewConMon(con, cfg.Connectors[con], l) 92 | if err != nil { 93 | t.Fatalf("MAIN: NewConMon(%s) failed with: %s\n", con, err) 94 | } 95 | 96 | err = cm.SpinUp(conns, &wg, l) 97 | if err != nil { 98 | t.Fatalf("MAIN: SpinUp(%s) failed with: %s\n", con, err) 99 | } 100 | 101 | t.Logf("Num goroutines test: got: %d, expected %d (test,main,monitor,picker,%dx sender)\n", runtime.NumGoroutine(), expected, ns) 102 | if runtime.NumGoroutine() != expected { 103 | t.Fatal("numGoroutines test failed.") 104 | } else { 105 | t.Log("numGoroutines test OK.") 106 | } 107 | 108 | } 109 | -------------------------------------------------------------------------------- /cmd/gobler/gobler.conf: -------------------------------------------------------------------------------- 1 | { # see gobler.toml for annotations, remove this comment in production config 2 | "logfile": "", 3 | "defaultconnector": "msteams", 4 | "connectors": { 5 | "msteams": { 6 | "name": "dev channel", 7 | "renderToFile": "no", 8 | "spoolDir": "/tmp", 9 | "adaptiveCardTemplate": "/etc/slurm/adaptive_card_template.json", 10 | "url": "http://localhost:9999/", 11 | "useLookup": "GECOS", 12 | "monitorT": "20000ms", 13 | "pickerT": "5000ms", 14 | "psBufLen": "3", 15 | "numSenders": "1", 16 | "maxMsgPU": "5" 17 | }, 18 | "telegram": { 19 | "name": "telegram bot connector", 20 | "url": "", 21 | "token": "PasteHereTelegramBotToken", 22 | "renderToFile": "no", 23 | "spoolDir": "/tmp/telegramgobs", 24 | "messageTemplate": "/etc/slurm/telegramTemplate.md", 25 | "useLookup": "no", 26 | "format": "HTML", 27 | "monitorT": "5000ms", 28 | "pickerT": "1000ms", 29 | "psBufLen": "3", 30 | "numSenders": "3", 31 | "maxMsgPU": "6" 32 | }, 33 | "textfile": { 34 | "path": "/tmp" 35 | } 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /cmd/gobler/gobler.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "log" 5 | "os" 6 | "sync" 7 | 8 | _ "github.com/CLIP-HPC/goslmailer/connectors/discord" 9 | _ "github.com/CLIP-HPC/goslmailer/connectors/mailto" 10 | _ "github.com/CLIP-HPC/goslmailer/connectors/matrix" 11 | _ 
"github.com/CLIP-HPC/goslmailer/connectors/mattermost" 12 | _ "github.com/CLIP-HPC/goslmailer/connectors/msteams" 13 | _ "github.com/CLIP-HPC/goslmailer/connectors/slack" 14 | _ "github.com/CLIP-HPC/goslmailer/connectors/telegram" 15 | "github.com/CLIP-HPC/goslmailer/internal/cmdline" 16 | "github.com/CLIP-HPC/goslmailer/internal/config" 17 | "github.com/CLIP-HPC/goslmailer/internal/connectors" 18 | "github.com/CLIP-HPC/goslmailer/internal/logger" 19 | "github.com/CLIP-HPC/goslmailer/internal/message" 20 | "github.com/CLIP-HPC/goslmailer/internal/version" 21 | ) 22 | 23 | var lock sync.Mutex 24 | 25 | type MsgList []message.MessagePack 26 | 27 | func main() { 28 | 29 | var ( 30 | wg sync.WaitGroup 31 | ) 32 | 33 | // parse command line params 34 | cmd, err := cmdline.NewCmdArgs("gobler") 35 | if err != nil { 36 | log.Fatalf("ERROR: parse command line failed with: %q\n", err) 37 | } 38 | 39 | if *(cmd.Version) { 40 | l := log.New(os.Stderr, "gobler:", log.Lshortfile|log.Ldate|log.Lmicroseconds) 41 | version.DumpVersion(l) 42 | os.Exit(0) 43 | } 44 | 45 | // read config file 46 | cfg := config.NewConfigContainer() 47 | err = cfg.GetConfig(*(cmd.CfgFile)) 48 | if err != nil { 49 | log.Fatalf("ERROR: getConfig() failed: %s\n", err) 50 | } 51 | 52 | // setup logger 53 | l, err := logger.SetupLogger(cfg.Logfile, "gobler") 54 | if err != nil { 55 | log.Fatalf("setuplogger(%s) failed with: %q\n", cfg.Logfile, err) 56 | } 57 | 58 | l.Println("======================= Gobler start ===========================================") 59 | 60 | version.DumpVersion(l) 61 | 62 | cfg.DumpConfig(l) 63 | 64 | // populate map with configured referenced connectors 65 | err = connectors.ConMap.PopulateConnectors(cfg, l) 66 | if err != nil { 67 | l.Printf("MAIN: PopulateConnectors() failed with: %s\n", err) 68 | } 69 | 70 | // iterate and spin up monitor,picker and sender routines 71 | for con := range cfg.Connectors { 72 | spd, ok := cfg.Connectors[con]["spoolDir"] 73 | if ok { 74 | l.Printf("MAIN: %s spoolDir exists: %s - %s\n", con, cfg.Connectors[con]["spoolDir"], spd) 75 | 76 | cm, err := NewConMon(con, cfg.Connectors[con], l) 77 | if err != nil { 78 | l.Printf("MAIN: NewConMon(%s) failed with: %s\n", con, err) 79 | l.Printf("MAIN: skipping %s...\n", con) 80 | continue 81 | } 82 | // func (cm *conMon) SpinUp(conns connectors.Connectors, wg sync.WaitGroup, l *log.Logger) error { 83 | err = cm.SpinUp(connectors.ConMap, &wg, l) 84 | if err != nil { 85 | l.Printf("MAIN: SpinUp(%s) failed with: %s\n", con, err) 86 | } 87 | } else { 88 | l.Printf("MAIN: connector %s doesn't have spoolDir defined\n", con) 89 | } 90 | } 91 | 92 | l.Printf("MAIN: Waiting for routines to finish...\n") 93 | wg.Wait() 94 | l.Printf("MAIN: All routines finished, exiting main\n") 95 | 96 | l.Println("======================= Gobler end =============================================") 97 | } 98 | -------------------------------------------------------------------------------- /cmd/gobler/gobler.toml: -------------------------------------------------------------------------------- 1 | # 2 | # gobler annotated configuration file 3 | # 4 | # note: configuration file syntax is the same as from goslmailer, only the differences are commented here 5 | # 6 | 7 | logfile = "/tmp/goslmailer.log" 8 | defaultconnector = "msteams" 9 | 10 | [binpaths] 11 | sacct="/usr/bin/sacct" 12 | sstat="/usr/bin/sstat" 13 | 14 | [connectors.msteams] 15 | name = "gobler.conf" 16 | renderToFile = "no" 17 | spoolDir = "/tmp" 18 | adaptiveCardTemplate = 
"/etc/slurm/adaptive_card_template.json" 19 | url = "https://msteams/webhook/url" 20 | useLookup = "GECOS" 21 | # 22 | # Gobler specific configuration, set this in every connector config that supports, and is used with spooling enabled. 23 | # 24 | # monitor period, now often will `monitor` goroutine scan the spoolDir for new gobs (if "ms" is omitted, default T is in seconds) 25 | monitorT = "20000ms" 26 | # 27 | # picker period, now often will `picker` goroutine pick the next message to send to `sender` 28 | # Tune depending on the endpoint throughput capability. 29 | # note: sender picks up and tries to send the message immediately, so this determines how fast are the messages sent out. 30 | pickerT = "5000ms" 31 | # 32 | # picker-sender buffer length 33 | # How many undelivered messages can the `picker` send to `sender` without blocking. 34 | # Tune depending on the endpoint throughput capability. 35 | psBufLen = "3" 36 | # 37 | # number of `sender` goroutines 38 | # Multiple `sender`s can wait for messages from the `picker`. 39 | # Tune depending on the endpoint throughput capability. 40 | numSenders = "4" 41 | # 42 | # maximum messages per user 43 | # On receipt of new messages from the `monitor`, `picker` will scan the list and delete latest messages above this limit. 44 | # The number of deleted messages is recorded in the messagepack structure and can be referenced like this: `{{ .Job.PrunedMessageCount }}` 45 | # in the template to notify user that some of his messages were deleted. 46 | # Unlimited if this is set to 0. 47 | maxMsgPU = "5" 48 | 49 | [connectors.telegram] 50 | name = "telegram bot connector" 51 | url = "" 52 | token = "PasteHereTelegramBotToken" 53 | renderToFile = "no" 54 | spoolDir = "/tmp/telegramgobs" 55 | messageTemplate = "/etc/slurm/telegramTemplate.md" 56 | useLookup = "no" 57 | format = "HTML" 58 | monitorT = "5000ms" 59 | pickerT = "1000ms" 60 | psBufLen = "3" 61 | numSenders = "3" 62 | maxMsgPU = "6" 63 | 64 | [connectors.textfile] 65 | path = "/tmp" 66 | -------------------------------------------------------------------------------- /cmd/gobler/monitor.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "errors" 5 | "log" 6 | "sync" 7 | "time" 8 | 9 | "github.com/CLIP-HPC/goslmailer/internal/spool" 10 | ) 11 | 12 | type monitor struct { 13 | connector string 14 | spoolDir string 15 | monitorT time.Duration 16 | } 17 | 18 | // NewMonitor creates and initializes a new monitor object with: 19 | // c connector name, s spooldir location (from config file), t polling time period 20 | func NewMonitor(c string, s string, t time.Duration) (*monitor, error) { 21 | var m monitor 22 | 23 | if s != "" { 24 | m.connector = c 25 | m.spoolDir = s 26 | m.monitorT = t 27 | } else { 28 | return nil, errors.New("no spooldir, aborting") 29 | } 30 | 31 | return &m, nil 32 | } 33 | 34 | func (m *monitor) MonitorWorker(ch chan<- *spool.SpooledGobs, wg *sync.WaitGroup, l *log.Logger) error { 35 | 36 | var oldList, newList, newFiles *spool.SpooledGobs 37 | oldList = &spool.SpooledGobs{} 38 | newFiles = &spool.SpooledGobs{} 39 | 40 | defer wg.Done() 41 | ticker := time.Tick(m.monitorT) 42 | 43 | l.Println("======================= Monitor start ==========================================") 44 | l.Printf("MONITOR %s Starting\n", m.connector) 45 | sp, err := spool.NewSpool(m.spoolDir) 46 | if err != nil { 47 | return err 48 | } 49 | for { 50 | lock.Lock() 51 | // get new list of files 52 | newList, err = 
sp.GetSpooledGobsList(l) 53 | lock.Unlock() 54 | if err != nil { 55 | l.Printf("MONITOR %s: Failed on Getspooledgobslist(), error %s\n", m.connector, err) 56 | return err 57 | } 58 | // iterate over newlist and each file that doesn't exist in old, put into newfiles to be sent to the Picker 59 | for k, v := range *newList { 60 | if _, ok := (*oldList)[k]; !ok { 61 | // doesn't 62 | (*newFiles)[k] = v 63 | } 64 | } 65 | 66 | // send new-found files 67 | l.Printf("MONITOR %s: Sent %d gobs\n", m.connector, len(*newFiles)) 68 | ch <- newFiles 69 | oldList = newList 70 | newFiles = &spool.SpooledGobs{} 71 | 72 | <-ticker 73 | } 74 | l.Printf("Exiting monitor routine %s\n", m.spoolDir) 75 | l.Println("======================= Monitor end ============================================") 76 | return nil 77 | } 78 | -------------------------------------------------------------------------------- /cmd/gobler/sender.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "log" 5 | "os" 6 | "sync" 7 | 8 | "github.com/CLIP-HPC/goslmailer/internal/connectors" 9 | "github.com/CLIP-HPC/goslmailer/internal/spool" 10 | ) 11 | 12 | type sender struct { 13 | connector string 14 | spoolDir string 15 | conn connectors.Connector 16 | num int 17 | } 18 | 19 | func NewSender(c string, sd string, cons *connectors.Connectors, num int) (*sender, error) { 20 | var s sender 21 | 22 | s.connector = c // connector name 23 | s.spoolDir = sd // connector spooldir 24 | s.conn = (*cons)[c] // connector interface 25 | s.num = num // sender number 26 | 27 | return &s, nil 28 | } 29 | 30 | func (s *sender) SenderWorker(psCh <-chan psGob, psfCh chan<- psGob, wg *sync.WaitGroup, l *log.Logger) error { 31 | 32 | defer wg.Done() 33 | 34 | l.Println("======================= Sender start ===========================================") 35 | for { 36 | msg := <-psCh 37 | l.Printf("SENDER %s#%d: received %#v\n", s.connector, s.num, msg) 38 | 39 | // fetch gob mp 40 | // todo: error handling here needs more attention! 41 | sd, err := spool.NewSpool(s.spoolDir) 42 | if err != nil { 43 | // todo: this should not happen, in this case, do we ignore this gob? send it back? return error? 44 | l.Printf("SENDER %s#%d: newspool returned error %s\n", s.connector, s.num, err) 45 | // ignore... 46 | continue 47 | } 48 | 49 | mp, err := sd.FetchGob(msg.fileGob.Filename, l) 50 | if err != nil { 51 | // todo: this should not happen, in this case, do we ignore this gob? send it back? return error? 52 | l.Printf("SENDER %s#%d: fetchgob returned error %s\n", s.connector, s.num, err) 53 | // ignore... 54 | continue 55 | } 56 | 57 | // modify mp: inject the deleted message count from picker 58 | // todo: this causes panic if malformed message reaches this point, example empty MessagePack saved to disk, no JobContext inside (nil) 59 | // see: BUGS.md 60 | mp.JobContext.PrunedMessageCount = msg.deletedCount 61 | 62 | // useSpool == false when called from here, gob is already on disk! 63 | err = s.conn.SendMessage(mp, false, l) 64 | if err != nil { 65 | l.Printf("SENDER %s#%d: connector.sendmessage() returned error %s\n", s.connector, s.num, err) 66 | // failed, send it back to picker 67 | psfCh <- msg 68 | } else { 69 | // Send succeeded, delete gob 70 | lock.Lock() 71 | err = os.Remove(s.spoolDir + "/" + msg.fileGob.Filename) 72 | if err != nil { 73 | l.Printf("SENDER %s#%d: error removing file %s\n", s.connector, s.num, err) 74 | // todo: unlock and return error? or leave this logged and proceed? 
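// Current behaviour: the removal failure is only logged; the message was already
// delivered, so at worst a stale gob file is left behind in the spool directory.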
75 | } else { 76 | l.Printf("SENDER %s#%d: Gob deleted\n", s.connector, s.num) 77 | } 78 | lock.Unlock() 79 | } 80 | } 81 | l.Println("======================= Sender end =============================================") 82 | return nil 83 | } 84 | -------------------------------------------------------------------------------- /cmd/goslmailer/goslmailer.conf: -------------------------------------------------------------------------------- 1 | { 2 | "logfile": "/tmp/goslmailer.log", 3 | "debugconfig": true, 4 | "binpaths": { 5 | "sacct":"/usr/bin/sacct", 6 | "sstat":"/usr/bin/sstat" 7 | }, 8 | "defaultconnector": "msteams", 9 | "connectors": { 10 | "msteams": { 11 | "name": "dev channel", 12 | "renderToFile": "yes", 13 | "spoolDir": "/tmp", 14 | "url": "https://msteams/webhook/url", 15 | "adaptiveCardTemplate": "/path/template.json", 16 | "useLookup": "GECOS" 17 | }, 18 | "mailto": { 19 | "name": "original slurm mail functionality,extended.", 20 | "mailCmd": "/usr/bin/mutt", 21 | "mailCmdParams": "-s \"Job {{ .SlurmEnvironment.SLURM_JOB_ID }} ({{ .SlurmEnvironment.SLURM_JOB_NAME }}) {{ .SlurmEnvironment.SLURM_JOB_MAIL_TYPE }}\"", 22 | "mailTemplate": "/etc/slurm/mailTemplate.tmpl", 23 | "mailFormat": "HTML", 24 | "allowList": ".+@(imp|imba.oeaw|gmi.oeaw).ac.at", 25 | }, 26 | "telegram": { 27 | "name": "telegram bot", 28 | "url": "", 29 | "token": "PasteHereTelegramBotToken", 30 | "renderToFile": "no", 31 | "spoolDir": "/tmp/telegramgobs", 32 | "messageTemplate": "/etc/slurm/telegramTemplate.md", 33 | "useLookup": "no", 34 | "format": "MarkdownV2" 35 | }, 36 | "discord": { 37 | "name": "DiscoSlurmBot", 38 | "triggerString": "showmeslurm", 39 | "token": "PasteBotTokenHere", 40 | "messageTemplate": "/path/to/template.md" 41 | }, 42 | "mattermost": { 43 | "name": "MatTheSlurmBot", 44 | "serverUrl": "https://someSpaceName.cloud.mattermost.com", 45 | "wsUrl": "wss://someSpaceName.cloud.mattermost.com", 46 | "token": "PasteBotTokenHere", 47 | "triggerString": "showmeslurm", 48 | "messageTemplate" : "/path/to/mattermostTemplate.md" 49 | }, 50 | "matrix": { 51 | "username": "@myuser:matrix.org", 52 | "token": "syt_dGRpZG9ib3QXXXXXXXEyQMBEmvOVp_10Jm93", 53 | "homeserver": "matrix.org", 54 | "template": "/path/to/matrix_template.md" 55 | }, 56 | "slack": { 57 | "token": "PasteSlackBotTokenHere", 58 | "messageTemplate": "/path/to/template.md", 59 | "renderToFile": "spool", 60 | "spoolDir": "/tmp" 61 | } 62 | "textfile": { 63 | "path": "/tmp" 64 | } 65 | }, 66 | "qosmap": { 67 | "RAPID": 3600, 68 | "SHORT": 28800, 69 | "MEDIUM": 172800, 70 | "LONG": 1209600 71 | }, 72 | } 73 | -------------------------------------------------------------------------------- /cmd/goslmailer/goslmailer.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "log" 5 | "os" 6 | 7 | _ "github.com/CLIP-HPC/goslmailer/connectors/discord" 8 | _ "github.com/CLIP-HPC/goslmailer/connectors/mailto" 9 | _ "github.com/CLIP-HPC/goslmailer/connectors/matrix" 10 | _ "github.com/CLIP-HPC/goslmailer/connectors/mattermost" 11 | _ "github.com/CLIP-HPC/goslmailer/connectors/msteams" 12 | _ "github.com/CLIP-HPC/goslmailer/connectors/slack" 13 | _ "github.com/CLIP-HPC/goslmailer/connectors/telegram" 14 | "github.com/CLIP-HPC/goslmailer/internal/config" 15 | "github.com/CLIP-HPC/goslmailer/internal/connectors" 16 | "github.com/CLIP-HPC/goslmailer/internal/logger" 17 | "github.com/CLIP-HPC/goslmailer/internal/message" 18 | "github.com/CLIP-HPC/goslmailer/internal/slurmjob" 19 | 
"github.com/CLIP-HPC/goslmailer/internal/version" 20 | ) 21 | 22 | const goslmailer_config_file = "/etc/slurm/goslmailer.conf" 23 | 24 | func main() { 25 | 26 | var ( 27 | ic invocationContext 28 | job slurmjob.JobContext 29 | ) 30 | 31 | // get ENV var GOSLMAILERCONF if it's set, if not, use default /etc... 32 | cf, pres := os.LookupEnv("GOSLMAILER_CONF") 33 | if !pres || cf == "" { 34 | cf = goslmailer_config_file 35 | } 36 | 37 | // read config file 38 | cfg := config.NewConfigContainer() 39 | err := cfg.GetConfig(cf) 40 | if err != nil { 41 | log.Fatalf("ERROR: getConfig() failed: %s\n", err) 42 | } 43 | 44 | // setup logger 45 | l, err := logger.SetupLogger(cfg.Logfile, "goslmailer") 46 | if err != nil { 47 | l.Fatalf("setuplogger(%s) failed with: %q\n", cfg.Logfile, err) 48 | } 49 | 50 | l.Println("======================== START OF RUN ==========================================") 51 | 52 | version.DumpVersion(l) 53 | 54 | cfg.DumpConfig(l) 55 | 56 | // get '-s "subject" userid' command line parameters with which we're called 57 | ic.getCMDLine() 58 | ic.dumpCMDLine(l) 59 | 60 | // parse CmdParams and generate a list of {scheme, target} receivers (e.g. [ {skype, skypeid}, {msteams, msteamsid}, ...]) 61 | ic.generateReceivers(cfg.DefaultConnector, l) 62 | ic.dumpReceivers(l) 63 | 64 | // get SLURM_* environment variables 65 | job.GetSlurmEnvVars() 66 | 67 | // get job statistics based on the SLURM_JOB_ID from slurmEnv struct 68 | // only if job is END or FAIL(?) 69 | err = job.GetJobStats(ic.CmdParams.Subject, cfg.Binpaths, l) 70 | if err != nil { 71 | l.Fatalf("Unable to retrieve job stats. Error: %v", err) 72 | } 73 | 74 | // generate hints based on SlurmEnv and JobStats (e.g. "too much memory requested" or "walltime << requested queue") 75 | // only if job is END or fail(?) 76 | job.GenerateHints(cfg.QosMap) 77 | 78 | // populate map with configured referenced connectors 79 | connectors.ConMap.PopulateConnectors(cfg, l) 80 | 81 | // Iterate over 'Receivers' map and for each call the connector.SendMessage() (if the receiver scheme is configured in conf file AND has an object in connectors map) 82 | if ic.Receivers == nil { 83 | l.Fatalln("No receivers defined. Aborting!") 84 | } 85 | // here we loop through requested receivers and invoke SendMessage() 86 | for _, v := range ic.Receivers { 87 | mp, err := message.NewMsgPack(v.scheme, v.target, &job) 88 | if err != nil { 89 | l.Printf("ERROR in message.NewMsgPack(%s): %q\n", v.scheme, err) 90 | } 91 | con, ok := connectors.ConMap[v.scheme] 92 | if !ok { 93 | l.Printf("%s connector is not initialized for target %s. 
Ignoring.\n", v.scheme, v.target) 94 | } else { 95 | // useSpool == true when called from here, for connectors that use this capability 96 | err := con.SendMessage(mp, true, l) 97 | if err != nil { 98 | l.Printf("ERROR in %s.SendMessage(): %q\n", v.scheme, err) 99 | } 100 | } 101 | } 102 | 103 | l.Println("========================== END OF RUN ==========================================") 104 | } 105 | -------------------------------------------------------------------------------- /cmd/goslmailer/goslmailer.toml.annotated_example: -------------------------------------------------------------------------------- 1 | # 2 | # Annotated goslmailer configuration file 3 | # 4 | 5 | # if specified; append logs to this file; else; dump to stderr 6 | logfile = "/tmp/goslmailer.log" 7 | 8 | # if specified and true ; goslmailer and gobler will dump configuration to log; else; they won't 9 | debugconfig = true 10 | 11 | # default connector to be used for message delivery for receivers without full 'connector:user' specification 12 | defaultconnector = "msteams" 13 | 14 | # paths to slurm binaries (optional, will default to these if not specified) 15 | [binpaths] 16 | sacct = "/usr/bin/sacct" 17 | sstat = "/usr/bin/sstat" 18 | 19 | # map of connector configurations, remove any connectors that you don't wish to expose to the users 20 | [connectors] 21 | 22 | # each connector has it's own map of config attributes: 23 | [connectors.msteams] 24 | name = "dev channel" # unused 25 | renderToFile = "yes" # debug render of message to local file instead of sending ("yes" - render to file, "no" - send, "spool" - spool for gobler) 26 | spoolDir = "/tmp" # dir to use for spooling, remove if spooling not used 27 | url = "https://msteams/webhook/url" # ms teams webhook url 28 | adaptiveCardTemplate = "/path/template.json" # full path to adaptive card template file 29 | useLookup = "GECOS" # which function from lookup package the connector uses to map cmdline userid to end-system userid 30 | # available lookups ("GECOS", "none") 31 | 32 | [connectors.mailto] 33 | name = "original mail functionality, extended." 
# unused 34 | mailCmd = "/usr/bin/mutt" # mail client to use 35 | # mailCmdParams: templateable command line to be passed to mailCmd 36 | mailCmdParams = "-s \"Job {{ .SlurmEnvironment.SLURM_JOB_ID }} ({{ .SlurmEnvironment.SLURM_JOB_NAME }}) {{ .SlurmEnvironment.SLURM_JOB_MAIL_TYPE }}\"" 37 | mailTemplate = "/etc/slurm/mailTemplate.tmpl" # message body template 38 | mailFormat = "HTML" # `HTML` or `text` (can use telegram html in templates/) 39 | allowList = ".+@(imp|imba.oeaw|gmi.oeaw).ac.at" # golang re2 expression : https://github.com/google/re2/wiki/Syntax 40 | 41 | [connectors.telegram] 42 | name = "telegram bot" # bot uses this in hello message "Welcome to "name"" 43 | url = "" # unused, leave empty, might change in the future 44 | token = "PasteHereTelegramBotToken" # token obtained when creating the bot with botfather 45 | renderToFile = "no" # debug render of message to local file instead of sending ("yes" - render to file, "no" - send, "spool" - spool for gobler) 46 | spoolDir = "/tmp/telegramgobs" # where to spool 47 | messageTemplate = "/etc/slurm/telegramTemplate.md" # template file 48 | useLookup = "no" # remove if not using custom lookup functions 49 | format = "MarkdownV2" # options: "MarkdownV2", "HTML" ,depending on the template used 50 | 51 | [connectors.discord] 52 | name = "DiscoSlurmBot" # name that is used in the bot welcome message 53 | triggerString = "showmeslurm" # string (in channel or DM) that triggers the bot to respond with an instructional DM to the user 54 | token = "PasteBotTokenHere" # place to put the bot token 55 | messageTemplate = "/path/to/template.md" # template file to use 56 | 57 | [connectors.mattermost] 58 | name = "MatTheSlurmBot", # mandatory, name used in the bot welcome message 59 | serverUrl = "https://someSpaceName.cloud.mattermost.com", # mandatory, REST url which client uses to send messages 60 | wsUrl = "wss://someSpaceName.cloud.mattermost.com", # mandatory, websocket event delivery system endpoint 61 | token = "PasteBotTokenHere", # mandatory, bot access token 62 | triggerString = "showmeslurm", # string on which bot reacts with the --mail-user message 63 | messageTemplate = "/path/to/mattermostTemplate.md" # markdown template (example in ./templates/mattermostTemplate.md) 64 | 65 | [connectors.matrix] 66 | username = "@myuser:matrix.org" 67 | token = "syt_dGRpZG9ib3QXXXXXXXEyQMBEmvOVp_10Jm93" 68 | homeserver = "matrix.org" 69 | template = "/path/to/matrix_template.md" 70 | 71 | # fictitious "textfile" connector, package code for it doesn't exist, implementation left as the exercise for the reader 72 | [connectors.textfile] 73 | path = "/tmp" 74 | 75 | # map of your sites configured QoSes, with their timelimits (seconds), used for hint generation 76 | [qosmap] 77 | RAPID = 3600 78 | SHORT = 28800 79 | MEDIUM = 172800 80 | LONG = 1209600 -------------------------------------------------------------------------------- /cmd/goslmailer/invocation_context.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "log" 6 | "strings" 7 | ) 8 | 9 | type CmdParams struct { 10 | Subject string 11 | Other []string 12 | } 13 | 14 | type Receivers []struct { 15 | scheme string 16 | target string 17 | } 18 | 19 | // Holder of: 20 | // 1. command line parameters (via log package) 21 | // 2. receivers: from parsed command line (1.) 
comming from: --mail-user userx,mailto:usery@domain,skype:userid via (*invocationContext) generateReceivers() method 22 | type invocationContext struct { 23 | CmdParams 24 | Receivers 25 | } 26 | 27 | func (ic *invocationContext) getCMDLine() { 28 | flag.StringVar(&ic.CmdParams.Subject, "s", "Default Blank Subject", "e-mail subject") 29 | flag.Parse() 30 | ic.CmdParams.Other = flag.Args() 31 | } 32 | 33 | func (ic *invocationContext) dumpCMDLine(l *log.Logger) { 34 | l.Println("Parsing CMDLine:") 35 | l.Printf("CMD subject: %#v\n", ic.CmdParams.Subject) 36 | l.Printf("CMD others: %#v\n", ic.CmdParams.Other) 37 | l.Println("--------------------------------------------------------------------------------") 38 | } 39 | 40 | func (ic *invocationContext) dumpReceivers(l *log.Logger) { 41 | l.Println("DUMP RECEIVERS:") 42 | l.Printf("Receivers: %#v\n", ic.Receivers) 43 | l.Printf("invocationContext: %#v\n", ic) 44 | l.Println("--------------------------------------------------------------------------------") 45 | } 46 | 47 | // generateReceivers populates ic.Receivers (scheme:target) from ic.CmdParams.Other using defCon (defaultconnector) config parameter for undefined schemes 48 | func (ic *invocationContext) generateReceivers(defCon string, l *log.Logger) { 49 | for _, v := range ic.CmdParams.Other { 50 | targets := strings.Split(v, ",") 51 | for i, t := range targets { 52 | targetsSplit := strings.SplitN(t, ":", 2) 53 | l.Printf("generateReceivers: target %d = %#v\n", i, targetsSplit) 54 | // todo: needs rework to accept multiple targets in a single receiver; e.g. mailto:x;y;z 55 | // also " " cornercase is not handled; add to tests as well 56 | switch len(targetsSplit) { 57 | case 1: 58 | if targetsSplit[0] != "" { 59 | ic.Receivers = append(ic.Receivers, struct { 60 | scheme string 61 | target string 62 | }{ 63 | // receivers with unspecified connector scheme get global config key "DefaultConnector" set here: 64 | scheme: defCon, 65 | target: targetsSplit[0], 66 | }) 67 | } else { 68 | l.Printf("generateReceivers: target %d = %#v is an empty receiver, ignoring!\n", i, targetsSplit) 69 | } 70 | case 2: 71 | //Handle "connector:::" (2..n of ":" ) as if the target was empty 72 | if strings.Count(targetsSplit[1], ":") == len(targetsSplit[1]) { 73 | targetsSplit[1] = "" 74 | } 75 | if targetsSplit[1] != "" && targetsSplit[0] != "" { 76 | ic.Receivers = append(ic.Receivers, struct { 77 | scheme string 78 | target string 79 | }{ 80 | scheme: targetsSplit[0], 81 | target: targetsSplit[1], 82 | }) 83 | } else { 84 | l.Printf("generateReceivers: target %d = %#v is an empty receiver, ignoring!\n", i, targetsSplit) 85 | } 86 | default: 87 | l.Printf("generateReceivers: IGNORING! 
unrecognized target string: %s\n", t) 88 | } 89 | } 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /cmd/goslmailer/invocation_context_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "log" 6 | "reflect" 7 | "testing" 8 | ) 9 | 10 | type ic_test_case struct { 11 | name string 12 | defcon string 13 | invocationContext 14 | want Receivers 15 | } 16 | 17 | func TestGenerateReceivers(t *testing.T) { 18 | 19 | wr := bytes.Buffer{} 20 | l := log.New(&wr, "Testing: ", log.Llongfile) 21 | 22 | for _, v := range ic_tc { 23 | t.Run(v.name, func(t *testing.T) { 24 | // func (ic *invocationContext) generateReceivers(defCon string, l *log.Logger) { 25 | v.invocationContext.generateReceivers(v.defcon, l) 26 | t.Logf("\nTest : %s\nSent : %q, %v\nGot : %q\nExpect: %q\n", v.name, v.invocationContext.CmdParams, v.defcon, v.invocationContext.Receivers, v.want) 27 | if !reflect.DeepEqual(v.want, v.invocationContext.Receivers) { 28 | //t.Logf("\nTest : %s\nSent : %q, %v\nGot : %q\nExpect: %q\n", v.name, v.invocationContext.CmdParams, v.defcon, v.invocationContext.Receivers, v.want) 29 | t.Fail() 30 | } 31 | }) 32 | } 33 | 34 | } 35 | -------------------------------------------------------------------------------- /cmd/goslmailer/invocation_context_testcases_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | var ( 4 | ic_tc = []ic_test_case{ 5 | { 6 | name: "TestMissingOtherCmdArgs", 7 | defcon: "msteams", 8 | invocationContext: invocationContext{ 9 | CmdParams{ 10 | Subject: "Slurm subject line", 11 | Other: []string{}, 12 | }, 13 | Receivers{}, 14 | }, 15 | want: Receivers{}, 16 | }, 17 | { 18 | name: "TestEmptyStringArg", 19 | defcon: "msteams", 20 | invocationContext: invocationContext{ 21 | CmdParams{ 22 | Subject: "Slurm subject line", 23 | Other: []string{ 24 | "", 25 | }, 26 | }, 27 | Receivers{}, 28 | }, 29 | want: Receivers{}, 30 | }, 31 | { 32 | name: "TestLongSingleArg", 33 | defcon: "conX", 34 | invocationContext: invocationContext{ 35 | CmdParams{ 36 | Subject: "Slurm subject line", 37 | Other: []string{ 38 | ",conX,conX:pj,msteams:pja,:::xxx,,msteams::,petarj,matrix:!channelid:server.org,:xxx", 39 | }, 40 | }, 41 | Receivers{}, 42 | }, 43 | want: Receivers{ 44 | { 45 | scheme: "conX", 46 | target: "conX", 47 | }, 48 | { 49 | scheme: "conX", 50 | target: "pj", 51 | }, 52 | { 53 | scheme: "msteams", 54 | target: "pja", 55 | }, 56 | { 57 | scheme: "conX", 58 | target: "petarj", 59 | }, 60 | { 61 | scheme: "matrix", 62 | target: "!channelid:server.org", 63 | }, 64 | }, 65 | }, 66 | { 67 | name: "TestMultipleArgs", 68 | defcon: "mailto", 69 | invocationContext: invocationContext{ 70 | CmdParams{ 71 | Subject: "Slurm subject line", 72 | Other: []string{ 73 | "", 74 | "msteams:::", 75 | "mailto:pja@bla.bla,,,", 76 | "pja@bla.bla", 77 | "msteams:pja", 78 | ":::pja", 79 | "matrix:!channelid:server.org", 80 | "::pja", 81 | }, 82 | }, 83 | Receivers{}, 84 | }, 85 | want: Receivers{ 86 | { 87 | scheme: "mailto", 88 | target: "pja@bla.bla", 89 | }, 90 | { 91 | scheme: "mailto", 92 | target: "pja@bla.bla", 93 | }, 94 | { 95 | scheme: "msteams", 96 | target: "pja", 97 | }, 98 | { 99 | scheme: "matrix", 100 | target: "!channelid:server.org", 101 | }, 102 | }, 103 | }, 104 | } 105 | ) 106 | -------------------------------------------------------------------------------- /cmd/goslmailer/telegramTemplate.md: 
-------------------------------------------------------------------------------- 1 | {{ .Job.MailSubject }} {{ .Job.SlurmEnvironment.SLURM_JOB_MAIL_TYPE }} 2 | `----------------------------------------` 3 | {{ if ne .Job.PrunedMessageCount 0 }} 4 | *WARNING: Rate limiting triggered. {{ .Job.PrunedMessageCount }} additonal notificiations have been suppressed* 5 | `----------------------------------------` 6 | {{ end }} 7 | ``` 8 | Job Name : {{ .Job.SlurmEnvironment.SLURM_JOB_NAME }} 9 | Job ID : {{ .Job.SlurmEnvironment.SLURM_JOB_ID }} 10 | User : {{ .Job.SlurmEnvironment.SLURM_JOB_USER }} 11 | Partition : {{ .Job.SlurmEnvironment.SLURM_JOB_PARTITION }} 12 | Nodes Used : {{ .Job.SlurmEnvironment.SLURM_JOB_NODELIST }} 13 | Cores : {{ .Job.JobStats.Ncpus }} 14 | Job state : {{ .Job.SlurmEnvironment.SLURM_JOB_STATE }} 15 | {{- if ne .Job.SlurmEnvironment.SLURM_JOB_STATE "RUNNING" }} 16 | Exit Code : {{ .Job.SlurmEnvironment.SLURM_JOB_EXIT_CODE_MAX }} 17 | {{- end }} 18 | Submit : {{ .Job.JobStats.Submittime }} 19 | Start : {{ .Job.JobStats.Starttime }} 20 | {{- if ne .Job.SlurmEnvironment.SLURM_JOB_STATE "RUNNING" }} 21 | End : {{ .Job.JobStats.Endtime }} 22 | {{- end }} 23 | Reserved Walltime : {{ .Job.JobStats.WalltimeStr }} 24 | {{- if ne .Job.SlurmEnvironment.SLURM_JOB_MAIL_TYPE "Began" }} 25 | Used Walltime : {{ .Job.SlurmEnvironment.SLURM_JOB_RUN_TIME }} 26 | {{- if ne .Job.SlurmEnvironment.SLURM_JOB_STATE "RUNNING" }} 27 | Used CPU time : {{ .Job.JobStats.TotalCPUStr }} 28 | % User (Computation) : {{ printf "%5.2f%%" .Job.JobStats.CalcUserComputePercentage }} 29 | % System (I/O) : {{ printf "%5.2f%%" .Job.JobStats.CalcSystemComputePercentage }} 30 | {{- end }} 31 | {{- end }} 32 | Memory Requested : {{ .Job.JobStats.ReqMem | humanBytes }} 33 | {{- if ne .Job.SlurmEnvironment.SLURM_JOB_MAIL_TYPE "Began" }} 34 | Max Memory Used : {{ .Job.JobStats.MaxRSS | humanBytes }} 35 | Max Disk Write : {{ .Job.JobStats.MaxDiskWrite | humanBytes }} 36 | Max Disk Read : {{ .Job.JobStats.MaxDiskRead | humanBytes }} 37 | {{- end }} 38 | ``` 39 | `----------------------------------------` 40 | ``` 41 | {{- range .Job.Hints }} 42 | {{ . }} 43 | {{- end }} 44 | ``` 45 | `----------------------------------------` 46 | -------------------------------------------------------------------------------- /cmd/mattermostbot/mattermost.conf: -------------------------------------------------------------------------------- 1 | { 2 | "logFile": "/tmp/mm.log", 3 | "connectors": { 4 | "mattermost": { 5 | "name": "MatTheSlurmBot", 6 | "serverUrl": "https://someSpaceName.cloud.mattermost.com", 7 | "wsUrl": "wss://someSpaceName.cloud.mattermost.com", 8 | "token": "PasteBotTokenHere", 9 | "triggerString": "showmeslurm" 10 | } 11 | } 12 | } 13 | 14 | -------------------------------------------------------------------------------- /cmd/mattermostbot/mattermostbot.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "os" 7 | "strings" 8 | 9 | "github.com/CLIP-HPC/goslmailer/internal/cmdline" 10 | "github.com/CLIP-HPC/goslmailer/internal/config" 11 | "github.com/CLIP-HPC/goslmailer/internal/logger" 12 | "github.com/CLIP-HPC/goslmailer/internal/version" 13 | "github.com/mattermost/mattermost-server/v5/model" 14 | ) 15 | 16 | const app = "mattermost" 17 | 18 | func TestServer(c *model.Client4, l *log.Logger) error { 19 | if props, resp := c.GetOldClientConfig(""); resp.Error != nil { 20 | // server ping failed! 
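// GetOldClientConfig is used here purely as a reachability check; any transport
// or auth error is returned to the caller, which aborts startup.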
21 | return resp.Error 22 | } else { 23 | l.Printf("Server detected and is running version " + props["Version"]) 24 | l.Printf("Server returned: %#v\n", props) 25 | return nil 26 | } 27 | } 28 | 29 | func main() { 30 | 31 | var ( 32 | l *log.Logger 33 | err error 34 | ) 35 | 36 | // parse command line params 37 | cmd, err := cmdline.NewCmdArgs(app) 38 | if err != nil { 39 | log.Fatalf("ERROR: parse command line failed with: %q\n", err) 40 | } 41 | 42 | if *(cmd.Version) { 43 | l = log.New(os.Stderr, app+":", log.Lshortfile|log.Ldate|log.Lmicroseconds) 44 | version.DumpVersion(l) 45 | os.Exit(0) 46 | } 47 | 48 | // read config file 49 | cfg := config.NewConfigContainer() 50 | err = cfg.GetConfig(*(cmd.CfgFile)) 51 | if err != nil { 52 | log.Fatalf("ERROR: getConfig() failed: %s\n", err) 53 | } 54 | 55 | // setup logger 56 | l, err = logger.SetupLogger(cfg.Logfile, app) 57 | if err != nil { 58 | log.Fatalf("setuplogger(%s) failed with: %q\n", cfg.Logfile, err) 59 | } 60 | 61 | l.Println("==================== mattermostbot start =======================================") 62 | 63 | version.DumpVersion(l) 64 | 65 | if _, ok := cfg.Connectors[app]["triggerString"]; !ok { 66 | l.Printf("Info: fetching config[connectors][mattermost][triggerString] failed: setting it to default: showmeslurm\n") 67 | cfg.Connectors[app]["triggerString"] = "showmeslurm" 68 | } 69 | if _, ok := cfg.Connectors[app]["token"]; !ok { 70 | l.Fatalf("MAIN: fetching config[connectors][mattermost][token] failed: %s\n", err) 71 | } 72 | 73 | l.Printf("Starting: %q\n", cfg.Connectors[app]["name"]) 74 | 75 | client := model.NewAPIv4Client(cfg.Connectors[app]["serverUrl"]) 76 | client.SetOAuthToken(cfg.Connectors[app]["token"]) 77 | l.Printf("\nclient: %#v\n", client) 78 | 79 | if e := TestServer(client, l); e != nil { 80 | l.Fatalf("Can not proceed, TestServer() returned error: %s\n", e) 81 | } 82 | 83 | // Main loop 84 | for { 85 | webSocketClient, err := model.NewWebSocketClient4(cfg.Connectors[app]["wsUrl"], client.AuthToken) 86 | if err != nil { 87 | l.Fatalf("ERROR: NewWebSocketClient(): %s\n", err) 88 | } 89 | l.Printf("Connected to WS: %s\n", cfg.Connectors[app]["wsUrl"]) 90 | l.Printf("Websocketclient: %#v\n\n", webSocketClient) 91 | webSocketClient.Listen() 92 | 93 | l.Printf("Listening to event channels...\n") 94 | for resp := range webSocketClient.EventChannel { 95 | //l.Printf("GOT WS EVENT: %#v\n", resp) 96 | data := resp.GetData() 97 | //l.Printf("Data: %#v\n", data) 98 | l.Printf("Channel name: %s\n", data["channel_name"]) 99 | //l.Printf("JSON: %s\n", resp.ToJson()) 100 | 101 | x, ok := resp.GetData()["post"].(string) 102 | if !ok { 103 | l.Printf("Info: post == nil, skipping.\n") 104 | } else { 105 | post := model.PostFromJson(strings.NewReader(x)) 106 | //l.Printf("POST: %#v\n", post) 107 | l.Printf("POST.channelid: %s\n", post.ChannelId) 108 | l.Printf("POST.userid: %s\n", post.UserId) 109 | l.Printf("POST.message: %s\n", post.Message) 110 | if strings.Contains(post.Message, cfg.Connectors[app]["triggerString"]) { 111 | // Post something back! 
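// Reply in the channel the trigger came from, telling the user which
// --mail-user value routes Slurm notifications to this channel.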
112 | resPost := model.Post{} 113 | resPost.ChannelId = post.ChannelId 114 | resPost.Message = fmt.Sprintf("Hello!\nI'm %s!\nTo receive your job results here, use the following switch in your job scripts:\n--mail-user=mattermost:%s\n", cfg.Connectors[app]["name"], resPost.ChannelId) 115 | if _, r := client.CreatePost(&resPost); r.Error == nil { 116 | l.Printf("Post response to chan: %s successful!\n", resPost.ChannelId) 117 | } else { 118 | l.Printf("Post response FAILED!\n") 119 | } 120 | } 121 | } 122 | } 123 | } 124 | 125 | l.Println("==================== mattermostbot end =========================================") 126 | } 127 | -------------------------------------------------------------------------------- /cmd/tgslurmbot/tgslurmbot.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "os" 7 | "time" 8 | 9 | "github.com/CLIP-HPC/goslmailer/internal/cmdline" 10 | "github.com/CLIP-HPC/goslmailer/internal/config" 11 | "github.com/CLIP-HPC/goslmailer/internal/logger" 12 | "github.com/CLIP-HPC/goslmailer/internal/version" 13 | tele "gopkg.in/telebot.v3" 14 | ) 15 | 16 | const app = "tgslurmbot" 17 | 18 | func main() { 19 | 20 | var ( 21 | l *log.Logger 22 | err error 23 | ) 24 | 25 | // parse command line params 26 | cmd, err := cmdline.NewCmdArgs(app) 27 | if err != nil { 28 | log.Fatalf("ERROR: parse command line failed with: %q\n", err) 29 | } 30 | 31 | if *(cmd.Version) { 32 | l = log.New(os.Stderr, app+":", log.Lshortfile|log.Ldate|log.Lmicroseconds) 33 | version.DumpVersion(l) 34 | os.Exit(0) 35 | } 36 | 37 | // read config file 38 | cfg := config.NewConfigContainer() 39 | err = cfg.GetConfig(*(cmd.CfgFile)) 40 | if err != nil { 41 | log.Fatalf("ERROR: getConfig() failed: %s\n", err) 42 | } 43 | 44 | // setup logger 45 | l, err = logger.SetupLogger(cfg.Logfile, app) 46 | if err != nil { 47 | log.Fatalf("setuplogger(%s) failed with: %q\n", cfg.Logfile, err) 48 | } 49 | 50 | l.Println("======================= tgslurmbot start =======================================") 51 | 52 | version.DumpVersion(l) 53 | 54 | if _, ok := cfg.Connectors["telegram"]["token"]; !ok { 55 | l.Fatalf("MAIN: fetching config[connectors][telegram][token] failed\n") 56 | } 57 | 58 | l.Printf("Starting: %q\n", cfg.Connectors["telegram"]["name"]) 59 | 60 | pref := tele.Settings{ 61 | Token: cfg.Connectors["telegram"]["token"], 62 | Poller: &tele.LongPoller{Timeout: 10 * time.Second}, 63 | } 64 | 65 | b, err := tele.NewBot(pref) 66 | if err != nil { 67 | log.Fatal(err) 68 | return 69 | } 70 | 71 | b.Handle(tele.OnText, func(c tele.Context) error { 72 | return c.Send("Sorry, I'm not programmed to reply. To get the slurm command line switch for receiving messages, type: /start") 73 | }) 74 | 75 | b.Handle("/start", func(c tele.Context) error { 76 | // todo: logging of msg exchanges?
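// Reply with the exact '--mail-user=telegram:<chat ID>' switch this chat needs in its job submission scripts.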
77 | str := fmt.Sprintf("Welcome to %s,\nplease use this switch in your job submission script in addition to '--mail-type' and i'll get back to you:\n '--mail-user=telegram:%d'", cfg.Connectors["telegram"]["name"], c.Chat().ID) 78 | return c.Send(str) 79 | }) 80 | 81 | b.Start() 82 | 83 | } 84 | -------------------------------------------------------------------------------- /connectors/connectorX/README.md: -------------------------------------------------------------------------------- 1 | # Example boilerplate code for a fully featured connector (spooling, rendering, lookups) 2 | 3 | connectorX is a sample boilerplate connector. 4 | 5 | Delivers job notification (rendered template) over a TCP connection to the specified address:port. 6 | It is an example of how to use all of the 'extra' capabilities: lookups, rendertofile and spooling. 7 | 8 | Files you'll need to get started: 9 | 10 | * connector data structure [connector_data.go](./connector_data.go) 11 | * connector code [connectorX.go](./connectorX.go) 12 | * example [config file](./goslmailer.conf) 13 | * example [template file](./conX.tmpl) 14 | 15 | ## Exercise for the reader: 16 | 17 | To make this connector work, add the missing blank import to [goslmailer main package](../../cmd/goslmailer/goslmailer.go) to trigger init(). 18 | 19 | Recompile and try it out. 20 | 21 | To verify it works: 22 | 23 | ``` 24 | [pja@red0 goslmailer]$ nc -lkv localhost 9999 25 | Ncat: Version 7.70 ( https://nmap.org/ncat ) 26 | Ncat: Listening on ::1:9999 27 | Ncat: Connection from ::1. 28 | Ncat: Connection from ::1:32848. 29 | Job Name : SendAllArrayJob 30 | Job ID : 1051492 31 | User : petar.jager 32 | Partition : c 33 | Nodes Used : stg-c2-0 34 | Cores : 4 35 | Job state : COMPLETED 36 | Exit Code : 0 37 | Submit : 2022-02-16T20:40:15 38 | Start : 2022-02-16T20:40:15 39 | End : 2022-02-17T01:11:04 40 | Res. Walltime : 08:00:00 41 | Used Walltime : 00:00:30 42 | Used CPU time : 01:57.511 43 | % User (Comp) : 86.81% 44 | % System (I/O) : 13.19% 45 | Memory Requested : 34 GB 46 | Max Memory Used : 1.1 GB 47 | Max Disk Write : 10 kB 48 | Max Disk Read : 136 kB 49 | TIP: Please consider lowering the ammount of requested memory in the future, your job has consumed less then half of the requested memory. 50 | TIP: Please consider lowering the amount of requested CPU cores in the future, your job has consumed less than half of requested CPU cores 51 | ^C 52 | ``` 53 | 54 | **Good Luck!** -------------------------------------------------------------------------------- /connectors/connectorX/conX.tmpl: -------------------------------------------------------------------------------- 1 | Job Name : {{ .Job.SlurmEnvironment.SLURM_JOB_NAME }} 2 | Job ID : {{ .Job.SlurmEnvironment.SLURM_JOB_ID }} 3 | User : {{ .Job.SlurmEnvironment.SLURM_JOB_USER }} 4 | Partition : {{ .Job.SlurmEnvironment.SLURM_JOB_PARTITION }} 5 | Nodes Used : {{ .Job.SlurmEnvironment.SLURM_JOB_NODELIST }} 6 | Cores : {{ .Job.JobStats.Ncpus }} 7 | Job state : {{ .Job.SlurmEnvironment.SLURM_JOB_STATE }} 8 | {{- if ne .Job.SlurmEnvironment.SLURM_JOB_STATE "RUNNING" }} 9 | Exit Code : {{ .Job.SlurmEnvironment.SLURM_JOB_EXIT_CODE_MAX }} 10 | {{- end }} 11 | Submit : {{ .Job.JobStats.Submittime }} 12 | Start : {{ .Job.JobStats.Starttime }} 13 | {{- if ne .Job.SlurmEnvironment.SLURM_JOB_STATE "RUNNING" }} 14 | End : {{ .Job.JobStats.Endtime }} 15 | {{- end }} 16 | Res. 
Walltime : {{ .Job.JobStats.WalltimeStr }} 17 | {{- if ne .Job.SlurmEnvironment.SLURM_JOB_MAIL_TYPE "Began" }} 18 | Used Walltime : {{ .Job.SlurmEnvironment.SLURM_JOB_RUN_TIME }} 19 | {{- if ne .Job.SlurmEnvironment.SLURM_JOB_STATE "RUNNING" }} 20 | Used CPU time : {{ .Job.JobStats.TotalCPUStr }} 21 | % User (Comp) : {{ printf "%5.2f%%" .Job.JobStats.CalcUserComputePercentage }} 22 | % System (I/O) : {{ printf "%5.2f%%" .Job.JobStats.CalcSystemComputePercentage }} 23 | {{- end }} 24 | {{- end }} 25 | Memory Requested : {{ .Job.JobStats.ReqMem | humanBytes }} 26 | {{- if ne .Job.SlurmEnvironment.SLURM_JOB_MAIL_TYPE "Began" }} 27 | Max Memory Used : {{ .Job.JobStats.MaxRSS | humanBytes }} 28 | Max Disk Write : {{ .Job.JobStats.MaxDiskWrite | humanBytes }} 29 | Max Disk Read : {{ .Job.JobStats.MaxDiskRead | humanBytes }} 30 | {{- end }} 31 | {{- range .Job.Hints }} 32 | {{ . }} 33 | 34 | {{- end }} 35 | -------------------------------------------------------------------------------- /connectors/connectorX/connector_data.go: -------------------------------------------------------------------------------- 1 | /* 2 | connectorX is a sample boilerplate connector. It opens a TCP connection to the specified address:port and sends a rendered template string. 3 | It is an example of how to use all of the 'extra' capabilities: lookups, rendertofile and spooling. 4 | You can start from here and replace the parts of code needed/not needed to implement a new connector. 5 | 6 | MUST: Besides the connector package code, in order to "register" the connector, you MUST add its 'case block' to connectors package Connectors.PopulateConnectors(). 7 | HERE: ../../internal/connectors/connectors.go:25 8 | */ 9 | package connectorX 10 | 11 | import "log" 12 | 13 | // Name of the connector, used in the init() function to Register() it to connectors package. 14 | const connectorName = "connectorX" 15 | 16 | // Connector structure contains configuration data read in from config file with connectorX.NewConnector().
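// The field names below mirror the keys of the "connectorX" block in the example goslmailer.conf in this directory.
// Reminder (see README.md): the connector only becomes active once the goslmailer main package blank-imports this package so init() can call connectors.Register(); following the other connectors, that import line would presumably be: _ "github.com/CLIP-HPC/goslmailer/connectors/connectorX"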
17 | // Populate this structure with the configuration variables a new connector needs 18 | type Connector struct { 19 | name string // optional 20 | addr string // hostname/ip to connect to 21 | port string // port to connect to 22 | templateFile string // template file 23 | 24 | // these 3 are optional, if the connector won't use lookups and/or spooling capabilities (gobler service) but send directly from goslmailer they can be completely removed 25 | renderToFile string // renderToFile can be: "yes", "no", "spool" 26 | spoolDir string // where to place spooled messages 27 | useLookup string // string passed to lookup.ExtLookupUser() which determines which lookup function to call 28 | } 29 | 30 | // dumpConnector logs the connector configuration read from config file 31 | func (c *Connector) dumpConnector(l *log.Logger) { 32 | l.Printf("connectorX.dumpConnector: name: %q\n", c.name) 33 | l.Printf("connectorX.dumpConnector: addr: %q\n", c.addr) 34 | l.Printf("connectorX.dumpConnector: port: %q\n", c.port) 35 | l.Printf("connectorX.dumpConnector: renderToFile: %q\n", c.renderToFile) 36 | l.Printf("connectorX.dumpConnector: spoolDir: %q\n", c.spoolDir) 37 | l.Printf("connectorX.dumpConnector: useLookup: %q\n", c.useLookup) 38 | l.Println("................................................................................") 39 | 40 | } 41 | 42 | // Variable holding the connector configuration, mandatory 43 | var connConnectorX *Connector = new(Connector) 44 | -------------------------------------------------------------------------------- /connectors/connectorX/goslmailer.conf: -------------------------------------------------------------------------------- 1 | { 2 | "logfile": "", 3 | "defaultconnector": "msteams", 4 | "connectors": { 5 | "msteams": { 6 | "renderToFile": "spool", 7 | "spoolDir": "/tmp", 8 | "useLookup": "GECOS" 9 | }, 10 | "connectorX": { 11 | "name": "conX", 12 | "addr": "localhost", 13 | "port": "9999", 14 | "templateFile": "/tmp/conX.tmpl", 15 | "renderToFile": "yes", 16 | "spoolDir": "/tmp", 17 | "useLookup": "no" 18 | }, 19 | "mailto": { 20 | "name": "original slurm mail functionality, extended.", 21 | "mailCmd": "/etc/slurm/mail.sh", 22 | "mailCmdParams": "-s \"Job {{ .SlurmEnvironment.SLURM_JOB_ID }} ({{ .SlurmEnvironment.SLURM_JOB_NAME }}) {{ .SlurmEnvironment.SLURM_JOB_MAIL_TYPE }}\"", 23 | "mailTemplate": "/etc/slurm/mailTemplate.tmpl", 24 | "mailFormat": "HTML", 25 | "allowList": ".+@(imp|imba.oeaw|gmi.oeaw).ac.at", 26 | "blockList": "" 27 | }, 28 | "telegram": { 29 | "name": "CLIP SlurmBot", 30 | "url": "", 31 | "token": "PasteTokenHere", 32 | "renderToFile": "no", 33 | "spoolDir": "/tmp/telegramgobs", 34 | "messageTemplate": "/etc/slurm/telegramTemplate.html", 35 | "useLookup": "no", 36 | "format": "HTML" 37 | }, 38 | "textfile": { 39 | "path": "/tmp" 40 | } 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /connectors/discord/connector_data.go: -------------------------------------------------------------------------------- 1 | package discord 2 | 3 | import "log" 4 | 5 | const connectorName = "discord" 6 | 7 | type Connector struct { 8 | name string 9 | triggerString string 10 | token string 11 | renderToFile string 12 | spoolDir string 13 | messageTemplate string 14 | useLookup string 15 | format string 16 | } 17 | 18 | func (c *Connector) dumpConnector(l *log.Logger) { 19 | l.Printf("discord.dumpConnector: name: %q\n", c.name) 20 | l.Printf("discord.dumpConnector: triggerstring: %q\n", c.triggerString) 21 | 
l.Printf("discord.dumpConnector: token: PRESENT\n") 22 | l.Printf("discord.dumpConnector: renderToFile: %q\n", c.renderToFile) 23 | l.Printf("discord.dumpConnector: spoolDir: %q\n", c.spoolDir) 24 | l.Printf("discord.dumpConnector: messageTemplate: %q\n", c.messageTemplate) 25 | l.Printf("discord.dumpConnector: useLookup: %q\n", c.useLookup) 26 | l.Printf("discord.dumpConnector: format: %q\n", c.format) 27 | l.Println("................................................................................") 28 | 29 | } 30 | 31 | var connDiscord *Connector = new(Connector) 32 | -------------------------------------------------------------------------------- /connectors/discord/discord.go: -------------------------------------------------------------------------------- 1 | package discord 2 | 3 | import ( 4 | "bytes" 5 | "errors" 6 | "io" 7 | "log" 8 | "os" 9 | "strconv" 10 | "time" 11 | 12 | "github.com/CLIP-HPC/goslmailer/internal/connectors" 13 | "github.com/CLIP-HPC/goslmailer/internal/lookup" 14 | "github.com/CLIP-HPC/goslmailer/internal/message" 15 | "github.com/CLIP-HPC/goslmailer/internal/renderer" 16 | "github.com/CLIP-HPC/goslmailer/internal/spool" 17 | "github.com/bwmarrin/discordgo" 18 | ) 19 | 20 | func init() { 21 | connectors.Register(connectorName, connDiscord) 22 | } 23 | 24 | func (c *Connector) ConfigConnector(conf map[string]string) error { 25 | 26 | // here we need some test if the connectors "minimal" configuration is satisfied, e.g. must have url at minimum 27 | c.name = conf["name"] 28 | c.triggerString = conf["triggerString"] 29 | c.token = conf["token"] 30 | c.renderToFile = conf["renderToFile"] 31 | c.spoolDir = conf["spoolDir"] 32 | c.messageTemplate = conf["messageTemplate"] 33 | c.useLookup = conf["useLookup"] 34 | c.format = conf["format"] 35 | 36 | switch { 37 | // token must be present 38 | case c.token == "": 39 | return errors.New("discord bot token must be defined, aborting") 40 | // if renderToFile=="no" or "spool" then spoolDir must not be empty 41 | case c.renderToFile == "no" || c.renderToFile == "spool": 42 | if c.spoolDir == "" { 43 | return errors.New("discord spoolDir must be defined, aborting") 44 | } 45 | 46 | } 47 | 48 | return nil 49 | } 50 | 51 | func (c *Connector) SendMessage(mp *message.MessagePack, useSpool bool, l *log.Logger) error { 52 | 53 | var ( 54 | e error = nil 55 | outFile string 56 | dts bool = false // DumpToSpool 57 | buffer bytes.Buffer 58 | ) 59 | 60 | l.Println("................... sendTodiscord START ........................................") 61 | 62 | // debug purposes 63 | c.dumpConnector(l) 64 | 65 | // spin up new bot 66 | // Create a new Discord session using the provided bot token. 
67 | dg, err := discordgo.New("Bot " + c.token) 68 | if err != nil { 69 | l.Println("error creating Discord session,", err) 70 | return err 71 | } 72 | 73 | // lookup the end-system userid from the one sent by slurm (if lookup is set in "useLookup" config param) 74 | enduser, err := lookup.ExtLookupUser(mp.TargetUser, c.useLookup, l) 75 | if err != nil { 76 | l.Printf("Lookup failed for %s with %s\n", mp.TargetUser, err) 77 | return err 78 | } 79 | l.Printf("Looked up with %q %s -> %s\n", c.useLookup, mp.TargetUser, enduser) 80 | 81 | l.Printf("Sending to targetUserID: %s\n", enduser) 82 | 83 | // don't render template when using spool 84 | if c.renderToFile != "spool" { 85 | // buffer to place rendered json in 86 | buffer = bytes.Buffer{} 87 | //err := c.discordRenderTemplate(mp.JobContext, enduser, &buffer) 88 | err := renderer.RenderTemplate(c.messageTemplate, c.format, mp.JobContext, enduser, &buffer) 89 | if err != nil { 90 | return err 91 | } 92 | } 93 | 94 | // this can be: "yes", "spool", anythingelse 95 | switch c.renderToFile { 96 | case "yes": 97 | // render template to a file in working directory - debug purposes 98 | // prepare outfile name 99 | t := strconv.FormatInt(time.Now().UnixNano(), 10) 100 | l.Printf("Time: %s\n", t) 101 | outFile = "rendered-" + mp.JobContext.SLURM_JOB_ID + "-" + enduser + "-" + t + ".msg" 102 | res, err := io.ReadAll(&buffer) 103 | if err != nil { 104 | return err 105 | } 106 | err = os.WriteFile(outFile, res, 0644) 107 | if err != nil { 108 | return err 109 | } 110 | l.Printf("Send successful to file: %s\n", outFile) 111 | case "spool": 112 | // deposit GOB to spoolDir if allowed 113 | if useSpool { 114 | err := spool.DepositToSpool(c.spoolDir, mp) 115 | if err != nil { 116 | l.Printf("DepositToSpool Failed!\n") 117 | return err 118 | } 119 | } 120 | default: 121 | // Then we send the message through the channel we created. 122 | //_, err = dg.ChannelMessageSend(enduser, "A successfull message at "+time.Now().String()) 123 | _, err = dg.ChannelMessageSend(enduser, buffer.String()) 124 | if err != nil { 125 | l.Printf("error sending DM message: %s\n", err) 126 | dts = true 127 | } else { 128 | l.Printf("bot.Send() successful\n") 129 | dts = false 130 | } 131 | 132 | dg.Close() 133 | } 134 | 135 | // save mp to spool if we're allowed (not allowed when called from gobler, to prevent gobs multiplying) 136 | if dts && useSpool { 137 | l.Printf("Backing off to spool.\n") 138 | err := spool.DepositToSpool(c.spoolDir, mp) 139 | if err != nil { 140 | l.Printf("DepositToSpool Failed!\n") 141 | return err 142 | } 143 | } 144 | 145 | l.Println("................... 
sendTodiscord END ..........................................") 146 | 147 | return e 148 | } 149 | -------------------------------------------------------------------------------- /connectors/mailto/connector_data.go: -------------------------------------------------------------------------------- 1 | package mailto 2 | 3 | const connectorName = "mailto" 4 | 5 | type Connector struct { 6 | name string 7 | mailCmd string 8 | mailCmdParams string 9 | mailTemplate string 10 | mailFormat string 11 | allowList string 12 | blockList string 13 | } 14 | 15 | var connMailto *Connector = new(Connector) 16 | -------------------------------------------------------------------------------- /connectors/mailto/mailto.go: -------------------------------------------------------------------------------- 1 | package mailto 2 | 3 | import ( 4 | "bytes" 5 | "errors" 6 | "log" 7 | "os/exec" 8 | "regexp" 9 | "text/template" 10 | 11 | "github.com/CLIP-HPC/goslmailer/internal/connectors" 12 | "github.com/CLIP-HPC/goslmailer/internal/message" 13 | "github.com/CLIP-HPC/goslmailer/internal/renderer" 14 | ) 15 | 16 | func init() { 17 | connectors.Register(connectorName, connMailto) 18 | } 19 | 20 | func (c *Connector) ConfigConnector(conf map[string]string) error { 21 | c.name = conf["name"] 22 | c.mailCmd = conf["mailCmd"] 23 | c.mailCmdParams = conf["mailCmdParams"] 24 | c.mailTemplate = conf["mailTemplate"] 25 | c.mailFormat = conf["mailFormat"] 26 | c.allowList = conf["allowList"] 27 | c.blockList = conf["blockList"] 28 | 29 | // here we need some test if the connectors "minimal" configuration is satisfied, e.g. must have url at minimum 30 | // 31 | // if ok, return nil error 32 | return nil 33 | } 34 | 35 | func (c *Connector) SendMessage(mp *message.MessagePack, useSpool bool, l *log.Logger) error { 36 | var ( 37 | e error 38 | cmdparams = bytes.Buffer{} 39 | body = bytes.Buffer{} 40 | ) 41 | 42 | // render mail command line params (-s "mail subject" et.al.) 43 | tmpl := template.Must(template.New("cmdparams").Parse(c.mailCmdParams)) 44 | e = tmpl.Execute(&cmdparams, mp.JobContext) 45 | if e != nil { 46 | return e 47 | } 48 | 49 | // render mail body 50 | err := renderer.RenderTemplate(c.mailTemplate, c.mailFormat, mp.JobContext, mp.TargetUser, &body) 51 | if err != nil { 52 | return err 53 | } 54 | 55 | l.Printf("PARAMS: %#v\n", c) 56 | l.Printf("CMD: %q\n", string(cmdparams.Bytes())) 57 | l.Printf("BODY: %q\n", string(body.Bytes())) 58 | 59 | // todo: 60 | // - call lookup on targetUserId? 
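// - enforce blockList (it is read from the config in ConfigConnector() but never checked here; only allowList is applied)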
61 | // - implement useSpool mechanics for gobler 62 | 63 | // allowList 64 | re, err := regexp.Compile(c.allowList) 65 | if err != nil { 66 | return err 67 | } 68 | if !re.Match([]byte(mp.TargetUser)) { 69 | // not in allowList 70 | return errors.New("not allowed to send mail to user") 71 | } 72 | 73 | // send: 74 | cmd := exec.Command(c.mailCmd, cmdparams.String(), mp.TargetUser) 75 | l.Printf("ExecCMD: %q %q\n", cmd.Path, cmd.Args) 76 | cmd.Stdin = &body 77 | //cmd.Stdin = bytes.NewBuffer([]byte{0x04}) 78 | out, e := cmd.Output() 79 | if e != nil { 80 | return e 81 | } 82 | 83 | l.Println(string(out)) 84 | 85 | return e 86 | } 87 | -------------------------------------------------------------------------------- /connectors/matrix/connector_data.go: -------------------------------------------------------------------------------- 1 | package matrix 2 | 3 | const connectorName = "matrix" 4 | 5 | type Connector struct { 6 | username string 7 | token string 8 | homeserver string 9 | template string 10 | } 11 | 12 | var connMatrix *Connector = new(Connector) 13 | -------------------------------------------------------------------------------- /connectors/matrix/matrix.go: -------------------------------------------------------------------------------- 1 | package matrix 2 | 3 | import ( 4 | "bytes" 5 | "log" 6 | 7 | "github.com/CLIP-HPC/goslmailer/internal/connectors" 8 | "github.com/CLIP-HPC/goslmailer/internal/message" 9 | "github.com/CLIP-HPC/goslmailer/internal/renderer" 10 | 11 | "maunium.net/go/mautrix" 12 | "maunium.net/go/mautrix/event" 13 | "maunium.net/go/mautrix/format" 14 | "maunium.net/go/mautrix/id" 15 | ) 16 | 17 | func init() { 18 | connectors.Register(connectorName, connMatrix) 19 | } 20 | 21 | func (c *Connector) ConfigConnector(conf map[string]string) error { 22 | 23 | c.username = conf["username"] 24 | c.token = conf["token"] 25 | c.homeserver = conf["homeserver"] 26 | c.template = conf["template"] 27 | 28 | // here we need some test if the connectors "minimal" configuration is satisfied, e.g. must have url at minimum 29 | // 30 | // if ok, return nil error 31 | return nil 32 | } 33 | 34 | func (c *Connector) SendMessage(mp *message.MessagePack, useSpool bool, l *log.Logger) error { 35 | var ( 36 | err error = nil 37 | buffer bytes.Buffer 38 | roomid string = mp.TargetUser 39 | ) 40 | 41 | buffer = bytes.Buffer{} 42 | err = renderer.RenderTemplate(c.template, "text", mp.JobContext, roomid, &buffer) 43 | if err != nil { 44 | return err 45 | } 46 | 47 | l.Println("Logging into", c.homeserver, "as", c.username) 48 | client, err := mautrix.NewClient(c.homeserver, id.UserID(c.username), c.token) 49 | if err != nil { 50 | return err 51 | } 52 | 53 | content := format.RenderMarkdown(buffer.String(), true, true) 54 | content.MsgType = event.MsgNotice 55 | _, err = client.SendMessageEvent(id.RoomID(roomid), event.EventMessage, content) 56 | 57 | return err 58 | } 59 | -------------------------------------------------------------------------------- /connectors/matrix/template.md: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | ## {{ .Job.MailSubject }} {{ .Job.SlurmEnvironment.SLURM_JOB_MAIL_TYPE }} 4 | Created {{ .Created }} 5 | {{ if ne .Job.PrunedMessageCount 0 }} 6 | WARNING: Rate limiting triggered. 
{{ .Job.PrunedMessageCount }} additional notifications have been suppressed 7 | {{ end }} 8 | 9 | #### Details 10 | 11 | - Job Name : {{ .Job.SlurmEnvironment.SLURM_JOB_NAME }} 12 | - Job ID : {{ .Job.SlurmEnvironment.SLURM_JOB_ID }} 13 | - User : {{ .Job.JobStats.User }} 14 | - Partition : {{ .Job.JobStats.Partition }} 15 | - Nodes Used : {{ .Job.JobStats.NodeList }} 16 | - Cores : {{ .Job.JobStats.Ncpus }} 17 | - Job state : {{ .Job.SlurmEnvironment.SLURM_JOB_STATE }} 18 | {{- if ne .Job.SlurmEnvironment.SLURM_JOB_STATE "RUNNING" }} 19 | - Exit Code : {{ .Job.SlurmEnvironment.SLURM_JOB_EXIT_CODE_MAX }} 20 | {{- end }} 21 | - Submit : {{ .Job.JobStats.Submittime }} 22 | - Start : {{ .Job.JobStats.Starttime }} 23 | {{- if ne .Job.SlurmEnvironment.SLURM_JOB_STATE "RUNNING" }} 24 | - End : {{ .Job.JobStats.Endtime }} 25 | {{- end }} 26 | - Res. Walltime : {{ .Job.JobStats.WalltimeStr }} 27 | {{- if ne .Job.SlurmEnvironment.SLURM_JOB_MAIL_TYPE "Began" }} 28 | - Used Walltime : {{ .Job.SlurmEnvironment.SLURM_JOB_RUN_TIME }} 29 | {{- if ne .Job.SlurmEnvironment.SLURM_JOB_STATE "RUNNING" }} 30 | - Used CPU time : {{ .Job.JobStats.TotalCPUStr }} 31 | - % User (Comp) : {{ printf "%5.2f%%" .Job.JobStats.CalcUserComputePercentage }} 32 | - % System (I/O) : {{ printf "%5.2f%%" .Job.JobStats.CalcSystemComputePercentage }} 33 | {{- end }} 34 | {{- end }} 35 | - Memory Requested : {{ .Job.JobStats.ReqMem | humanBytes }} 36 | {{- if ne .Job.SlurmEnvironment.SLURM_JOB_MAIL_TYPE "Began" }} 37 | - Max Memory Used : {{ .Job.JobStats.MaxRSS | humanBytes }} 38 | - Max Disk Write : {{ .Job.JobStats.MaxDiskWrite | humanBytes }} 39 | - Max Disk Read : {{ .Job.JobStats.MaxDiskRead | humanBytes }} 40 | {{- end }} 41 | 42 | {{- range .Job.Hints }} 43 | - {{ . }} 44 | 45 | {{- end }} 46 | 47 | --- 48 | 49 | -------------------------------------------------------------------------------- /connectors/mattermost/connector_data.go: -------------------------------------------------------------------------------- 1 | package mattermost 2 | 3 | import "log" 4 | 5 | const connectorName = "mattermost" 6 | 7 | type Connector struct { 8 | name string 9 | serverUrl string 10 | wsUrl string 11 | triggerString string 12 | token string 13 | renderToFile string 14 | spoolDir string 15 | messageTemplate string 16 | useLookup string 17 | format string 18 | } 19 | 20 | func (c *Connector) dumpConnector(l *log.Logger) { 21 | l.Printf("mattermost.dumpConnector: name: %q\n", c.name) 22 | l.Printf("mattermost.dumpConnector: serverUrl: %q\n", c.serverUrl) 23 | l.Printf("mattermost.dumpConnector: wsUrl: %q\n", c.wsUrl) 24 | l.Printf("mattermost.dumpConnector: triggerString: %q\n", c.triggerString) 25 | l.Printf("mattermost.dumpConnector: token: ***\n") 26 | l.Printf("mattermost.dumpConnector: renderToFile: %q\n", c.renderToFile) 27 | l.Printf("mattermost.dumpConnector: spoolDir: %q\n", c.spoolDir) 28 | l.Printf("mattermost.dumpConnector: messageTemplate: %q\n", c.messageTemplate) 29 | l.Printf("mattermost.dumpConnector: useLookup: %q\n", c.useLookup) 30 | l.Printf("mattermost.dumpConnector: format: %q\n", c.format) 31 | l.Println("................................................................................") 32 | 33 | } 34 | 35 | var connmattermost *Connector = new(Connector) 36 | -------------------------------------------------------------------------------- /connectors/mattermost/mattermost.go: -------------------------------------------------------------------------------- 1 | package mattermost 2 | 3 | import ( 4 | "bytes" 5 |
"errors" 6 | "io" 7 | "log" 8 | "os" 9 | "strconv" 10 | "time" 11 | 12 | "github.com/CLIP-HPC/goslmailer/internal/connectors" 13 | "github.com/CLIP-HPC/goslmailer/internal/lookup" 14 | "github.com/CLIP-HPC/goslmailer/internal/message" 15 | "github.com/CLIP-HPC/goslmailer/internal/renderer" 16 | "github.com/CLIP-HPC/goslmailer/internal/spool" 17 | "github.com/mattermost/mattermost-server/v5/model" 18 | ) 19 | 20 | func init() { 21 | connectors.Register(connectorName, connmattermost) 22 | } 23 | 24 | func (c *Connector) ConfigConnector(conf map[string]string) error { 25 | 26 | c.name = conf["name"] 27 | c.serverUrl = conf["serverUrl"] 28 | c.wsUrl = conf["wsUrl"] 29 | c.triggerString = conf["triggerString"] 30 | c.token = conf["token"] 31 | c.renderToFile = conf["renderToFile"] 32 | c.spoolDir = conf["spoolDir"] 33 | c.messageTemplate = conf["messageTemplate"] 34 | c.useLookup = conf["useLookup"] 35 | c.format = conf["format"] 36 | 37 | // if renderToFile=="no" or "spool" then spoolDir must not be empty 38 | switch c.renderToFile { 39 | case "no", "spool": 40 | if c.spoolDir == "" { 41 | return errors.New("mattermost spoolDir must be defined, aborting") 42 | } 43 | } 44 | return nil 45 | } 46 | 47 | func (c *Connector) SendMessage(mp *message.MessagePack, useSpool bool, l *log.Logger) error { 48 | 49 | var ( 50 | e error = nil 51 | outFile string 52 | dts bool = false // DumpToSpool 53 | buffer bytes.Buffer 54 | ) 55 | 56 | l.Println("................... sendTomattermost START ........................................") 57 | 58 | // debug purposes 59 | c.dumpConnector(l) 60 | 61 | // lookup the end-system userid from the one sent by slurm (if lookup is set in "useLookup" config param) 62 | enduser, err := lookup.ExtLookupUser(mp.TargetUser, c.useLookup, l) 63 | if err != nil { 64 | l.Printf("Lookup failed for %s with %s\n", mp.TargetUser, err) 65 | return err 66 | } 67 | l.Printf("Looked up with %q %s -> %s\n", c.useLookup, mp.TargetUser, enduser) 68 | 69 | l.Printf("Sending to targetUserID: %s\n", enduser) 70 | 71 | // don't render template when using spool 72 | if c.renderToFile != "spool" { 73 | // buffer to place rendered json in 74 | buffer = bytes.Buffer{} 75 | err := renderer.RenderTemplate(c.messageTemplate, c.format, mp.JobContext, enduser, &buffer) 76 | if err != nil { 77 | return err 78 | } 79 | } 80 | 81 | // this can be: "yes", "spool", anythingelse 82 | switch c.renderToFile { 83 | case "yes": 84 | // render markdown template to a file in working directory - debug purposes 85 | // prepare outfile name 86 | t := strconv.FormatInt(time.Now().UnixNano(), 10) 87 | l.Printf("Time: %s\n", t) 88 | outFile = "rendered-" + mp.JobContext.SLURM_JOB_ID + "-" + enduser + "-" + t + ".md" 89 | res, err := io.ReadAll(&buffer) 90 | if err != nil { 91 | return err 92 | } 93 | err = os.WriteFile(outFile, res, 0644) 94 | if err != nil { 95 | return err 96 | } 97 | l.Printf("Send successful to file: %s\n", outFile) 98 | case "spool": 99 | // deposit GOB to spoolDir if allowed 100 | if useSpool { 101 | err := spool.DepositToSpool(c.spoolDir, mp) 102 | if err != nil { 103 | l.Printf("DepositToSpool Failed!\n") 104 | return err 105 | } 106 | } 107 | default: 108 | // Send message via mattermost 109 | 110 | client := model.NewAPIv4Client(c.serverUrl) 111 | client.SetOAuthToken(c.token) 112 | l.Printf("\nclient: %#v\n", client) 113 | 114 | resPost := model.Post{} 115 | resPost.ChannelId = enduser 116 | resPost.Message = buffer.String() 117 | if _, r := client.CreatePost(&resPost); r.Error == nil { 118 | 
l.Printf("Post response to chan: %s successfull!\n", resPost.ChannelId) 119 | } else { 120 | l.Printf("Post response FAILED with: %#v\n", r) 121 | dts = true 122 | } 123 | } 124 | 125 | // save mp to spool if we're allowed (not allowed when called from gobler, to prevent gobs multiplying) 126 | if dts && useSpool { 127 | l.Printf("Backing off to spool.\n") 128 | err := spool.DepositToSpool(c.spoolDir, mp) 129 | if err != nil { 130 | l.Printf("DepositToSpool Failed!\n") 131 | return err 132 | } 133 | } 134 | 135 | l.Println("................... sendTomattermost END ..........................................") 136 | 137 | return e 138 | } 139 | -------------------------------------------------------------------------------- /connectors/msteams/connector_data.go: -------------------------------------------------------------------------------- 1 | package msteams 2 | 3 | import "log" 4 | 5 | const connectorName = "msteams" 6 | 7 | type Connector struct { 8 | name string 9 | url string 10 | // renderToFile can be: "yes", "no", "spool" <- to chain with "throttler" 11 | renderToFile string 12 | spoolDir string 13 | adaptiveCardTemplate string 14 | useLookup string 15 | } 16 | 17 | func (c *Connector) dumpConnector(l *log.Logger) { 18 | l.Printf("msteams.dumpConnector: name: %q\n", c.name) 19 | l.Printf("msteams.dumpConnector: url: %q\n", c.url) 20 | l.Printf("msteams.dumpConnector: renderToFile: %q\n", c.renderToFile) 21 | l.Printf("msteams.dumpConnector: spoolDir: %q\n", c.spoolDir) 22 | l.Printf("msteams.dumpConnector: adaptiveCardTemplate: %q\n", c.adaptiveCardTemplate) 23 | l.Printf("msteams.dumpConnector: useLookup: %q\n", c.useLookup) 24 | l.Println("................................................................................") 25 | 26 | } 27 | 28 | var connMsteams *Connector = new(Connector) 29 | -------------------------------------------------------------------------------- /connectors/msteams/msteams.go: -------------------------------------------------------------------------------- 1 | package msteams 2 | 3 | import ( 4 | "bytes" 5 | "errors" 6 | "io" 7 | "log" 8 | "net/http" 9 | "os" 10 | "strconv" 11 | "time" 12 | 13 | "github.com/CLIP-HPC/goslmailer/internal/connectors" 14 | "github.com/CLIP-HPC/goslmailer/internal/lookup" 15 | "github.com/CLIP-HPC/goslmailer/internal/message" 16 | "github.com/CLIP-HPC/goslmailer/internal/renderer" 17 | "github.com/CLIP-HPC/goslmailer/internal/spool" 18 | ) 19 | 20 | func init() { 21 | connectors.Register(connectorName, connMsteams) 22 | } 23 | 24 | func (c *Connector) ConfigConnector(conf map[string]string) error { 25 | 26 | c.name = conf["name"] 27 | c.url = conf["url"] 28 | c.renderToFile = conf["renderToFile"] 29 | c.spoolDir = conf["spoolDir"] 30 | c.adaptiveCardTemplate = conf["adaptiveCardTemplate"] 31 | c.useLookup = conf["useLookup"] 32 | 33 | // if renderToFile=="no" or "spool" then spoolDir must not be empty 34 | switch c.renderToFile { 35 | case "no", "spool": 36 | if c.spoolDir == "" { 37 | return errors.New("spoolDir must be defined, aborting") 38 | } 39 | } 40 | return nil 41 | } 42 | 43 | func (c *Connector) SendMessage(mp *message.MessagePack, useSpool bool, l *log.Logger) error { 44 | 45 | var ( 46 | e error = nil 47 | outFile string 48 | dts bool = false // DumpToSpool 49 | buffer bytes.Buffer 50 | ) 51 | 52 | l.Println("................... 
sendToMSTeams START ........................................") 53 | 54 | // lookup the end-system userid from the one sent by slurm (if lookup is set in "useLookup" config param) 55 | enduser, err := lookup.ExtLookupUser(mp.TargetUser, c.useLookup, l) 56 | if err != nil { 57 | l.Printf("Lookup failed for %s with %s\n", mp.TargetUser, err) 58 | return err 59 | } 60 | l.Printf("Looked up with %q %s -> %s\n", c.useLookup, mp.TargetUser, enduser) 61 | 62 | l.Printf("Sending to targetUserID: %s\n", enduser) 63 | 64 | // debug purposes 65 | c.dumpConnector(l) 66 | 67 | // don't render template when using spool 68 | if c.renderToFile != "spool" { 69 | // buffer to place rendered json in 70 | buffer = bytes.Buffer{} 71 | //err := c.msteamsRenderCardTemplate(mp.JobContext, enduser, &buffer) 72 | err := renderer.RenderTemplate(c.adaptiveCardTemplate, "text", mp.JobContext, enduser, &buffer) 73 | if err != nil { 74 | return err 75 | } 76 | } 77 | 78 | // this can be: "yes", "spool", anythingelse 79 | switch c.renderToFile { 80 | case "yes": 81 | // render json template to a file in working directory - debug purposes 82 | 83 | // prepare outfile name 84 | t := strconv.FormatInt(time.Now().UnixNano(), 10) 85 | l.Printf("Time: %s\n", t) 86 | outFile = "rendered-" + mp.JobContext.SLURM_JOB_ID + "-" + enduser + "-" + t + ".json" 87 | res, err := io.ReadAll(&buffer) 88 | if err != nil { 89 | return err 90 | } 91 | err = os.WriteFile(outFile, res, 0644) 92 | if err != nil { 93 | return err 94 | } 95 | l.Printf("Send successful to file: %s\n", outFile) 96 | case "spool": 97 | // deposit GOB to spoolDir if allowed 98 | if useSpool { 99 | err := spool.DepositToSpool(c.spoolDir, mp) 100 | if err != nil { 101 | l.Printf("DepositToSpool Failed!\n") 102 | return err 103 | } 104 | } 105 | default: 106 | // handle here "too many requests" 4xx and place the rendered message to spool dir to be picked up later by the "throttler" 107 | resp, err := http.Post(c.url, "application/json", &buffer) 108 | if err != nil { 109 | l.Printf("http.Post Failed!\n") 110 | dts = true 111 | //return err 112 | e = err 113 | } else { 114 | l.Printf("RESPONSE Status: %s\n", resp.Status) 115 | switch resp.StatusCode { 116 | case 429: 117 | l.Printf("429 received.\n") 118 | dts = true 119 | default: 120 | l.Printf("Send OK!\n") 121 | } 122 | } 123 | } 124 | 125 | // either http.Post failed, or it got 429, save mp to spool if we're allowed (not allowed when called from gobler, to prevent gobs multiplying) 126 | if dts && useSpool { 127 | l.Printf("Backing off to spool.\n") 128 | err := spool.DepositToSpool(c.spoolDir, mp) 129 | if err != nil { 130 | l.Printf("DepositToSpool Failed!\n") 131 | return err 132 | } 133 | } 134 | 135 | l.Println("................... 
sendToMSTeams END ..........................................") 136 | 137 | return e 138 | } 139 | -------------------------------------------------------------------------------- /connectors/slack/connector_data.go: -------------------------------------------------------------------------------- 1 | package slack 2 | 3 | import "log" 4 | 5 | const connectorName = "slack" 6 | 7 | type Connector struct { 8 | token string // slack api token of the bot 9 | messageTemplate string // path to template file 10 | 11 | renderToFile string // renderToFile can be: "yes", "no", "spool" 12 | spoolDir string // where to place spooled messages 13 | useLookup string // string passed to lookup.ExtLookupUser() which determines which lookup function to call 14 | } 15 | 16 | func (c *Connector) dumpConnector(l *log.Logger) { 17 | l.Printf("slack.dumpConnector: messageTemplate: %q\n", c.messageTemplate) 18 | l.Printf("slack.dumpConnector: token: PRESENT\n") 19 | l.Printf("slack.dumpConnector: renderToFile: %q\n", c.renderToFile) 20 | l.Printf("slack.dumpConnector: spoolDir: %q\n", c.spoolDir) 21 | l.Printf("slack.dumpConnector: useLookup: %q\n", c.useLookup) 22 | l.Println("................................................................................") 23 | 24 | } 25 | 26 | var connSlack *Connector = new(Connector) 27 | -------------------------------------------------------------------------------- /connectors/slack/slack.go: -------------------------------------------------------------------------------- 1 | package slack 2 | 3 | import ( 4 | "bytes" 5 | "errors" 6 | "io" 7 | "log" 8 | "os" 9 | "strconv" 10 | "time" 11 | 12 | "github.com/CLIP-HPC/goslmailer/internal/connectors" 13 | "github.com/CLIP-HPC/goslmailer/internal/lookup" 14 | "github.com/CLIP-HPC/goslmailer/internal/message" 15 | "github.com/CLIP-HPC/goslmailer/internal/renderer" 16 | "github.com/CLIP-HPC/goslmailer/internal/spool" 17 | "github.com/eritikass/githubmarkdownconvertergo" 18 | "github.com/slack-go/slack" 19 | ) 20 | 21 | func init() { 22 | connectors.Register(connectorName, connSlack) 23 | } 24 | 25 | func (c *Connector) ConfigConnector(conf map[string]string) error { 26 | // Fill out the Connector structure with values from config file 27 | c.token = conf["token"] 28 | c.messageTemplate = conf["messageTemplate"] 29 | c.renderToFile = conf["renderToFile"] 30 | c.spoolDir = conf["spoolDir"] 31 | c.useLookup = conf["useLookup"] 32 | 33 | switch { 34 | // slack token must be present 35 | case c.token == "": 36 | return errors.New("slack bot token must be defined, aborting") 37 | // if renderToFile=="no" or "spool" then spoolDir must not be empty 38 | case c.renderToFile == "no" || c.renderToFile == "spool": 39 | if c.spoolDir == "" { 40 | return errors.New("slack spoolDir must be defined, aborting") 41 | } 42 | 43 | } 44 | return nil 45 | } 46 | 47 | func (c *Connector) SendMessage(mp *message.MessagePack, useSpool bool, l *log.Logger) error { 48 | 49 | var ( 50 | e error = nil 51 | outFile string 52 | dts bool = false // DumpToSpool 53 | buffer bytes.Buffer 54 | ) 55 | 56 | l.Println("................... 
sendToSlack START ........................................") 57 | l.Print("MessagePack: ", mp) 58 | 59 | // Create a new Slack sesison using the provided bot token 60 | api := slack.New(c.token) 61 | 62 | enduser, err := lookup.ExtLookupUser(mp.TargetUser, c.useLookup, l) 63 | if err != nil { 64 | l.Printf("Lookup failed for %s with %s\n", mp.TargetUser, err) 65 | return err 66 | } 67 | l.Printf("Looked up with %q %s -> %s\n", c.useLookup, mp.TargetUser, enduser) 68 | 69 | // Get the correct enduser to send to 70 | // The bot should not be added to channels it may not send messages in. 71 | if c.renderToFile != "spool" { 72 | // buffer to place rendered json in 73 | buffer = bytes.Buffer{} 74 | err := renderer.RenderTemplate(c.messageTemplate, "", mp.JobContext, enduser, &buffer) 75 | if err != nil { 76 | return err 77 | } 78 | } 79 | 80 | // this can be: "yes", "spool", anythingelse 81 | switch c.renderToFile { 82 | case "yes": 83 | // render json template to a file in working directory - debug purposes 84 | // Optional. But can be extremely useful. 85 | t := strconv.FormatInt(time.Now().UnixNano(), 10) 86 | l.Printf("Time: %s\n", t) 87 | outFile = "rendered-" + mp.JobContext.SLURM_JOB_ID + "-" + enduser + "-" + t + ".json" 88 | res, err := io.ReadAll(&buffer) 89 | if err != nil { 90 | return err 91 | } 92 | err = os.WriteFile(outFile, res, 0644) 93 | if err != nil { 94 | return err 95 | } 96 | l.Printf("Send successful to file: %s\n", outFile) 97 | case "spool": 98 | // deposit GOB to spoolDir if allowed (can be: YES from goslmailer, NO from gobler, since it's already spooled) 99 | if useSpool { 100 | l.Printf(c.spoolDir) 101 | err := spool.DepositToSpool(c.spoolDir, mp) 102 | if err != nil { 103 | l.Printf("DepositToSpool Failed!\n") 104 | return err 105 | } 106 | } 107 | default: 108 | l.Printf("Sending to channelID or userID: %s\n", enduser) 109 | 110 | markdown := githubmarkdownconvertergo.Slack(buffer.String(), githubmarkdownconvertergo.SlackConvertOptions{Headlines: true}) 111 | // markdown := strings.ReplaceAll(buffer.String(), "**", "*") 112 | mdBlock := slack.NewTextBlockObject("mrkdwn", markdown, false, false) 113 | sectionBlock := slack.NewSectionBlock(mdBlock, nil, nil) 114 | options := slack.MsgOptionBlocks(sectionBlock) 115 | _, _, _, err := api.SendMessage(enduser, options) 116 | if err != nil { 117 | l.Println("PostMessage error: ", err) 118 | dts = true 119 | } 120 | } 121 | 122 | // BACKOFF code, sending failed, we set dts to true and if we're allowed to spool (again, NO from gobler) then we spool. 123 | if dts && useSpool { 124 | l.Printf("Backing off to spool.\n") 125 | err := spool.DepositToSpool(c.spoolDir, mp) 126 | if err != nil { 127 | l.Printf("DepositToSpool Failed!\n") 128 | return err 129 | } 130 | } 131 | 132 | l.Println("................... 
sendToSlack END ..........................................") 133 | 134 | return e 135 | } 136 | -------------------------------------------------------------------------------- /connectors/telegram/connector_data.go: -------------------------------------------------------------------------------- 1 | package telegram 2 | 3 | import "log" 4 | 5 | const connectorName = "telegram" 6 | 7 | type Connector struct { 8 | name string 9 | url string 10 | token string 11 | renderToFile string 12 | spoolDir string 13 | messageTemplate string 14 | useLookup string 15 | format string 16 | } 17 | 18 | func (c *Connector) dumpConnector(l *log.Logger) { 19 | l.Printf("telegram.dumpConnector: name: %q\n", c.name) 20 | l.Printf("telegram.dumpConnector: url: %q\n", c.url) 21 | l.Printf("telegram.dumpConnector: token: %q\n", c.token) 22 | l.Printf("telegram.dumpConnector: renderToFile: %q\n", c.renderToFile) 23 | l.Printf("telegram.dumpConnector: spoolDir: %q\n", c.spoolDir) 24 | l.Printf("telegram.dumpConnector: messageTemplate: %q\n", c.messageTemplate) 25 | l.Printf("telegram.dumpConnector: useLookup: %q\n", c.useLookup) 26 | l.Printf("telegram.dumpConnector: format: %q\n", c.format) 27 | l.Println("................................................................................") 28 | 29 | } 30 | 31 | var connTelegram *Connector = new(Connector) 32 | -------------------------------------------------------------------------------- /connectors/telegram/telegram.go: -------------------------------------------------------------------------------- 1 | package telegram 2 | 3 | import ( 4 | "bytes" 5 | "errors" 6 | "io" 7 | "log" 8 | "os" 9 | "strconv" 10 | "time" 11 | 12 | "github.com/CLIP-HPC/goslmailer/internal/connectors" 13 | "github.com/CLIP-HPC/goslmailer/internal/lookup" 14 | "github.com/CLIP-HPC/goslmailer/internal/message" 15 | "github.com/CLIP-HPC/goslmailer/internal/renderer" 16 | "github.com/CLIP-HPC/goslmailer/internal/spool" 17 | telebot "gopkg.in/telebot.v3" 18 | ) 19 | 20 | func init() { 21 | connectors.Register(connectorName, connTelegram) 22 | } 23 | 24 | func (c *Connector) ConfigConnector(conf map[string]string) error { 25 | 26 | c.name = conf["name"] 27 | c.url = conf["url"] 28 | c.token = conf["token"] 29 | c.renderToFile = conf["renderToFile"] 30 | c.spoolDir = conf["spoolDir"] 31 | c.messageTemplate = conf["messageTemplate"] 32 | c.useLookup = conf["useLookup"] 33 | c.format = conf["format"] 34 | 35 | // if renderToFile=="no" or "spool" then spoolDir must not be empty 36 | switch c.renderToFile { 37 | case "no", "spool": 38 | if c.spoolDir == "" { 39 | return errors.New("telegram spoolDir must be defined, aborting") 40 | } 41 | } 42 | return nil 43 | } 44 | 45 | func (c *Connector) SendMessage(mp *message.MessagePack, useSpool bool, l *log.Logger) error { 46 | 47 | var ( 48 | e error = nil 49 | outFile string 50 | dts bool = false // DumpToSpool 51 | buffer bytes.Buffer 52 | ) 53 | 54 | l.Println("................... 
sendToTelegram START ........................................") 55 | 56 | // debug purposes 57 | c.dumpConnector(l) 58 | 59 | // lookup the end-system userid from the one sent by slurm (if lookup is set in "useLookup" config param) 60 | enduser, err := lookup.ExtLookupUser(mp.TargetUser, c.useLookup, l) 61 | if err != nil { 62 | l.Printf("Lookup failed for %s with %s\n", mp.TargetUser, err) 63 | return err 64 | } 65 | l.Printf("Looked up with %q %s -> %s\n", c.useLookup, mp.TargetUser, enduser) 66 | 67 | l.Printf("Sending to targetUserID: %s\n", enduser) 68 | 69 | // get chat ID which comes from --mail-user=telegram:cID switch 70 | cID, err := strconv.ParseInt(enduser, 10, 64) 71 | if err != nil { 72 | l.Printf("cID strconv failed %s", err) 73 | return err 74 | } 75 | 76 | // don't render template when using spool 77 | if c.renderToFile != "spool" { 78 | // buffer to place rendered json in 79 | buffer = bytes.Buffer{} 80 | //err := c.telegramRenderTemplate(mp.JobContext, enduser, &buffer) 81 | err := renderer.RenderTemplate(c.messageTemplate, c.format, mp.JobContext, enduser, &buffer) 82 | if err != nil { 83 | return err 84 | } 85 | } 86 | 87 | // this can be: "yes", "spool", anythingelse 88 | switch c.renderToFile { 89 | case "yes": 90 | // render markdown template to a file in working directory - debug purposes 91 | // prepare outfile name 92 | t := strconv.FormatInt(time.Now().UnixNano(), 10) 93 | l.Printf("Time: %s\n", t) 94 | outFile = "rendered-" + mp.JobContext.SLURM_JOB_ID + "-" + enduser + "-" + t + ".md" 95 | res, err := io.ReadAll(&buffer) 96 | if err != nil { 97 | return err 98 | } 99 | err = os.WriteFile(outFile, res, 0644) 100 | if err != nil { 101 | return err 102 | } 103 | l.Printf("Send successful to file: %s\n", outFile) 104 | case "spool": 105 | // deposit GOB to spoolDir if allowed 106 | if useSpool { 107 | err := spool.DepositToSpool(c.spoolDir, mp) 108 | if err != nil { 109 | l.Printf("DepositToSpool Failed!\n") 110 | return err 111 | } 112 | } 113 | default: 114 | // spin up new bot 115 | tb, err := telebot.NewBot(telebot.Settings{ 116 | Token: c.token, 117 | }) 118 | if err != nil { 119 | //l.Fatal(err) 120 | l.Println(err) 121 | return err 122 | } 123 | // get chatid 124 | chat, err := tb.ChatByID(cID) 125 | if err != nil { 126 | l.Printf("chatbyusername failed %s", err) 127 | return err 128 | } 129 | // send message 130 | //https://core.telegram.org/bots/api#formatting-options 131 | msg, err := tb.Send(chat, buffer.String(), c.format) 132 | if err != nil { 133 | l.Printf("bot.Send() Failed: %s\n", err) 134 | dts = true 135 | //return err 136 | e = err 137 | } else { 138 | l.Printf("bot.Send() successful, messageID: %d\n", msg.ID) 139 | dts = false 140 | } 141 | } 142 | 143 | // save mp to spool if we're allowed (not allowed when called from gobler, to prevent gobs multiplying) 144 | if dts && useSpool { 145 | l.Printf("Backing off to spool.\n") 146 | err := spool.DepositToSpool(c.spoolDir, mp) 147 | if err != nil { 148 | l.Printf("DepositToSpool Failed!\n") 149 | return err 150 | } 151 | } 152 | 153 | l.Println("................... 
sendToTelegram END ..........................................") 154 | 155 | return e 156 | } 157 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/CLIP-HPC/goslmailer 2 | 3 | go 1.17 4 | 5 | require ( 6 | github.com/BurntSushi/toml v1.2.0 7 | github.com/bwmarrin/discordgo v0.25.0 8 | github.com/dustin/go-humanize v1.0.0 9 | github.com/eritikass/githubmarkdownconvertergo v0.1.9 10 | github.com/mattermost/mattermost-server/v5 v5.39.3 11 | github.com/slack-go/slack v0.11.4 12 | gopkg.in/telebot.v3 v3.0.0 13 | maunium.net/go/mautrix v0.11.0 14 | ) 15 | 16 | require ( 17 | github.com/blang/semver v3.5.1+incompatible // indirect 18 | github.com/dyatlov/go-opengraph v0.0.0-20210112100619-dae8665a5b09 // indirect 19 | github.com/francoispqt/gojay v1.2.13 // indirect 20 | github.com/go-asn1-ber/asn1-ber v1.5.3 // indirect 21 | github.com/google/uuid v1.2.0 // indirect 22 | github.com/gorilla/websocket v1.5.0 // indirect 23 | github.com/hashicorp/errwrap v1.1.0 // indirect 24 | github.com/hashicorp/go-multierror v1.1.1 // indirect 25 | github.com/json-iterator/go v1.1.11 // indirect 26 | github.com/klauspost/cpuid/v2 v2.0.6 // indirect 27 | github.com/mattermost/go-i18n v1.11.0 // indirect 28 | github.com/mattermost/ldap v0.0.0-20201202150706-ee0e6284187d // indirect 29 | github.com/mattermost/logr v1.0.13 // indirect 30 | github.com/minio/md5-simd v1.1.2 // indirect 31 | github.com/minio/minio-go/v7 v7.0.11 // indirect 32 | github.com/minio/sha256-simd v1.0.0 // indirect 33 | github.com/mitchellh/go-homedir v1.1.0 // indirect 34 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 35 | github.com/modern-go/reflect2 v1.0.1 // indirect 36 | github.com/pborman/uuid v1.2.1 // indirect 37 | github.com/pelletier/go-toml v1.9.3 // indirect 38 | github.com/philhofer/fwd v1.1.1 // indirect 39 | github.com/pkg/errors v0.9.1 // indirect 40 | github.com/rs/xid v1.3.0 // indirect 41 | github.com/sirupsen/logrus v1.8.1 // indirect 42 | github.com/tinylib/msgp v1.1.6 // indirect 43 | github.com/wiggin77/cfg v1.0.2 // indirect 44 | github.com/wiggin77/merror v1.0.3 // indirect 45 | github.com/wiggin77/srslog v1.0.1 // indirect 46 | github.com/yuin/goldmark v1.4.12 // indirect 47 | go.uber.org/atomic v1.8.0 // indirect 48 | go.uber.org/multierr v1.7.0 // indirect 49 | go.uber.org/zap v1.17.0 // indirect 50 | golang.org/x/crypto v0.0.0-20220513210258-46612604a0f9 // indirect 51 | golang.org/x/net v0.0.0-20220513224357-95641704303c // indirect 52 | golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect 53 | golang.org/x/text v0.3.7 // indirect 54 | gopkg.in/ini.v1 v1.62.0 // indirect 55 | gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect 56 | gopkg.in/yaml.v2 v2.4.0 // indirect 57 | ) 58 | -------------------------------------------------------------------------------- /images/archSketch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CLIP-HPC/goslmailer/a1da343a71dcb302eccd0c49cb6b1507900fa856/images/archSketch.png -------------------------------------------------------------------------------- /images/discord.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CLIP-HPC/goslmailer/a1da343a71dcb302eccd0c49cb6b1507900fa856/images/discord.png 
-------------------------------------------------------------------------------- /images/matrix.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CLIP-HPC/goslmailer/a1da343a71dcb302eccd0c49cb6b1507900fa856/images/matrix.png -------------------------------------------------------------------------------- /images/mattermost.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CLIP-HPC/goslmailer/a1da343a71dcb302eccd0c49cb6b1507900fa856/images/mattermost.png -------------------------------------------------------------------------------- /images/msteams.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CLIP-HPC/goslmailer/a1da343a71dcb302eccd0c49cb6b1507900fa856/images/msteams.png -------------------------------------------------------------------------------- /images/slack.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CLIP-HPC/goslmailer/a1da343a71dcb302eccd0c49cb6b1507900fa856/images/slack.png -------------------------------------------------------------------------------- /images/telegram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CLIP-HPC/goslmailer/a1da343a71dcb302eccd0c49cb6b1507900fa856/images/telegram.png -------------------------------------------------------------------------------- /internal/cmdline/cmdline.go: -------------------------------------------------------------------------------- 1 | package cmdline 2 | 3 | import ( 4 | "errors" 5 | "flag" 6 | ) 7 | 8 | // CmdArgs holds currently supported command line parameters. 9 | type CmdArgs struct { 10 | CfgFile *string 11 | Version *bool 12 | } 13 | 14 | // NewCmdArgs return the CmdArgs structure built from command line parameters. 15 | // `prog` string is used to build the default /etc/slurm/`prog`.conf configFile string. 16 | func NewCmdArgs(prog string) (*CmdArgs, error) { 17 | c := new(CmdArgs) 18 | 19 | c.CfgFile = flag.String("c", "/etc/slurm/"+prog+".conf", "Specify configuration file to use") 20 | c.Version = flag.Bool("v", false, "Display version") 21 | flag.Parse() 22 | if !flag.Parsed() { 23 | return nil, errors.New("failed to parse command line flags") 24 | } 25 | 26 | return c, nil 27 | } 28 | -------------------------------------------------------------------------------- /internal/config/config.go: -------------------------------------------------------------------------------- 1 | /* 2 | Package config implements the ConfigContainer structure and accompanying methods. 3 | It holds the configuration data for all utilities. 4 | Configuration file format is the same for all. 
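Files whose name ends in ".toml" are parsed as TOML; any other file is parsed as JSON (see GetConfig).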
5 | */ 6 | package config 7 | 8 | import ( 9 | "encoding/json" 10 | "log" 11 | "os" 12 | "strings" 13 | 14 | "github.com/BurntSushi/toml" 15 | ) 16 | 17 | type ConfigContainer struct { 18 | DebugConfig bool `json:"debugconfig"` 19 | Logfile string `json:"logfile"` 20 | Binpaths map[string]string `json:"binpaths"` 21 | DefaultConnector string `json:"defaultconnector"` 22 | Connectors map[string]map[string]string `json:"connectors"` 23 | QosMap map[string]uint64 `json:"qosmap"` 24 | //QosMap map[uint64]string `json:"qosmap"` 25 | } 26 | 27 | func NewConfigContainer() *ConfigContainer { 28 | return new(ConfigContainer) 29 | } 30 | 31 | // Read & unmarshall configuration from 'name' file into configContainer structure 32 | func (cc *ConfigContainer) GetConfig(name string) error { 33 | f, err := os.ReadFile(name) 34 | if err != nil { 35 | return err 36 | } 37 | 38 | // if HasSuffix(".toml") -> toml.Unmarshall 39 | // else json.Unmarshall 40 | if strings.HasSuffix(name, ".toml") { 41 | err = toml.Unmarshal(f, cc) 42 | } else { 43 | err = json.Unmarshal(f, cc) 44 | } 45 | 46 | if err != nil { 47 | return err 48 | } 49 | 50 | cc.testNsetBinPaths() 51 | 52 | return nil 53 | } 54 | 55 | func (cc *ConfigContainer) testNsetBinPaths() error { 56 | 57 | if cc.Binpaths == nil { 58 | cc.Binpaths = make(map[string]string) 59 | } 60 | 61 | // default paths 62 | defaultpaths := map[string]string{ 63 | "sacct": "/usr/bin/sacct", 64 | "sstat": "/usr/bin/sstat", 65 | } 66 | 67 | for key, path := range defaultpaths { 68 | if val, exists := cc.Binpaths[key]; !exists || val == "" { 69 | cc.Binpaths[key] = path 70 | } 71 | } 72 | 73 | return nil 74 | } 75 | 76 | func (cc *ConfigContainer) DumpConfig(l *log.Logger) { 77 | if cc.DebugConfig { 78 | l.Printf("DUMP CONFIG:\n") 79 | l.Printf("CONFIGURATION: %#v\n", cc) 80 | l.Printf("CONFIGURATION logfile: %s\n", cc.Logfile) 81 | l.Printf("--------------------------------------------------------------------------------\n") 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /internal/config/config_test.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "os" 5 | "testing" 6 | ) 7 | 8 | const testDir = "../../test_data/config_test" 9 | 10 | func TestConfig(t *testing.T) { 11 | files, err := os.ReadDir(testDir) 12 | if err != nil { 13 | t.Fatalf("ERROR: can not read config test directory %s\n", err) 14 | } 15 | for _, f := range files { 16 | t.Run(f.Name(), func(t *testing.T) { 17 | t.Logf("Processing file %s\n", f.Name()) 18 | c := NewConfigContainer() 19 | e := c.GetConfig(testDir + "/" + f.Name()) 20 | if _, ok := c.Connectors["msteams"]; e != nil || !ok { 21 | t.Fatalf("Test %s failed with: %s\n", f.Name(), e) 22 | } 23 | if v, ok := c.Connectors["msteams"]["url"]; !ok || v != "https://msteams/webhook/url" { 24 | t.Fatalf("Test %s failed finding connectors.msteams.url\n", f.Name()) 25 | } 26 | }) 27 | } 28 | } 29 | 30 | func TestSetBinpaths(t *testing.T) { 31 | cc := []ConfigContainer{ 32 | { 33 | Binpaths: map[string]string{ 34 | "sacct": "/usr/bin/sacct", 35 | "sstat": "blabla", 36 | }, 37 | }, 38 | { 39 | Binpaths: map[string]string{ 40 | "sacct": "", 41 | "sstat": "blabla1", 42 | }, 43 | }, 44 | { 45 | Binpaths: map[string]string{ 46 | "sstat": "blabla1", 47 | }, 48 | }, 49 | {}, 50 | } 51 | // todo: add []results and test both 52 | 53 | for i, v := range cc { 54 | t.Logf("Running test %d\n", i) 55 | t.Logf("PRE: %v\n", v.Binpaths) 56 | err := 
v.testNsetBinPaths() 57 | t.Logf("POST: %v\n", v.Binpaths) 58 | if err != nil { 59 | t.Fatalf("Test %d failed with: %s\n", i, err) 60 | } 61 | if v.Binpaths["sacct"] == "/usr/bin/sacct" { 62 | t.Logf("SUCCESS") 63 | } else { 64 | t.Fatalf("FAILED: WANT: %q GOT: %q\n", "/usr/bin/sacct", v.Binpaths["sacct"]) 65 | } 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /internal/connectors/connectors.go: -------------------------------------------------------------------------------- 1 | package connectors 2 | 3 | import ( 4 | "errors" 5 | "log" 6 | 7 | "github.com/CLIP-HPC/goslmailer/internal/config" 8 | "github.com/CLIP-HPC/goslmailer/internal/message" 9 | ) 10 | 11 | type Connector interface { 12 | ConfigConnector(conf map[string]string) error 13 | SendMessage(*message.MessagePack, bool, *log.Logger) error 14 | } 15 | 16 | type Connectors map[string]Connector 17 | 18 | var ConMap Connectors = Connectors{} 19 | 20 | // Register is used to pre-populate the Connectors map with ["connectorName"]connectorStruct. 21 | // The connector structure is later populated with parameters from config file via PopulateConnecors() method, or, 22 | // registered connectors are deleted from it if configuration doesn't work. 23 | // It is called from connector init(), triggered by a blank import from goslmailer/gobler. 24 | func Register(conName string, conStruct Connector) error { 25 | 26 | if _, ok := ConMap[conName]; !ok { 27 | log.Printf("Initializing connector: %s\n", conName) 28 | ConMap[conName] = conStruct 29 | } else { 30 | log.Printf("Connector %s already initialized.\n", conName) 31 | return errors.New("connector already initialized") 32 | } 33 | 34 | return nil 35 | } 36 | 37 | // Populate the map 'connectors' with connectors specified in config file and their instance from package. 38 | // Every newly developed connector must have a case block added here. 39 | func (c *Connectors) PopulateConnectors(conf *config.ConfigContainer, l *log.Logger) error { 40 | 41 | for k, v := range conf.Connectors { 42 | // test if connector from config is registered in conMap 43 | if _, ok := (*c)[k]; !ok { 44 | l.Printf("ERROR: %q connector not initialized, skipping...\n", k) 45 | continue 46 | } 47 | // l.Printf("Unsupported connector found. Ignoring %#v : %#v\n", k, v) 48 | // if it is, try to configure it 49 | l.Printf("CONFIGURING: %s with: %#v\n", k, v) 50 | if err := (*c)[k].ConfigConnector(v); err != nil { 51 | // config failed, log and remove from map 52 | l.Printf("ERROR: %q with %s connector configuration. Ignoring.\n", err, k) 53 | delete(*c, k) 54 | } else { 55 | // config successfull, log and do nothing. 
56 | l.Printf("SUCCESS: %s connector configured.\n", k) 57 | } 58 | } 59 | 60 | return nil 61 | } 62 | -------------------------------------------------------------------------------- /internal/connectors/connectors_test.go: -------------------------------------------------------------------------------- 1 | package connectors_test 2 | 3 | import ( 4 | "bytes" 5 | "log" 6 | "testing" 7 | 8 | _ "github.com/CLIP-HPC/goslmailer/connectors/discord" 9 | _ "github.com/CLIP-HPC/goslmailer/connectors/mailto" 10 | _ "github.com/CLIP-HPC/goslmailer/connectors/matrix" 11 | _ "github.com/CLIP-HPC/goslmailer/connectors/msteams" 12 | _ "github.com/CLIP-HPC/goslmailer/connectors/telegram" 13 | "github.com/CLIP-HPC/goslmailer/internal/config" 14 | "github.com/CLIP-HPC/goslmailer/internal/connectors" 15 | "github.com/CLIP-HPC/goslmailer/internal/message" 16 | ) 17 | 18 | var connectorsExpected = []string{"msteams", "mailto"} 19 | var connectorsExpectedNot = []string{"textfile"} 20 | 21 | func TestPopulateConnectors(t *testing.T) { 22 | 23 | wr := bytes.Buffer{} 24 | l := log.New(&wr, "Testing: ", log.Llongfile) 25 | 26 | cfg := config.NewConfigContainer() 27 | err := cfg.GetConfig("../../test_data/config_test/gobler.conf") 28 | if err != nil { 29 | t.Fatalf("MAIN: getConfig(gobconfig) failed: %s", err) 30 | } 31 | 32 | err = connectors.ConMap.PopulateConnectors(cfg, l) 33 | if err != nil { 34 | t.Fatalf("conns.PopulateConnectors() FAILED with %s\n", err) 35 | } 36 | 37 | t.Run("connectorsExpected", func(t *testing.T) { 38 | for _, v := range connectorsExpected { 39 | t.Logf("Testing for connector %s", v) 40 | if _, ok := connectors.ConMap[v]; !ok { 41 | t.Fatalf("Connector %s not configured!", v) 42 | } else { 43 | t.Logf("FOUND... good!\n") 44 | } 45 | } 46 | }) 47 | t.Run("connectorsExpectedNot", func(t *testing.T) { 48 | for _, v := range connectorsExpectedNot { 49 | t.Logf("Testing for connector %s", v) 50 | if _, ok := connectors.ConMap[v]; ok { 51 | t.Fatalf("Connector %s configured but must NOT be!", v) 52 | } else { 53 | t.Logf("NOT FOUND... 
good!\n") 54 | } 55 | } 56 | }) 57 | } 58 | 59 | type testCon struct{} 60 | 61 | func (tc testCon) ConfigConnector(conf map[string]string) error { 62 | return nil 63 | } 64 | 65 | func (tc testCon) SendMessage(*message.MessagePack, bool, *log.Logger) error { 66 | return nil 67 | } 68 | 69 | func TestRegister(t *testing.T) { 70 | tc := testCon{} 71 | 72 | err := connectors.Register("test", tc) 73 | if err != nil { 74 | t.Fatalf("FAILED to register %q connector", "test") 75 | } 76 | 77 | err = connectors.Register("test", tc) 78 | if err == nil { 79 | t.Fatalf("FAILED registering already registered %q connector", "test") 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /internal/logger/logger.go: -------------------------------------------------------------------------------- 1 | package logger 2 | 3 | import ( 4 | "errors" 5 | "log" 6 | "os" 7 | ) 8 | 9 | func SetupLogger(logfile string, app string) (*log.Logger, error) { 10 | var ( 11 | l *log.Logger 12 | lf *os.File 13 | err error 14 | ) 15 | 16 | if logfile != "" { 17 | lf, err = os.OpenFile(logfile, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644) 18 | if err != nil { 19 | return nil, errors.New("can not open configured log file") 20 | } 21 | } else { 22 | lf = os.Stderr 23 | } 24 | l = log.New(lf, app+":", log.Lshortfile|log.Ldate|log.Lmicroseconds) 25 | 26 | return l, nil 27 | 28 | } 29 | -------------------------------------------------------------------------------- /internal/lookup/lookup.go: -------------------------------------------------------------------------------- 1 | package lookup 2 | 3 | import ( 4 | "log" 5 | "os/exec" 6 | "strings" 7 | ) 8 | 9 | func ExtLookupUser(user string, lookup string, l *log.Logger) (u string, err error) { 10 | 11 | // todo: this whole package needs some love, quite some love 12 | switch lookup { 13 | case "GECOS": 14 | u, err = lookupGECOS(user, l) 15 | default: 16 | u = user 17 | } 18 | 19 | return u, err 20 | } 21 | 22 | func lookupGECOS(u string, l *log.Logger) (string, error) { 23 | 24 | out, err := exec.Command("/usr/bin/getent", "passwd", u).Output() 25 | if err != nil { 26 | return u, err 27 | } 28 | fields := strings.Split(string(out), ":") 29 | 30 | return fields[4], nil 31 | } 32 | -------------------------------------------------------------------------------- /internal/message/message.go: -------------------------------------------------------------------------------- 1 | package message 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/CLIP-HPC/goslmailer/internal/slurmjob" 7 | ) 8 | 9 | // MessagePack is the central data structure that holds all the data about the message that is currently being processed. 10 | // It is used to pass the "message" and its "metadata" between all of the components of the system, e.g. main->connector->spooler->gobler->sender etc. 
11 | type MessagePack struct { 12 | Connector string 13 | TargetUser string 14 | JobContext *slurmjob.JobContext 15 | TimeStamp time.Time 16 | } 17 | 18 | // NewMsgPack returns the instantiated message.MessagePack structure 19 | func NewMsgPack(connectorName string, targetUser string, jobContext *slurmjob.JobContext) (*MessagePack, error) { 20 | var m = new(MessagePack) 21 | m.Connector = connectorName 22 | m.TargetUser = targetUser 23 | m.JobContext = jobContext 24 | m.TimeStamp = time.Now() 25 | return m, nil 26 | } 27 | -------------------------------------------------------------------------------- /internal/renderer/renderer.go: -------------------------------------------------------------------------------- 1 | package renderer 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | htmltemplate "html/template" 7 | "os" 8 | texttemplate "text/template" 9 | "time" 10 | 11 | "github.com/CLIP-HPC/goslmailer/internal/slurmjob" 12 | "github.com/dustin/go-humanize" 13 | ) 14 | 15 | // RenderTemplate renders the template file 'tfile' into 'buf' Buffer, using 'format' go package ('HTML' for html/template, 'text' for text/template). 16 | // 'j slurmjob.JobContext' and 'userid string' are wrapped in a structure to be used as template data. 17 | func RenderTemplate(tfile, format string, j *slurmjob.JobContext, userid string, buf *bytes.Buffer) error { 18 | 19 | var x = struct { 20 | Job slurmjob.JobContext 21 | UserID string 22 | Created string 23 | }{ 24 | *j, 25 | userid, 26 | fmt.Sprint(time.Now().Format("Mon, 2 Jan 2006 15:04:05 MST")), 27 | } 28 | 29 | // get template file 30 | f, err := os.ReadFile(tfile) 31 | if err != nil { 32 | return err 33 | } 34 | 35 | // depending on `format` (from connector map in conf file), render template 36 | if format == "HTML" { 37 | var funcMap = htmltemplate.FuncMap{ 38 | "humanBytes": humanize.Bytes, 39 | } 40 | t := htmltemplate.Must(htmltemplate.New(tfile).Funcs(funcMap).Parse(string(f))) 41 | err = t.Execute(buf, x) 42 | } else { 43 | var funcMap = texttemplate.FuncMap{ 44 | "humanBytes": humanize.Bytes, 45 | } 46 | t := texttemplate.Must(texttemplate.New(tfile).Funcs(funcMap).Parse(string(f))) 47 | err = t.Execute(buf, x) 48 | } 49 | return err 50 | } 51 | -------------------------------------------------------------------------------- /internal/slurmjob/job_data.go: -------------------------------------------------------------------------------- 1 | package slurmjob 2 | 3 | type JobContext struct { 4 | SlurmEnvironment 5 | JobStats SacctMetrics 6 | Hints []string 7 | MailSubject string 8 | PrunedMessageCount uint32 9 | } 10 | 11 | type SlurmEnvironment struct { 12 | SLURM_ARRAY_JOB_ID string 13 | SLURM_ARRAY_TASK_COUNT string 14 | SLURM_ARRAY_TASK_ID string 15 | SLURM_ARRAY_TASK_MAX string 16 | SLURM_ARRAY_TASK_MIN string 17 | SLURM_ARRAY_TASK_STEP string 18 | SLURM_CLUSTER_NAME string 19 | SLURM_JOB_ACCOUNT string 20 | SLURM_JOB_DERIVED_EC string 21 | SLURM_JOB_EXIT_CODE string 22 | SLURM_JOB_EXIT_CODE2 string 23 | SLURM_JOB_EXIT_CODE_MAX string 24 | SLURM_JOB_EXIT_CODE_MIN string 25 | SLURM_JOB_GID string 26 | SLURM_JOB_GROUP string 27 | SLURM_JOBID string 28 | SLURM_JOB_ID string 29 | SLURM_JOB_MAIL_TYPE string 30 | SLURM_JOB_NAME string 31 | SLURM_JOB_NODELIST string 32 | SLURM_JOB_PARTITION string 33 | SLURM_JOB_QUEUED_TIME string 34 | SLURM_JOB_RUN_TIME string 35 | SLURM_JOB_STATE string 36 | SLURM_JOB_STDIN string 37 | SLURM_JOB_UID string 38 | SLURM_JOB_USER string 39 | SLURM_JOB_WORK_DIR string 40 | } 41 | 42 | type SacctMetrics struct { 43 | JobName 
string 44 | User string 45 | Account string 46 | Partition string 47 | State string 48 | Ncpus int64 49 | Nodes int 50 | NodeList string 51 | Submittime string 52 | Starttime string 53 | Endtime string 54 | CPUTimeStr string 55 | CPUTime float64 56 | TotalCPU float64 57 | TotalCPUStr string 58 | UserCPU float64 59 | SystemCPU float64 60 | ReqMem uint64 61 | MaxRSS uint64 62 | Walltime uint64 63 | WalltimeStr string 64 | Runtime uint64 65 | RuntimeStr string 66 | MaxDiskWrite uint64 67 | MaxDiskRead uint64 68 | } 69 | 70 | type SstatMetrics struct { 71 | MaxRSS uint64 72 | MaxDiskWrite uint64 73 | MaxDiskRead uint64 74 | } 75 | -------------------------------------------------------------------------------- /internal/spool/spool.go: -------------------------------------------------------------------------------- 1 | package spool 2 | 3 | import ( 4 | "encoding/gob" 5 | "errors" 6 | "fmt" 7 | "io" 8 | "log" 9 | "os" 10 | "strconv" 11 | 12 | "github.com/CLIP-HPC/goslmailer/internal/message" 13 | ) 14 | 15 | type spool struct { 16 | spoolDir string 17 | } 18 | 19 | // DepositToSpool is a wrapper around spool.NewSpool and *spool.DepositGob 20 | func DepositToSpool(dir string, m *message.MessagePack) error { 21 | 22 | s, err := NewSpool(dir) 23 | if err != nil { 24 | return err 25 | } 26 | err = s.DepositGob(m) 27 | if err != nil { 28 | return err 29 | } 30 | return nil 31 | } 32 | 33 | // NewSpool instantiates a spool structure with a dir path to the directory where gobs will be deposited (spooled) 34 | func NewSpool(dir string) (*spool, error) { 35 | var gd = new(spool) 36 | // test if dir exists and we can write into it 37 | fi, err := os.Stat(dir) 38 | switch { 39 | case err != nil: 40 | return nil, err 41 | case !fi.IsDir(): 42 | return nil, errors.New("ERROR: Gob directory is not a directory") 43 | // todo: missing writability test 44 | } 45 | gd.spoolDir = dir 46 | return gd, nil 47 | } 48 | 49 | // DepositGob takes a MessagePack and saves it to a gob file in the spool directory. 50 | func (s *spool) DepositGob(m *message.MessagePack) error { 51 | 52 | // if we got empty messagepack, error! 53 | if m == nil { 54 | return errors.New("got nil MessagePack") 55 | } 56 | 57 | // generate gob file name 58 | fn, err := genFileName(s.spoolDir, m) 59 | if err != nil { 60 | return err 61 | } 62 | 63 | // create gob file 64 | f, err := os.Create(fn) 65 | if err != nil { 66 | return err 67 | } 68 | defer f.Close() 69 | 70 | // deposit mp in gob 71 | genc := gob.NewEncoder(f) 72 | err = genc.Encode(*m) 73 | if err != nil { 74 | return err 75 | } 76 | 77 | // todo: proper logging 78 | fmt.Println("Deposit gob OK!") 79 | 80 | return nil 81 | } 82 | 83 | // genFileName generates gob full path-filename. Format: spooldir/connector-user-timestamp.gob 84 | func genFileName(dir string, m *message.MessagePack) (string, error) { 85 | 86 | switch { 87 | case dir == "": 88 | return "", errors.New("got empty spooldir") 89 | case m == nil: 90 | return "", errors.New("got nil messagepack") 91 | case m.Connector == "": 92 | return "", errors.New("got empty connector") 93 | case m.TargetUser == "": 94 | return "", errors.New("got empty targetuser") 95 | } 96 | 97 | return dir + "/" + m.Connector + "-" + m.TargetUser + "-" + strconv.FormatInt(m.TimeStamp.UnixNano(), 10) + ".gob", nil 98 | 99 | } 100 | 101 | // FetchGob takes the gob filename, prepends the spooldir to the name, opens it and returns the decoded MessagePack structure. 
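// A spool round-trip sketch (directory, file name and the mp/logger variables
// are illustrative; error handling elided):
//
//	s, _ := NewSpool("/var/spool/goslmailer")
//	_ = s.DepositGob(mp) // writes <spoolDir>/<connector>-<user>-<unixnano>.gob
//	got, _ := s.FetchGob("msteams-alice-1650000000000000000.gob", logger)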
102 | // todo: test 103 | func (s *spool) FetchGob(fileName string, l *log.Logger) (*message.MessagePack, error) { 104 | 105 | f, err := os.Open(s.spoolDir + "/" + fileName) 106 | if err != nil { 107 | return nil, err 108 | } 109 | defer f.Close() 110 | 111 | mp, err := decodeGob(f, l) 112 | if err != nil { 113 | return nil, err 114 | } 115 | 116 | return mp, nil 117 | } 118 | 119 | // decodeGob take io.Reader and returns its decoded content into a MessagePack structure 120 | // todo: test 121 | func decodeGob(r io.Reader, l *log.Logger) (*message.MessagePack, error) { 122 | var mp = new(message.MessagePack) 123 | 124 | genc := gob.NewDecoder(r) 125 | err := genc.Decode(mp) 126 | if err != nil { 127 | l.Println(err) 128 | return nil, err 129 | } 130 | 131 | return mp, nil 132 | } 133 | -------------------------------------------------------------------------------- /internal/spool/spool_test.go: -------------------------------------------------------------------------------- 1 | package spool 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "strings" 7 | "testing" 8 | 9 | "github.com/CLIP-HPC/goslmailer/internal/message" 10 | ) 11 | 12 | type depTest struct { 13 | dir string 14 | mp *message.MessagePack 15 | expectErr bool 16 | } 17 | 18 | type dTList []depTest 19 | 20 | func TestDepositToSpool(t *testing.T) { 21 | // is this a smart thing to do in testing? 22 | name, err := os.MkdirTemp("", "goslTest-") 23 | if err != nil { 24 | t.Fatalf("ERROR making temp dir for test: %q\n", err) 25 | } else { 26 | t.Logf("TEMPDIR: %q\n", name) 27 | defer func() { 28 | e := os.RemoveAll(name) 29 | if e != nil { 30 | t.Fatalf("ERROR removing temp dir for test: %q\n", err) 31 | } 32 | }() 33 | } 34 | dts := dTList{ 35 | { 36 | dir: "", 37 | mp: &message.MessagePack{}, 38 | expectErr: true, 39 | }, 40 | { 41 | dir: "/for/sure/this/doesnt/exist", 42 | mp: &message.MessagePack{}, 43 | expectErr: true, 44 | }, 45 | { 46 | dir: name, 47 | mp: &message.MessagePack{}, 48 | expectErr: true, 49 | }, 50 | { 51 | dir: name, 52 | mp: nil, 53 | expectErr: true, 54 | }, 55 | { 56 | dir: name, 57 | mp: &message.MessagePack{ 58 | Connector: "conn", 59 | TargetUser: "pja", 60 | }, 61 | expectErr: false, 62 | }, 63 | } 64 | 65 | for i, v := range dts { 66 | t.Run(fmt.Sprintf("TEST %d: %q %v", i, v.dir, v.mp), func(t *testing.T) { 67 | err := DepositToSpool(v.dir, v.mp) 68 | switch { 69 | case v.expectErr && err == nil: 70 | t.Fatalf("FAILED test %q, deposit to %q with %q\n", i, v.dir, err) 71 | case !v.expectErr && err != nil: 72 | t.Fatalf("FAILED test %q, deposit to %q with %q\n", i, v.dir, err) 73 | } 74 | }) 75 | } 76 | } 77 | 78 | func TestNewSpool(t *testing.T) { 79 | nsl := []struct { 80 | dir string 81 | expecterr bool 82 | }{ 83 | { 84 | dir: "/tmp", 85 | expecterr: false, 86 | }, 87 | { 88 | dir: "/for/sure/this/doesnt/exist", 89 | expecterr: true, 90 | }, 91 | { 92 | dir: "", 93 | expecterr: true, 94 | }, 95 | } 96 | 97 | for i, v := range nsl { 98 | teststr := fmt.Sprintf("TEST %d %s %v", i, v.dir, v.expecterr) 99 | t.Run(teststr, func(t *testing.T) { 100 | sp, err := NewSpool(v.dir) 101 | t.Logf("Got: %v\n", sp) 102 | switch { 103 | case v.expecterr && err == nil: 104 | t.Fatalf("FAIL: Expected err and got none") 105 | case !v.expecterr && err != nil: 106 | t.Fatalf("FAIL: Expected ok and got err: %q", err) 107 | case err != nil: 108 | // here we break the switch if we're in "expected" error, so we can't test sp value below 109 | break 110 | case v.dir != sp.spoolDir: 111 | t.Fatalf("FAIL: Expected spooldir=%q ,got 
%q\n", v.dir, sp.spoolDir) 112 | } 113 | }) 114 | } 115 | } 116 | 117 | func TestGenFileName(t *testing.T) { 118 | testList := []struct { 119 | name string 120 | sdir string 121 | mp *message.MessagePack 122 | wantstr string 123 | expecterr bool 124 | }{ 125 | { 126 | name: "test all ok", 127 | sdir: "/tmp", 128 | mp: &message.MessagePack{ 129 | Connector: "testCon", 130 | TargetUser: "pja", 131 | }, 132 | wantstr: "/tmp/testCon-pja-", 133 | expecterr: false, 134 | }, 135 | { 136 | name: "test empty spooldir", 137 | sdir: "", 138 | mp: &message.MessagePack{ 139 | Connector: "testCon", 140 | TargetUser: "pja", 141 | }, 142 | wantstr: "/tmp/testCon-pja-", 143 | expecterr: true, 144 | }, 145 | { 146 | name: "test missing targetuser", 147 | sdir: "/tmp", 148 | mp: &message.MessagePack{ 149 | Connector: "testCon", 150 | }, 151 | wantstr: "/tmp/testCon-pja-", 152 | expecterr: true, 153 | }, 154 | { 155 | name: "test empty messagepack", 156 | sdir: "/tmp", 157 | mp: &message.MessagePack{}, 158 | wantstr: "/tmp/testCon-pja-", 159 | expecterr: true, 160 | }, 161 | { 162 | name: "test nil messagepack", 163 | sdir: "/tmp", 164 | mp: nil, 165 | wantstr: "/tmp/testCon-pja-", 166 | expecterr: true, 167 | }, 168 | } 169 | 170 | for _, test := range testList { 171 | t.Run(test.name, func(t *testing.T) { 172 | fn, err := genFileName(test.sdir, test.mp) 173 | switch { 174 | case test.expecterr && err == nil: 175 | t.Fatalf("FAIL: Expected err and got none") 176 | case !test.expecterr && err != nil: 177 | t.Fatalf("FAIL: Expected ok and got err: %q", err) 178 | case err != nil: 179 | break 180 | case !strings.HasPrefix(fn, test.wantstr): 181 | t.Fatalf("FAIL: %q doesn't have prefix %q", fn, test.wantstr) 182 | default: 183 | t.Logf("SUCCESS %q has prefix %q", fn, test.wantstr) 184 | } 185 | }) 186 | } 187 | } 188 | -------------------------------------------------------------------------------- /internal/spool/spoolfiles.go: -------------------------------------------------------------------------------- 1 | package spool 2 | 3 | import ( 4 | "log" 5 | "os" 6 | "strings" 7 | "time" 8 | ) 9 | 10 | type FileGob struct { 11 | //filename fs.DirEntry 12 | Filename string 13 | User string 14 | Connector string 15 | TimeStamp time.Time 16 | } 17 | 18 | //type SpooledGobs map[filename]FileGob 19 | type SpooledGobs map[string]FileGob 20 | 21 | func (s *spool) GetSpooledGobsList(l *log.Logger) (*SpooledGobs, error) { 22 | var sg SpooledGobs = SpooledGobs{} 23 | 24 | de, err := os.ReadDir(s.spoolDir) 25 | if err != nil { 26 | return nil, err 27 | } 28 | for _, v := range de { 29 | //l.Printf("FILE: %q is regular file: %v\n", v.Name(), v.Type().IsRegular()) 30 | if v.Type().IsRegular() && strings.HasSuffix(v.Name(), ".gob") { 31 | //l.Printf("FILE: %q is a GOB\n", v.Name()) 32 | mp, err := s.FetchGob(v.Name(), l) 33 | if err != nil { 34 | l.Printf("FAILED to read %q gob file: %s\n", v.Name(), err) 35 | } else { 36 | sg[v.Name()] = FileGob{ 37 | Filename: v.Name(), 38 | User: mp.TargetUser, 39 | Connector: mp.Connector, 40 | TimeStamp: mp.TimeStamp, 41 | } 42 | } 43 | 44 | } 45 | } 46 | return &sg, nil 47 | } 48 | -------------------------------------------------------------------------------- /internal/version/version.go: -------------------------------------------------------------------------------- 1 | package version 2 | 3 | import "log" 4 | 5 | var ( 6 | buildVersion string 7 | buildCommit string 8 | ) 9 | 10 | func DumpVersion(l *log.Logger) { 11 | l.Printf("----------------------------------------\n") 12 | 
l.Printf("Version: %s\n", buildVersion) 13 | l.Printf("Build commit hash: %s\n", buildCommit) 14 | l.Printf("----------------------------------------\n") 15 | } 16 | -------------------------------------------------------------------------------- /templates/README.md: -------------------------------------------------------------------------------- 1 | # Templating guide 2 | 3 | > **Info** 4 | > In SLURM < 21.08.x, only a subset of job related information are available as SLURM environment variables in the `adaptive_card_template.json` and `telegramTemplate.html` templates. Instead of the SLURM environment variables (i.e `Job.SlurmEnvironment.SLURM_JOB_USER`) the variables from `SacctMetrics` can be used (i.e. `.Job.JobStats.User`) instead. See the [adaptive_card_template.json](test_e2e/cases/test_05/conf/adaptive_card_template.json) in the `test_e2e/cases/test_05` test case as an example. 5 | 6 | Goslmailer uses golang [text/template](https://pkg.go.dev/text/template) and [html/template](https://pkg.go.dev/html/template) libraries. 7 | 8 | The connectors call `renderer.RenderTemplate` function. 9 | 10 | Data structure you can reference in the template can be found in: 11 | 12 | * [job_data.go](../internal/slurmjob/job_data.go) 13 | * [rendererer.go](../internal/renderer/renderer.go) 14 | 15 | Example: 16 | 17 | * `{{ .Job.SlurmEnvironment.SLURM_JOB_MAIL_TYPE }}` 18 | * `{{ .Job.JobStats.MaxRSS | humanBytes }}` 19 | 20 | * [Example telegram html template](./telegramTemplate.html) 21 | * [More template examples](./templates/) 22 | 23 | Structures: 24 | 25 | ``` 26 | struct { 27 | Job slurmjob.JobContext 28 | UserID string 29 | Created string 30 | } 31 | 32 | type JobContext struct { 33 | SlurmEnvironment 34 | JobStats SacctMetrics 35 | Hints []string 36 | MailSubject string 37 | PrunedMessageCount uint32 38 | } 39 | 40 | type SlurmEnvironment struct { 41 | SLURM_ARRAY_JOB_ID string 42 | SLURM_ARRAY_TASK_COUNT string 43 | SLURM_ARRAY_TASK_ID string 44 | SLURM_ARRAY_TASK_MAX string 45 | SLURM_ARRAY_TASK_MIN string 46 | SLURM_ARRAY_TASK_STEP string 47 | SLURM_CLUSTER_NAME string 48 | SLURM_JOB_ACCOUNT string 49 | SLURM_JOB_DERIVED_EC string 50 | SLURM_JOB_EXIT_CODE string 51 | SLURM_JOB_EXIT_CODE2 string 52 | SLURM_JOB_EXIT_CODE_MAX string 53 | SLURM_JOB_EXIT_CODE_MIN string 54 | SLURM_JOB_GID string 55 | SLURM_JOB_GROUP string 56 | SLURM_JOBID string 57 | SLURM_JOB_ID string 58 | SLURM_JOB_MAIL_TYPE string 59 | SLURM_JOB_NAME string 60 | SLURM_JOB_NODELIST string 61 | SLURM_JOB_PARTITION string 62 | SLURM_JOB_QUEUED_TIME string 63 | SLURM_JOB_RUN_TIME string 64 | SLURM_JOB_STATE string 65 | SLURM_JOB_STDIN string 66 | SLURM_JOB_UID string 67 | SLURM_JOB_USER string 68 | SLURM_JOB_WORK_DIR string 69 | } 70 | 71 | type SacctMetrics struct { 72 | State string 73 | Ncpus int64 74 | Nodes int 75 | Submittime string 76 | Starttime string 77 | Endtime string 78 | CPUTimeStr string 79 | CPUTime float64 80 | TotalCPU float64 81 | TotalCPUStr string 82 | UserCPU float64 83 | SystemCPU float64 84 | ReqMem uint64 85 | MaxRSS uint64 86 | Walltime uint64 87 | WalltimeStr string 88 | Runtime uint64 89 | RuntimeStr string 90 | MaxDiskWrite uint64 91 | MaxDiskRead uint64 92 | } 93 | ``` 94 | -------------------------------------------------------------------------------- /templates/matrix_template.md: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | ## {{ .Job.MailSubject }} ({{ .Job.SlurmEnvironment.SLURM_JOB_NAME }}) {{ .Job.SlurmEnvironment.SLURM_JOB_MAIL_TYPE 
}} 4 | Created {{ .Created }} 5 | {{ if ne .Job.PrunedMessageCount 0 }} 6 | WARNING: Rate limiting triggered. {{ .Job.PrunedMessageCount }} additional notifications have been suppressed 7 | {{ end }} 8 | 9 | #### Details 10 | 11 | - **Job Name** : {{ .Job.SlurmEnvironment.SLURM_JOB_NAME }} 12 | - **Job ID** : {{ .Job.SlurmEnvironment.SLURM_JOB_ID }} 13 | - **User** : {{ .Job.JobStats.User }} 14 | - **Partition** : {{ .Job.JobStats.Partition }} 15 | - **Nodes Used** : {{ .Job.JobStats.NodeList }} 16 | - **Cores** : {{ .Job.JobStats.Ncpus }} 17 | - **Job state** : {{ .Job.SlurmEnvironment.SLURM_JOB_STATE }} 18 | {{- if ne .Job.SlurmEnvironment.SLURM_JOB_STATE "RUNNING" }} 19 | - **Exit Code** : {{ .Job.SlurmEnvironment.SLURM_JOB_EXIT_CODE_MAX }} 20 | {{- end }} 21 | - **Submit** : {{ .Job.JobStats.Submittime }} 22 | - **Start** : {{ .Job.JobStats.Starttime }} 23 | {{- if ne .Job.SlurmEnvironment.SLURM_JOB_STATE "RUNNING" }} 24 | - **End** : {{ .Job.JobStats.Endtime }} 25 | {{- end }} 26 | - **Res. Walltime** : {{ .Job.JobStats.WalltimeStr }} 27 | {{- if ne .Job.SlurmEnvironment.SLURM_JOB_MAIL_TYPE "Began" }} 28 | - **Used Walltime** : {{ .Job.SlurmEnvironment.SLURM_JOB_RUN_TIME }} 29 | {{- if ne .Job.SlurmEnvironment.SLURM_JOB_STATE "RUNNING" }} 30 | - **Used CPU time** : {{ .Job.JobStats.TotalCPUStr }} 31 | - **% User (Comp)** : {{ printf "%5.2f%%" .Job.JobStats.CalcUserComputePercentage }} 32 | - **% System (I/O)** : {{ printf "%5.2f%%" .Job.JobStats.CalcSystemComputePercentage }} 33 | {{- end }} 34 | {{- end }} 35 | - **Memory Requested** : {{ .Job.JobStats.ReqMem | humanBytes }} 36 | {{- if ne .Job.SlurmEnvironment.SLURM_JOB_MAIL_TYPE "Began" }} 37 | - **Max Memory Used** : {{ .Job.JobStats.MaxRSS | humanBytes }} 38 | - **Max Disk Write** : {{ .Job.JobStats.MaxDiskWrite | humanBytes }} 39 | - **Max Disk Read** : {{ .Job.JobStats.MaxDiskRead | humanBytes }} 40 | {{- end }} 41 | 42 | {{- range .Job.Hints }} 43 | - {{ . }} 44 | 45 | {{- end }} 46 | 47 | --- 48 | -------------------------------------------------------------------------------- /templates/mattermostTemplate.md: -------------------------------------------------------------------------------- 1 | {{ .Job.MailSubject }} {{ .Job.SlurmEnvironment.SLURM_JOB_MAIL_TYPE }} 2 | `----------------------------------------` 3 | {{ if ne .Job.PrunedMessageCount 0 }} 4 | *WARNING: Rate limiting triggered.
{{ .Job.PrunedMessageCount }} additional notifications have been suppressed* 5 | `----------------------------------------` 6 | {{ end }} 7 | ``` 8 | Job Name : {{ .Job.SlurmEnvironment.SLURM_JOB_NAME }} 9 | Job ID : {{ .Job.SlurmEnvironment.SLURM_JOB_ID }} 10 | User : {{ .Job.SlurmEnvironment.SLURM_JOB_USER }} 11 | Partition : {{ .Job.SlurmEnvironment.SLURM_JOB_PARTITION }} 12 | Nodes Used : {{ .Job.SlurmEnvironment.SLURM_JOB_NODELIST }} 13 | Cores : {{ .Job.JobStats.Ncpus }} 14 | Job state : {{ .Job.SlurmEnvironment.SLURM_JOB_STATE }} 15 | {{- if ne .Job.SlurmEnvironment.SLURM_JOB_STATE "RUNNING" }} 16 | Exit Code : {{ .Job.SlurmEnvironment.SLURM_JOB_EXIT_CODE_MAX }} 17 | {{- end }} 18 | Submit : {{ .Job.JobStats.Submittime }} 19 | Start : {{ .Job.JobStats.Starttime }} 20 | {{- if ne .Job.SlurmEnvironment.SLURM_JOB_STATE "RUNNING" }} 21 | End : {{ .Job.JobStats.Endtime }} 22 | {{- end }} 23 | Reserved Walltime : {{ .Job.JobStats.WalltimeStr }} 24 | {{- if ne .Job.SlurmEnvironment.SLURM_JOB_MAIL_TYPE "Began" }} 25 | Used Walltime : {{ .Job.SlurmEnvironment.SLURM_JOB_RUN_TIME }} 26 | {{- if ne .Job.SlurmEnvironment.SLURM_JOB_STATE "RUNNING" }} 27 | Used CPU time : {{ .Job.JobStats.TotalCPUStr }} 28 | % User (Computation) : {{ printf "%5.2f%%" .Job.JobStats.CalcUserComputePercentage }} 29 | % System (I/O) : {{ printf "%5.2f%%" .Job.JobStats.CalcSystemComputePercentage }} 30 | {{- end }} 31 | {{- end }} 32 | Memory Requested : {{ .Job.JobStats.ReqMem | humanBytes }} 33 | {{- if ne .Job.SlurmEnvironment.SLURM_JOB_MAIL_TYPE "Began" }} 34 | Max Memory Used : {{ .Job.JobStats.MaxRSS | humanBytes }} 35 | Max Disk Write : {{ .Job.JobStats.MaxDiskWrite | humanBytes }} 36 | Max Disk Read : {{ .Job.JobStats.MaxDiskRead | humanBytes }} 37 | {{- end }} 38 | ``` 39 | `----------------------------------------` 40 | ``` 41 | {{- range .Job.Hints }} 42 | {{ . }} 43 | {{- end }} 44 | ``` 45 | `----------------------------------------` 46 | -------------------------------------------------------------------------------- /templates/telegramTemplate.html: -------------------------------------------------------------------------------- 1 | {{ .Job.MailSubject }} {{ .Job.SlurmEnvironment.SLURM_JOB_MAIL_TYPE }} 2 | Created {{ .Created }} 3 | {{ if ne .Job.PrunedMessageCount 0 }} 4 | WARNING: Rate limiting triggered. {{ .Job.PrunedMessageCount }} additional notifications have been suppressed 5 | {{ end }} 6 |
------------------------------
 7 | Job Name         : {{ .Job.SlurmEnvironment.SLURM_JOB_NAME }}
 8 | Job ID           : {{ .Job.SlurmEnvironment.SLURM_JOB_ID }}
 9 | User             : {{ .Job.SlurmEnvironment.SLURM_JOB_USER }}
10 | Partition        : {{ .Job.SlurmEnvironment.SLURM_JOB_PARTITION }}
11 | Nodes Used       : {{ .Job.SlurmEnvironment.SLURM_JOB_NODELIST }}
12 | Cores            : {{ .Job.JobStats.Ncpus }}
13 | Job state        : {{ .Job.SlurmEnvironment.SLURM_JOB_STATE }}
14 | {{- if ne .Job.SlurmEnvironment.SLURM_JOB_STATE "RUNNING" }}
15 | Exit Code        : {{ .Job.SlurmEnvironment.SLURM_JOB_EXIT_CODE_MAX }}
16 | {{- end }}
17 | Submit           : {{ .Job.JobStats.Submittime }}
18 | Start            : {{ .Job.JobStats.Starttime }}
19 | {{- if ne .Job.SlurmEnvironment.SLURM_JOB_STATE "RUNNING" }}
20 | End              : {{ .Job.JobStats.Endtime }}
21 | {{- end }}
22 | Res. Walltime    : {{ .Job.JobStats.WalltimeStr }}
23 | {{- if ne .Job.SlurmEnvironment.SLURM_JOB_MAIL_TYPE "Began" }}
24 | Used Walltime    : {{ .Job.SlurmEnvironment.SLURM_JOB_RUN_TIME }}
25 | {{- if ne .Job.SlurmEnvironment.SLURM_JOB_STATE "RUNNING" }}
26 | Used CPU time    : {{ .Job.JobStats.TotalCPUStr }}
27 | % User (Comp)    : {{ printf "%5.2f%%" .Job.JobStats.CalcUserComputePercentage }}
28 | % System (I/O)   : {{ printf "%5.2f%%" .Job.JobStats.CalcSystemComputePercentage }}
29 | {{- end }}
30 | {{- end }}
31 | Memory Requested : {{ .Job.JobStats.ReqMem | humanBytes }}
32 | {{- if ne .Job.SlurmEnvironment.SLURM_JOB_MAIL_TYPE "Began" }}
33 | Max Memory Used  : {{ .Job.JobStats.MaxRSS | humanBytes }}
34 | Max Disk Write   : {{ .Job.JobStats.MaxDiskWrite | humanBytes }}
35 | Max Disk Read    : {{ .Job.JobStats.MaxDiskRead | humanBytes }}
36 | {{- end }}
37 | ------------------------------
38 | {{- range .Job.Hints }} 39 | - {{ . }} 40 | 41 | {{- end }} 42 | -------------------------------------------------------------------------------- /test_data/config_test/gobler.conf: -------------------------------------------------------------------------------- 1 | { 2 | "logfile": "/tmp/goslmailer.log", 3 | "binpaths": { 4 | "sacct":"/usr/bin/sacct", 5 | "sstat":"/usr/bin/sstat" 6 | }, 7 | "defaultconnector": "msteams", 8 | "connectors": { 9 | "msteams": { 10 | "name": "gobler.conf", 11 | "renderToFile": "no", 12 | "spoolDir": "/tmp", 13 | "adaptiveCardTemplate": "/etc/slurm/adaptive_card_template.json", 14 | "url": "https://msteams/webhook/url", 15 | "useLookup": "GECOS", 16 | "monitorT": "20000ms", 17 | "pickerT": "5000ms", 18 | "psBufLen": "3", 19 | "numSenders": "4", 20 | "maxMsgPU": "5" 21 | }, 22 | "mailto": { 23 | "name": "original slurm mail functionality, extended.", 24 | "mailCmd": "/etc/slurm/mail.sh", 25 | "mailCmdParams": "-s \"Job {{ .SlurmEnvironment.SLURM_JOB_ID }} ({{ .SlurmEnvironment.SLURM_JOB_NAME }}) {{ .SlurmEnvironment.SLURM_JOB_MAIL_TYPE }}\"", 26 | "mailTemplate": "/etc/slurm/mailTemplate.tmpl", 27 | "allowList": "pja", 28 | "blockList": "", 29 | "spoolDir": "/tmp/mailspool", 30 | "monitorT": "200", 31 | "pickerT": "60" 32 | }, 33 | "textfile": { 34 | "path": "/tmp" 35 | } 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /test_data/config_test/gobler.toml: -------------------------------------------------------------------------------- 1 | # general configuration 2 | logfile = "/tmp/goslmailer.log" 3 | defaultconnector = "msteams" 4 | 5 | [binpaths] 6 | sacct="/usr/bin/sacct" 7 | sstat="/usr/bin/sstat" 8 | 9 | [connectors.msteams] 10 | name = "gobler.conf" 11 | renderToFile = "no" 12 | spoolDir = "/tmp" 13 | adaptiveCardTemplate = "/etc/slurm/adaptive_card_template.json" 14 | url = "https://msteams/webhook/url" 15 | useLookup = "GECOS" 16 | monitorT = "20000ms" 17 | pickerT = "5000ms" 18 | psBufLen = "3" 19 | numSenders = "4" 20 | maxMsgPU = "5" 21 | 22 | [connectors.mailto] 23 | name = "original slurm mail functionality, extended." 
24 | mailCmd = "/etc/slurm/mail.sh" 25 | mailCmdParams = "-s \"Job {{ .SlurmEnvironment.SLURM_JOB_ID }} ({{ .SlurmEnvironment.SLURM_JOB_NAME }}) {{ .SlurmEnvironment.SLURM_JOB_MAIL_TYPE }}\"" 26 | mailTemplate = "/etc/slurm/mailTemplate.tmpl" 27 | allowList = "pja" 28 | blockList = "" 29 | spoolDir = "/tmp/mailspool" 30 | monitorT = "200" 31 | pickerT = "60" 32 | 33 | [connectors.textfile] 34 | path = "/tmp" -------------------------------------------------------------------------------- /test_data/config_test/goslmailer.conf: -------------------------------------------------------------------------------- 1 | { 2 | "logfile": "/tmp/goslmailer.log", 3 | "binpaths": { 4 | "sacct":"/usr/bin/sacct", 5 | "sstat":"/usr/bin/sstat" 6 | }, 7 | "defaultconnector": "msteams", 8 | "connectors": { 9 | "msteams": { 10 | "renderToFile": "spool", 11 | "spoolDir": "/tmp", 12 | "useLookup": "GECOS", 13 | "url": "https://msteams/webhook/url" 14 | }, 15 | "mailto": { 16 | "name": "original slurm mail functionality, extended.", 17 | "mailCmd": "/etc/slurm/mail.sh", 18 | "mailCmdParams": "-s \"Job {{ .SlurmEnvironment.SLURM_JOB_ID }} ({{ .SlurmEnvironment.SLURM_JOB_NAME }}) {{ .SlurmEnvironment.SLURM_JOB_MAIL_TYPE }}\"", 19 | "mailTemplate": "/etc/slurm/mailTemplate.tmpl", 20 | "allowList": ".+@(imp|imba.oeaw|gmi.oeaw).ac.at", 21 | "blockList": "" 22 | }, 23 | "textfile": { 24 | "path": "/tmp" 25 | } 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /test_data/config_test/goslmailer.toml: -------------------------------------------------------------------------------- 1 | # general configuration 2 | logfile = "/tmp/goslmailer.log" 3 | defaultconnector = "msteams" 4 | 5 | [binpaths] 6 | sacct="/usr/bin/sacct" 7 | sstat="/usr/bin/sstat" 8 | 9 | [connectors] 10 | [connectors.msteams] 11 | renderToFile = "spool" 12 | spoolDir = "/tmp" 13 | useLookup = "GECOS" 14 | url = "https://msteams/webhook/url" 15 | 16 | [connectors.mailto] 17 | name = "original slurm mail functionality, extended." 18 | mailCmd = "/etc/slurm/mail.sh" 19 | mailCmdParams = "-s \"Job {{ .SlurmEnvironment.SLURM_JOB_ID }} ({{ .SlurmEnvironment.SLURM_JOB_NAME }}) {{ .SlurmEnvironment.SLURM_JOB_MAIL_TYPE }}\"" 20 | mailTemplate = "/etc/slurm/mailTemplate.tmpl" 21 | allowList = ".+@(imp|imba.oeaw|gmi.oeaw).ac.at" 22 | blockList = "" 23 | 24 | [connectors.textfile] 25 | path = "/tmp" 26 | -------------------------------------------------------------------------------- /test_data/config_test/goslmailer_annotated.toml: -------------------------------------------------------------------------------- 1 | 2 | # if specified; append logs to this file; else; dump to stderr 3 | # 4 | logfile = "/tmp/goslmailer.log" 5 | 6 | # default connector to be used for message delivery for receivers without full 'connector:user' specification 7 | # 8 | defaultconnector = "msteams" 9 | 10 | # enable/disable hints generation 11 | # ToBeImplemented, hints now permanently enabled 12 | # 13 | #disableHints = 1 14 | 15 | [binpaths] 16 | sacct = "/usr/bin/sacct" 17 | sstat = "/usr/bin/sstat" 18 | 19 | # map of connector configurations 20 | # 21 | [connectors] 22 | 23 | # each connector has it's own map of config attributes 24 | # 25 | [connectors.msteams] 26 | name = "dev channel" # unused 27 | spoolDir = "/tmp" # see below. 
28 | renderToFile = "yes" # if "yes"; dump rendered templates to working dir; else if "spool"; dump to spoolDir; else; send to teams url 29 | url = "https://msteams/webhook/url" # ms teams webhook url 30 | adaptiveCardTemplate = "/path/template.json" # full path to adaptive card template file 31 | useLookup = "GECOS" # which function from lookup package the connector uses to map cmdline userid to end-system userid 32 | 33 | [connectors.mailto] 34 | name = "original slurm mail functionality, extended." 35 | mailCmd = "/usr/bin/mutt" 36 | mailCmdParams = "-s \"Job {{ .SlurmEnvironment.SLURM_JOB_ID }} ({{ .SlurmEnvironment.SLURM_JOB_NAME }}) {{ .SlurmEnvironment.SLURM_JOB_MAIL_TYPE }}\"" 37 | mailTemplate = "/etc/slurm/mailTemplate.tmpl" # message body template 38 | mailFormat = "HTML" # `HTML` or `text` (can use telegram html in templates/) 39 | allowList = ".+@(imp|imba.oeaw|gmi.oeaw).ac.at" 40 | blockList = "" # unused 41 | 42 | [connectors.telegram] 43 | name = "telegram bot connector" 44 | url = "" 45 | token = "PasteHereTelegramBotToken" 46 | renderToFile = "no" 47 | spoolDir = "/tmp/telegramgobs" 48 | messageTemplate = "/etc/slurm/telegramTemplate.md" 49 | useLookup = "no" 50 | format = "MarkdownV2" 51 | 52 | [connectors.matrix] 53 | username = "@myuser:matrix.org" 54 | token = "syt_dGRpZG9ib3QXXXXXXXEyQMBEmvOVp_10Jm93" 55 | homeserver = "matrix.org" 56 | template = "/path/to/matrix_template.md" 57 | 58 | # fictitious "textfile" connector, package code for it doesn't exist, implementation left as the exercise for the reader 59 | [connectors.textfile] 60 | path = "/tmp" 61 | 62 | # map of qos names with their respective time limit (seconds) 63 | # used to generate hints, if hints are disabled, you can remove this from config 64 | [qosmap] 65 | RAPID = 3600 66 | SHORT = 28800 67 | MEDIUM = 172800 68 | LONG = 1209600 -------------------------------------------------------------------------------- /test_data/sacct.txt: -------------------------------------------------------------------------------- 1 | JobName|username|account|c|clip-c2-10|4|COMPLETED|2022-02-16T20:40:15|2022-02-16T20:40:15|2022-02-17T01:11:04|08:00:00|04:30:49|18:03:16|01:57.511|01:42.011|00:15.500|32Gn|||||||| 2 | batch||account||clip-c2-10|4|COMPLETED|2022-02-16T20:40:15|2022-02-16T20:40:15|2022-02-17T01:11:04||04:30:49|18:03:16|01:57.510|01:42.011|00:15.498|32Gn|1106640K|0.01M|0.13M|clip-c2-10|clip-c2-10|clip-c2-10|| 3 | extern||account||clip-c2-10|4|COMPLETED|2022-02-16T20:40:15|2022-02-16T20:40:15|2022-02-17T01:11:04||04:30:49|18:03:16|00:00.001|00:00:00|00:00.001|32Gn|0|0|0.00M|clip-c2-10|clip-c2-10|clip-c2-10|| 4 | -------------------------------------------------------------------------------- /test_data/sstat.txt: -------------------------------------------------------------------------------- 1 | 35995652.extern||||||| 2 | 35995652.batch|1806880K|70|205384|clip-m2-0|clip-m2-0|clip-m2-0| 3 | -------------------------------------------------------------------------------- /test_e2e/README.md: -------------------------------------------------------------------------------- 1 | # Test cases 2 | 3 | 1. [test_00](./cases/test_00/README.md) 4 | 2. [test_01](./cases/test_01/README.md) 5 | 3. [test_02](./cases/test_02/README.md) 6 | 4. [test_03](./cases/test_03/README.md) 7 | 5. [test_04](./cases/test_04/README.md) 8 | 6. [test_05](./cases/test_05/README.md) 9 | 10 | --- 11 | 12 | ## test_00 13 | --- 14 | 15 | Run all binaries without the config file. 16 | 17 | --- 18 | ## test_01 19 | --- 20 | 21 | 1. 
run gosler, save to gob 22 | 2. run gobler, render gob to file 23 | 24 | --- 25 | ## test_02 26 | --- 27 | 28 | goslmailer runs with broken sacct line (-j jobid missing) 29 | 30 | --- 31 | ## test_03 32 | --- 33 | 34 | goslmailer render msteams json to file (actual data) 35 | Job start 36 | 37 | --- 38 | ## test_04 39 | --- 40 | 41 | goslmailer render msteams json to file (actual data) 42 | Job end - fail 43 | 44 | 45 | ## test_05 46 | --- 47 | 48 | Test goslmailer on SLURM versions (<21.8.x) that don't set the job information in as env variables 49 | 50 | --- 51 | -------------------------------------------------------------------------------- /test_e2e/cases/test_00/README.md: -------------------------------------------------------------------------------- 1 | ## test_00 2 | --- 3 | 4 | Run all binaries without the config file. 5 | 6 | --- 7 | -------------------------------------------------------------------------------- /test_e2e/cases/test_00/conf/gobler.conf: -------------------------------------------------------------------------------- 1 | { 2 | "logfile": "", 3 | "defaultconnector": "msteams", 4 | "connectors": { 5 | "msteams": { 6 | "name": "dev channel", 7 | "renderToFile": "yes", 8 | "spoolDir": "/tmp", 9 | "adaptiveCardTemplate": "/etc/slurm/adaptive_card_template.json", 10 | "url": "http://localhost:9999/", 11 | "useLookup": "GECOS", 12 | "monitorT": "10000ms", 13 | "pickerT": "1000ms", 14 | "psBufLen": "3", 15 | "numSenders": "3", 16 | "maxMsgPU": "6" 17 | }, 18 | "telegram": { 19 | "name": "telegram bot connector", 20 | "url": "", 21 | "token": "PasteTokenHere", 22 | "renderToFile": "no", 23 | "spoolDir": "/tmp/telegramgobs", 24 | "messageTemplate": "/etc/slurm/telegramTemplate.md", 25 | "useLookup": "no", 26 | "monitorT": "5000ms", 27 | "pickerT": "1000ms", 28 | "psBufLen": "3", 29 | "numSenders": "3", 30 | "maxMsgPU": "6" 31 | }, 32 | "textfile": { 33 | "path": "/tmp" 34 | } 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /test_e2e/cases/test_00/conf/goslmailer.conf: -------------------------------------------------------------------------------- 1 | { 2 | "logfile": "", 3 | "defaultconnector": "msteams", 4 | "connectors": { 5 | "msteams": { 6 | "renderToFile": "spool", 7 | "spoolDir": "/tmp", 8 | "useLookup": "GECOS" 9 | }, 10 | "connectorX": { 11 | "name": "conX", 12 | "addr": "localhost", 13 | "port": "9999", 14 | "templateFile": "/tmp/conX.tmpl", 15 | "renderToFile": "spool", 16 | "spoolDir": "/tmp", 17 | "useLookup": "no" 18 | }, 19 | "mailto": { 20 | "name": "original slurm mail functionality, extended.", 21 | "mailCmd": "/etc/slurm/mail.sh", 22 | "mailCmdParams": "-s \"Job {{ .SlurmEnvironment.SLURM_JOB_ID }} ({{ .SlurmEnvironment.SLURM_JOB_NAME }}) {{ .SlurmEnvironment.SLURM_JOB_MAIL_TYPE }}\"", 23 | "mailTemplate": "/etc/slurm/mailTemplate.tmpl", 24 | "mailFormat": "HTML", 25 | "allowList": ".+@(imp|imba.oeaw|gmi.oeaw).ac.at", 26 | "blockList": "" 27 | }, 28 | "telegram": { 29 | "name": "CLIP SlurmBot", 30 | "url": "", 31 | "token": "PasteTokenHere", 32 | "renderToFile": "no", 33 | "spoolDir": "/tmp/telegramgobs", 34 | "messageTemplate": "/etc/slurm/telegramTemplate.html", 35 | "useLookup": "no", 36 | "format": "HTML" 37 | }, 38 | "textfile": { 39 | "path": "/tmp" 40 | } 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /test_e2e/cases/test_00/conf/tgslurmbot.conf: -------------------------------------------------------------------------------- 1 | { 2 | 
"logFile": "/tmp/tg.log", 3 | "defaultconnector": "msteams", 4 | "connectors": { 5 | "msteams": { 6 | "renderToFile": "spool", 7 | "spoolDir": "/tmp", 8 | "useLookup": "GECOS" 9 | }, 10 | "mailto": { 11 | "name": "original slurm mail functionality, extended.", 12 | "mailCmd": "/etc/slurm/mail.sh", 13 | "mailCmdParams": "-s \"Job {{ .SlurmEnvironment.SLURM_JOB_ID }} ({{ .SlurmEnvironment.SLURM_JOB_NAME }}) {{ .SlurmEnvironment.SLURM_JOB_MAIL_TYPE }}\"", 14 | "mailTemplate": "/etc/slurm/mailTemplate.tmpl", 15 | "allowList": ".+@(imp|imba.oeaw|gmi.oeaw).ac.at", 16 | "blockList": "" 17 | }, 18 | "telegram": { 19 | "name": "CLIP SlurmBot", 20 | "url": "", 21 | "token": "PasteTokenHere", 22 | "renderToFile": "no", 23 | "spoolDir": "/tmp/telegramgobs", 24 | "messageTemplate": "/etc/slurm/telegramTemplate.md", 25 | "useLookup": "no" 26 | }, 27 | "textfile": { 28 | "path": "/tmp" 29 | } 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /test_e2e/cases/test_00/sacct/sacct: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | cwd=`dirname $0` 4 | cat ${cwd}/sacct.txt 5 | -------------------------------------------------------------------------------- /test_e2e/cases/test_00/sacct/sacct.txt: -------------------------------------------------------------------------------- 1 | JobName|username|account|c|clip-c2-10|4|COMPLETED|2022-02-16T20:40:15|2022-02-16T20:40:15|2022-02-17T01:11:04|08:00:00|04:30:49|18:03:16|01:57.511|01:42.011|00:15.500|32Gn|||||||| 2 | batch||account||clip-c2-10|4|COMPLETED|2022-02-16T20:40:15|2022-02-16T20:40:15|2022-02-17T01:11:04||04:30:49|18:03:16|01:57.510|01:42.011|00:15.498|32Gn|1106640K|0.01M|0.13M|clip-c2-10|clip-c2-10|clip-c2-10|| 3 | extern||account||clip-c2-10|4|COMPLETED|2022-02-16T20:40:15|2022-02-16T20:40:15|2022-02-17T01:11:04||04:30:49|18:03:16|00:00.001|00:00:00|00:00.001|32Gn|0|0|0.00M|clip-c2-10|clip-c2-10|clip-c2-10|| 4 | -------------------------------------------------------------------------------- /test_e2e/cases/test_00/slurm_env/slurmenv.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/bash 2 | 3 | export SLURM_JOB_NAME=SendAllArrayJob 4 | export SLURM_JOB_GROUP=is.grp 5 | export SLURM_JOB_STATE=COMPLETED 6 | export SLURM_ARRAY_JOB_ID=1051491 7 | export SLURM_JOB_WORK_DIR=/users/petar.jager 8 | export SLURM_JOB_MAIL_TYPE=Ended 9 | export SLURM_JOBID=1051492 10 | export SLURM_ARRAY_TASK_ID=0 11 | export SLURM_JOB_RUN_TIME=00:00:30 12 | export SLURM_ARRAY_TASK_COUNT=6 13 | export SLURM_JOB_EXIT_CODE2=0:0 14 | export SLURM_JOB_DERIVED_EC=0 15 | export SLURM_JOB_ID=1051492 16 | export SLURM_JOB_USER=petar.jager 17 | export SLURM_ARRAY_TASK_MAX=5 18 | export SLURM_JOB_EXIT_CODE=0 19 | export SLURM_JOB_UID=58546 20 | export SLURM_JOB_NODELIST=stg-c2-0 21 | export SLURM_ARRAY_TASK_MIN=0 22 | export SLURM_JOB_STDIN=/dev/null 23 | export SLURM_ARRAY_TASK_STEP=1 24 | export SLURM_JOB_EXIT_CODE_MAX=0 25 | export SLURM_JOB_GID=1999 26 | export SLURM_CLUSTER_NAME=clip 27 | export SLURM_JOB_PARTITION=c 28 | export SLURM_JOB_ACCOUNT=hpc 29 | -------------------------------------------------------------------------------- /test_e2e/cases/test_00/test.yaml: -------------------------------------------------------------------------------- 1 | init: 2 | 3 | defaults: 4 | message: "Running test $i from $twd" 5 | systempaths: 6 | - $bwd 7 | 8 | pipeline: 9 | 10 | print_welcome: 11 | description: "Current test" 12 | 
action: workflow:print 13 | style: 1 14 | 15 | run_goslmailer: 16 | action: exec:run 17 | checkError: false 18 | env: 19 | GOSLMAILER_CONF: /tmp/doesntexist.conf 20 | commands: 21 | - goslmailer 22 | - gobler -c /tmp/doesntexist.conf 23 | - tgslurmbot -c /tmp/doesntexist.conf 24 | - matrixslurmbot -c /tmp/doesntexist.conf 25 | - discoslurmbot -c /tmp/doesntexist.conf 26 | 27 | test_assert: 28 | action: validator:assert 29 | expect: 30 | - '/ERROR: getConfig() failed/' 31 | - '/ERROR: getConfig() failed/' 32 | - '/ERROR: getConfig() failed/' 33 | - '/ERROR: getConfig() failed/' 34 | - '/ERROR: getConfig() failed/' 35 | actual: 36 | - $run_goslmailer.Cmd[0].Stdout 37 | - $run_goslmailer.Cmd[1].Stdout 38 | - $run_goslmailer.Cmd[2].Stdout 39 | - $run_goslmailer.Cmd[3].Stdout 40 | - $run_goslmailer.Cmd[4].Stdout 41 | 42 | # debug_gosl: 43 | # action: workflow:print 44 | # message: $AsJSON($run_goslmailer) 45 | 46 | # catch: 47 | # description: "ERROR CAUGHT BUT GOOD!" 48 | # action: workflow:print 49 | # message: CAUGHT $error.Error 50 | 51 | -------------------------------------------------------------------------------- /test_e2e/cases/test_01/README.md: -------------------------------------------------------------------------------- 1 | ## test_01 2 | --- 3 | 4 | 1. run gosler, save to gob 5 | 2. run gobler, render gob to file 6 | 7 | --- 8 | -------------------------------------------------------------------------------- /test_e2e/cases/test_01/conf/gobler.conf: -------------------------------------------------------------------------------- 1 | { 2 | "logfile": "/tmp/gobler_test05.log", 3 | "defaultconnector": "msteams", 4 | "connectors": { 5 | "msteams": { 6 | "name": "dev channel", 7 | "renderToFile": "yes", 8 | "spoolDir": "/tmp", 9 | "adaptiveCardTemplate": "/tmp/adaptive_card_template.json", 10 | "url": "http://localhost:9999/", 11 | "useLookup": "no", 12 | "monitorT": "10000ms", 13 | "pickerT": "1000ms", 14 | "psBufLen": "3", 15 | "numSenders": "3", 16 | "maxMsgPU": "6" 17 | } 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /test_e2e/cases/test_01/conf/goslmailer.conf: -------------------------------------------------------------------------------- 1 | { 2 | "debugconfig":true, 3 | "logfile": "", 4 | "defaultconnector": "msteams", 5 | "binpaths": { 6 | "sacct": "/tmp/sacct" 7 | }, 8 | "connectors": { 9 | "msteams": { 10 | "renderToFile": "spool", 11 | "spoolDir": "/tmp", 12 | "useLookup": "no" 13 | } 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /test_e2e/cases/test_01/sacct/sacct: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | cwd=`dirname $0` 4 | cat ${cwd}/sacct.txt 5 | -------------------------------------------------------------------------------- /test_e2e/cases/test_01/sacct/sacct.txt: -------------------------------------------------------------------------------- 1 | JobName|username|account|c|clip-c2-10|4|COMPLETED|2022-02-16T20:40:15|2022-02-16T20:40:15|2022-02-17T01:11:04|08:00:00|04:30:49|18:03:16|01:57.511|01:42.011|00:15.500|32Gn|||||||| 2 | batch||account||clip-c2-10|4|COMPLETED|2022-02-16T20:40:15|2022-02-16T20:40:15|2022-02-17T01:11:04||04:30:49|18:03:16|01:57.510|01:42.011|00:15.498|32Gn|1106640K|0.01M|0.13M|clip-c2-10|clip-c2-10|clip-c2-10|| 3 | 
extern||account||clip-c2-10|4|COMPLETED|2022-02-16T20:40:15|2022-02-16T20:40:15|2022-02-17T01:11:04||04:30:49|18:03:16|00:00.001|00:00:00|00:00.001|32Gn|0|0|0.00M|clip-c2-10|clip-c2-10|clip-c2-10|| 4 | -------------------------------------------------------------------------------- /test_e2e/cases/test_01/slurm_env/slurmenv.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/bash 2 | 3 | export SLURM_JOB_NAME=SendAllArrayJob 4 | export SLURM_JOB_GROUP=is.grp 5 | export SLURM_JOB_STATE=COMPLETED 6 | export SLURM_ARRAY_JOB_ID=1051491 7 | export SLURM_JOB_WORK_DIR=/users/petar.jager 8 | export SLURM_JOB_MAIL_TYPE=Ended 9 | export SLURM_JOBID=1051492 10 | export SLURM_ARRAY_TASK_ID=0 11 | export SLURM_JOB_RUN_TIME=00:00:30 12 | export SLURM_ARRAY_TASK_COUNT=6 13 | export SLURM_JOB_EXIT_CODE2=0:0 14 | export SLURM_JOB_DERIVED_EC=0 15 | export SLURM_JOB_ID=1051492 16 | export SLURM_JOB_USER=petar.jager 17 | export SLURM_ARRAY_TASK_MAX=5 18 | export SLURM_JOB_EXIT_CODE=0 19 | export SLURM_JOB_UID=58546 20 | export SLURM_JOB_NODELIST=stg-c2-0 21 | export SLURM_ARRAY_TASK_MIN=0 22 | export SLURM_JOB_STDIN=/dev/null 23 | export SLURM_ARRAY_TASK_STEP=1 24 | export SLURM_JOB_EXIT_CODE_MAX=0 25 | export SLURM_JOB_GID=1999 26 | export SLURM_CLUSTER_NAME=clip 27 | export SLURM_JOB_PARTITION=c 28 | export SLURM_JOB_ACCOUNT=hpc 29 | -------------------------------------------------------------------------------- /test_e2e/cases/test_01/test.yaml: -------------------------------------------------------------------------------- 1 | init: 2 | 3 | defaults: 4 | message: "Running test $i from $twd" 5 | systempaths: 6 | - $bwd 7 | 8 | pipeline: 9 | 10 | print_welcome: 11 | description: "Current test" 12 | action: workflow:print 13 | style: 1 14 | 15 | deploy_conf_files: 16 | action: storage:copy 17 | source: 18 | URL: $twd/conf 19 | dest: 20 | URL: /tmp 21 | 22 | deploy_sacct_files: 23 | action: storage:copy 24 | source: 25 | URL: $twd/sacct 26 | dest: 27 | URL: /tmp 28 | 29 | run_goslmailer: 30 | action: exec:run 31 | checkError: true 32 | env: 33 | GOSLMAILER_CONF: /tmp/goslmailer.conf 34 | commands: 35 | - source $twd/slurm_env/slurmenv.sh 36 | - goslmailer -s "Slurm Job_id=39766384 Name=job Began, Queued time 2-00:04:18" pja 37 | 38 | test_assert_goslmailer: 39 | action: validator:assert 40 | expect: 41 | - '/Deposit gob OK!/' 42 | actual: 43 | - $run_goslmailer.Output 44 | 45 | clear_gobler_log: 46 | action: exec:run 47 | checkError: false 48 | commands: 49 | - truncate -s0 /tmp/gobler_test05.log 50 | 51 | run_gobler: 52 | action: process:start 53 | watch: true 54 | immuneToHangups: true 55 | command: gobler 56 | arguments: 57 | - -c 58 | - /tmp/gobler.conf 59 | 60 | run_sleep: 61 | action: exec:run 62 | checkError: true 63 | commands: 64 | - sleep 5 65 | 66 | stop_gobler: 67 | action: process:stop 68 | pid: $run_gobler.Pid 69 | 70 | 71 | # debug_gobler: 72 | # action: workflow:print 73 | # message: $AsJSON($run_gobler) 74 | 75 | read_gobler_log: 76 | action: exec:run 77 | checkError: true 78 | commands: 79 | - cat /tmp/gobler_test05.log 80 | 81 | 82 | # https://github.com/viant/assertly#validation 83 | test_assert_gobler: 84 | action: validator:assert 85 | expect: 86 | - '~/Send successful to file: rendered-1051492-pja-/' 87 | - '~/SENDER msteams#\d: Gob deleted/' 88 | actual: 89 | - $read_gobler_log.Cmd[0].Stdout 90 | - $read_gobler_log.Cmd[0].Stdout 91 | 92 | # todo: 93 | # add test: 94 | # jq . 
rendered.json >/dev/null || echo FAILED 95 | 96 | # debug_gosl: 97 | # action: workflow:print 98 | # message: $AsJSON($run_goslmailer) 99 | 100 | # catch: 101 | # description: "ERROR CAUGHT BUT GOOD!" 102 | # action: workflow:print 103 | # message: CAUGHT $error.Error 104 | 105 | -------------------------------------------------------------------------------- /test_e2e/cases/test_02/README.md: -------------------------------------------------------------------------------- 1 | ## test_02 2 | --- 3 | 4 | goslmailer runs with broken sacct line (-j jobid missing) 5 | 6 | --- 7 | -------------------------------------------------------------------------------- /test_e2e/cases/test_02/conf/gobler.conf: -------------------------------------------------------------------------------- 1 | { 2 | "logfile": "", 3 | "defaultconnector": "msteams", 4 | "connectors": { 5 | "msteams": { 6 | "name": "dev channel", 7 | "renderToFile": "yes", 8 | "spoolDir": "/tmp", 9 | "adaptiveCardTemplate": "/tmp/adaptive_card_template.json", 10 | "url": "http://localhost:9999/", 11 | "useLookup": "no", 12 | "monitorT": "10000ms", 13 | "pickerT": "1000ms", 14 | "psBufLen": "3", 15 | "numSenders": "3", 16 | "maxMsgPU": "6" 17 | } 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /test_e2e/cases/test_02/conf/goslmailer.conf: -------------------------------------------------------------------------------- 1 | { 2 | "logfile": "", 3 | "defaultconnector": "msteams", 4 | "binpaths": { 5 | "sacct": "/tmp/sacct" 6 | }, 7 | "connectors": { 8 | "msteams": { 9 | "renderToFile": "spool", 10 | "spoolDir": "/tmp", 11 | "useLookup": "no" 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /test_e2e/cases/test_02/sacct/sacct: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | cwd=`dirname $0` 4 | cat ${cwd}/sacct.txt 5 | exit 1 6 | -------------------------------------------------------------------------------- /test_e2e/cases/test_02/sacct/sacct.txt: -------------------------------------------------------------------------------- 1 | sacct: fatal: Bad job/step specified: -n 2 | -------------------------------------------------------------------------------- /test_e2e/cases/test_02/slurm_env/slurmenv.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/bash 2 | 3 | unset SLURM_JOB_NAME 4 | unset SLURM_JOB_GROUP 5 | unset SLURM_JOB_STATE 6 | unset SLURM_ARRAY_JOB_ID 7 | unset SLURM_JOB_WORK_DIR 8 | unset SLURM_JOB_MAIL_TYPE 9 | unset SLURM_JOBID 10 | unset SLURM_ARRAY_TASK_ID 11 | unset SLURM_JOB_RUN_TIME 12 | unset SLURM_ARRAY_TASK_COUNT 13 | unset SLURM_JOB_EXIT_CODE2 14 | unset SLURM_JOB_DERIVED_EC 15 | unset SLURM_JOB_ID 16 | unset SLURM_JOB_USER 17 | unset SLURM_ARRAY_TASK_MAX 18 | unset SLURM_JOB_EXIT_CODE 19 | unset SLURM_JOB_UID 20 | unset SLURM_JOB_NODELIST 21 | unset SLURM_ARRAY_TASK_MIN 22 | unset SLURM_JOB_STDIN 23 | unset SLURM_ARRAY_TASK_STEP 24 | unset SLURM_JOB_EXIT_CODE_MAX 25 | unset SLURM_JOB_GID 26 | unset SLURM_CLUSTER_NAME 27 | unset SLURM_JOB_PARTITION 28 | unset SLURM_JOB_ACCOUNT 29 | -------------------------------------------------------------------------------- /test_e2e/cases/test_02/test.yaml: -------------------------------------------------------------------------------- 1 | init: 2 | test_readme: '${twd}/README.md' 3 | 4 | defaults: 5 | message: "Running test $i from $twd" 6 | systempaths: 7 | - 
$bwd 8 | 9 | pipeline: 10 | 11 | print_welcome: 12 | description: "Current test" 13 | action: workflow:print 14 | style: 1 15 | 16 | deploy_conf_files: 17 | action: storage:copy 18 | source: 19 | URL: $twd/conf 20 | dest: 21 | URL: /tmp 22 | 23 | deploy_sacct_files: 24 | action: storage:copy 25 | source: 26 | URL: $twd/sacct 27 | dest: 28 | URL: /tmp 29 | 30 | run_goslmailer: 31 | action: exec:run 32 | checkError: false 33 | env: 34 | GOSLMAILER_CONF: /tmp/goslmailer.conf 35 | commands: 36 | - source $twd/slurm_env/slurmenv.sh 37 | - goslmailer -s "Slurm Job_id=39766384 Name=job Began, Queued time 2-00:04:18" pja 38 | 39 | test_assert_goslmailer: 40 | action: validator:assert 41 | expect: 42 | - '/Unable to retrieve job stats. Error: failed to execute sacct command: exit status 1/' 43 | actual: 44 | - $run_goslmailer.Output 45 | -------------------------------------------------------------------------------- /test_e2e/cases/test_03/README.md: -------------------------------------------------------------------------------- 1 | ## test_03 2 | --- 3 | 4 | goslmailer render msteams json to file (actual data) 5 | Job start 6 | 7 | --- 8 | -------------------------------------------------------------------------------- /test_e2e/cases/test_03/conf/gobler.conf: -------------------------------------------------------------------------------- 1 | { 2 | "logfile": "", 3 | "defaultconnector": "msteams", 4 | "connectors": { 5 | "msteams": { 6 | "name": "dev channel", 7 | "renderToFile": "yes", 8 | "spoolDir": "/tmp", 9 | "adaptiveCardTemplate": "/tmp/adaptive_card_template.json", 10 | "url": "http://localhost:9999/", 11 | "useLookup": "no", 12 | "monitorT": "10000ms", 13 | "pickerT": "1000ms", 14 | "psBufLen": "3", 15 | "numSenders": "3", 16 | "maxMsgPU": "6" 17 | } 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /test_e2e/cases/test_03/conf/goslmailer.conf: -------------------------------------------------------------------------------- 1 | { 2 | "logfile": "", 3 | "defaultconnector": "msteams", 4 | "binpaths": { 5 | "sacct": "/tmp/sacct", 6 | "sstat": "/tmp/sstat" 7 | }, 8 | "connectors": { 9 | "msteams": { 10 | "renderToFile": "yes", 11 | "adaptiveCardTemplate": "/tmp/adaptive_card_template.json", 12 | "spoolDir": "/tmp", 13 | "useLookup": "no" 14 | } 15 | }, 16 | "qosmap": { 17 | "RAPID": 3600, 18 | "SHORT": 28800, 19 | "MEDIUM": 172800, 20 | "LONG": 1209600 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /test_e2e/cases/test_03/results/rendered-1052477-petar.jager@imba.oeaw.ac.at-1653378962712164702.json: -------------------------------------------------------------------------------- 1 | { 2 | "type":"message", 3 | "attachments":[ 4 | { 5 | "contentType":"application/vnd.microsoft.card.adaptive", 6 | "content":{ 7 | "type":"AdaptiveCard", 8 | "body":[ 9 | { 10 | "type":"TextBlock", 11 | "size":"medium", 12 | "weight":"bolder", 13 | "text":"CBE Slurm job info", 14 | "style":"heading" 15 | }, 16 | { 17 | "type":"ColumnSet", 18 | "columns":[ 19 | { 20 | "type":"Column", 21 | "items":[ 22 | { 23 | "type":"Image", 24 | "style":"person", 25 | "url":"https://upload.wikimedia.org/wikipedia/commons/thumb/3/3a/Slurm_logo.svg/590px-Slurm_logo.svg.png", 26 | "size":"small" 27 | } 28 | ], 29 | "width":"auto" 30 | }, 31 | { 32 | "type":"Column", 33 | "items":[ 34 | { 35 | "type":"TextBlock", 36 | "weight":"bolder", 37 | "text":"Job 1052477 Began", 38 | "wrap":true, 39 | "size":"Large", 40 | "color":"Good" 
41 | }, 42 | { 43 | "type":"TextBlock", 44 | "spacing":"none", 45 | 46 | "isSubtle":true, 47 | "wrap":true 48 | } 49 | ], 50 | "width":"stretch" 51 | } 52 | ] 53 | }, 54 | 55 | { 56 | "type":"FactSet", 57 | "separator":true, 58 | "spacing":"large", 59 | "isVisible":"true", 60 | "id":"ExternalData", 61 | "facts":[ 62 | { 63 | "type":"Fact", 64 | "title":"Job name", 65 | "value":"endlyJobStart" 66 | }, 67 | { 68 | "type":"Fact", 69 | "title":"Job ID", 70 | "value":"1052477" 71 | }, 72 | { 73 | "type":"Fact", 74 | "title":"User", 75 | "value":"petar.jager" 76 | }, 77 | { 78 | "type":"Fact", 79 | "title":"Partition", 80 | "value":"c" 81 | }, 82 | { 83 | "type":"Fact", 84 | "title":"Compute Nodes Used", 85 | "value":"stg-c2-0" 86 | }, 87 | { 88 | "type":"Fact", 89 | "title":"Cores", 90 | "value":"1" 91 | }, 92 | { 93 | "type":"Fact", 94 | "title":"Job state", 95 | "value":"RUNNING" 96 | }, 97 | 98 | { 99 | "type":"Fact", 100 | "title":"Submit", 101 | "value":"2022-05-24T07:43:07" 102 | }, 103 | { 104 | "type":"Fact", 105 | "title":"Start", 106 | "value":"2022-05-24T07:43:07" 107 | }, 108 | 109 | { 110 | "type":"Fact", 111 | "title":"Reserved Walltime", 112 | "value":"08:00:00" 113 | }, 114 | 115 | { 116 | "type":"Fact", 117 | "title":"Memory Requested", 118 | "value":"4.3 GB" 119 | }, 120 | 121 | ] 122 | }, 123 | 124 | { 125 | "type":"FactSet", 126 | "separator":true, 127 | "spacing":"large", 128 | "isVisible":"false", 129 | "id":"InternalData", 130 | "facts":[ 131 | { 132 | "type":"Fact", 133 | "title":"User", 134 | "value":"petar.jager" 135 | }, 136 | { 137 | "type":"Fact", 138 | "title":"JobStatus", 139 | "value":"Began" 140 | } 141 | ] 142 | } 143 | ], 144 | "actions":[ 145 | { 146 | "type":"Action.OpenUrl", 147 | "title":"View Google", 148 | "url":"https://www.youtube.com/watch?v=dQw4w9WgXcQ" 149 | } 150 | ], 151 | "$schema":"http://adaptivecards.io/schemas/adaptive-card.json", 152 | "version":"1.2", 153 | "msteams":{ 154 | "entities":[ 155 | { 156 | "type":"mention", 157 | "text":"petar.jager", 158 | "mentioned":{ 159 | "id":"petar.jager@imba.oeaw.ac.at", 160 | "name":"petar.jager" 161 | } 162 | } 163 | ] 164 | } 165 | } 166 | } 167 | ] 168 | } 169 | -------------------------------------------------------------------------------- /test_e2e/cases/test_03/sacct/sacct: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | cwd=`dirname $0` 4 | cat ${cwd}/sacct.txt 5 | -------------------------------------------------------------------------------- /test_e2e/cases/test_03/sacct/sacct.txt: -------------------------------------------------------------------------------- 1 | endlyJobStart|petar.jager|account|c|stg-c2-0|1|RUNNING|2022-05-24T07:43:07|2022-05-24T07:43:07|Unknown|08:00:00|00:00:14|00:00:14|00:00:00|00:00:00|00:00:00|4G|||||||| 2 | batch||account||stg-c2-0|1|RUNNING|2022-05-24T07:43:07|2022-05-24T07:43:07|Unknown||00:00:14|00:00:14|00:00:00|00:00:00|00:00:00||||||||| 3 | extern||account||stg-c2-0|1|RUNNING|2022-05-24T07:43:07|2022-05-24T07:43:07|Unknown||00:00:14|00:00:14|00:00:00|00:00:00|00:00:00||||||||| 4 | -------------------------------------------------------------------------------- /test_e2e/cases/test_03/sacct/sstat: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | cwd=`dirname $0` 4 | cat ${cwd}/sstat.txt 5 | -------------------------------------------------------------------------------- /test_e2e/cases/test_03/sacct/sstat.txt: 
-------------------------------------------------------------------------------- 1 | 1052477.extern|0|0|2012|stg-c2-0|stg-c2-0|stg-c2-0| 2 | 1052477.batch|344K|36|8267|stg-c2-0|stg-c2-0|stg-c2-0| 3 | -------------------------------------------------------------------------------- /test_e2e/cases/test_03/slurm_env/slurmenv.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/bash 2 | 3 | export SLURM_ARRAY_JOB_ID="" 4 | export SLURM_ARRAY_TASK_COUNT="" 5 | export SLURM_ARRAY_TASK_ID="" 6 | export SLURM_ARRAY_TASK_MAX="" 7 | export SLURM_ARRAY_TASK_MIN="" 8 | export SLURM_ARRAY_TASK_STEP="" 9 | export SLURM_CLUSTER_NAME="clip" 10 | export SLURM_JOB_ACCOUNT="hpc" 11 | export SLURM_JOB_DERIVED_EC="" 12 | export SLURM_JOB_EXIT_CODE="" 13 | export SLURM_JOB_EXIT_CODE2="" 14 | export SLURM_JOB_EXIT_CODE_MAX="" 15 | export SLURM_JOB_EXIT_CODE_MIN="" 16 | export SLURM_JOB_GID="1999" 17 | export SLURM_JOB_GROUP="is.grp" 18 | export SLURM_JOBID="1052477" 19 | export SLURM_JOB_ID="1052477" 20 | export SLURM_JOB_MAIL_TYPE="Began" 21 | export SLURM_JOB_NAME="endlyJobStart" 22 | export SLURM_JOB_NODELIST="stg-c2-0" 23 | export SLURM_JOB_PARTITION="c" 24 | export SLURM_JOB_QUEUED_TIME="00:00:00" 25 | export SLURM_JOB_RUN_TIME="" 26 | export SLURM_JOB_STATE="RUNNING" 27 | export SLURM_JOB_STDIN="/dev/null" 28 | export SLURM_JOB_UID="58546" 29 | export SLURM_JOB_USER="petar.jager" 30 | export SLURM_JOB_WORK_DIR="/users/petar.jager" 31 | -------------------------------------------------------------------------------- /test_e2e/cases/test_03/test.yaml: -------------------------------------------------------------------------------- 1 | init: 2 | test_readme: '${twd}/README.md' 3 | 4 | defaults: 5 | message: "Running test $i from $twd" 6 | systempaths: 7 | - $bwd 8 | 9 | pipeline: 10 | 11 | print_welcome: 12 | description: "Current test" 13 | action: workflow:print 14 | style: 1 15 | 16 | deploy_conf_files: 17 | action: storage:copy 18 | source: 19 | URL: $twd/conf 20 | dest: 21 | URL: /tmp 22 | 23 | deploy_sacct_files: 24 | action: storage:copy 25 | source: 26 | URL: $twd/sacct 27 | dest: 28 | URL: /tmp 29 | 30 | run_goslmailer: 31 | action: exec:extract 32 | checkError: true 33 | env: 34 | GOSLMAILER_CONF: /tmp/goslmailer.conf 35 | commands: 36 | - command: source $twd/slurm_env/slurmenv.sh 37 | - command: goslmailer -s "Slurm Job_id=1052477 Name=endlyJobStart Began, Queued time 2-00:04:18" petar.jager@imba.oeaw.ac.at 38 | extract: 39 | - key: rfile 40 | regExpr: 'Send successful to file: (rendered-1052477-petar.jager@imba.oeaw.ac.at-\d+.json)' 41 | required: true 42 | 43 | debug_extract: 44 | action: workflow:print 45 | message: "GOT: $rfile" 46 | 47 | test_diff: 48 | action: exec:run 49 | checkError: true 50 | commands: 51 | - sed -i -e '/"text":"Created /d' $WorkingDirectory()/$rfile 52 | - diff $WorkingDirectory()/$rfile $twd/results/*.json && echo RESULTS MATCH 53 | 54 | test_assert_goslmailer: 55 | action: validator:assert 56 | expect: 57 | - '~/Send successful to file: rendered-1052477-petar.jager@imba.oeaw.ac.at-\d+.json/' 58 | actual: 59 | - $run_goslmailer.Output 60 | 61 | 62 | # todo: 63 | # add test: 64 | # jq . 
rendered.json >/dev/null || echo FAILED 65 | 66 | -------------------------------------------------------------------------------- /test_e2e/cases/test_04/README.md: -------------------------------------------------------------------------------- 1 | ## test_04 2 | --- 3 | 4 | goslmailer render msteams json to file (actual data) 5 | Job end - fail 6 | 7 | --- 8 | -------------------------------------------------------------------------------- /test_e2e/cases/test_04/conf/gobler.conf: -------------------------------------------------------------------------------- 1 | { 2 | "logfile": "", 3 | "defaultconnector": "msteams", 4 | "connectors": { 5 | "msteams": { 6 | "name": "dev channel", 7 | "renderToFile": "yes", 8 | "spoolDir": "/tmp", 9 | "adaptiveCardTemplate": "/tmp/adaptive_card_template.json", 10 | "url": "http://localhost:9999/", 11 | "useLookup": "no", 12 | "monitorT": "10000ms", 13 | "pickerT": "1000ms", 14 | "psBufLen": "3", 15 | "numSenders": "3", 16 | "maxMsgPU": "6" 17 | } 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /test_e2e/cases/test_04/conf/goslmailer.conf: -------------------------------------------------------------------------------- 1 | { 2 | "logfile": "", 3 | "defaultconnector": "msteams", 4 | "binpaths": { 5 | "sacct": "/tmp/sacct", 6 | "sstat": "/tmp/sstat" 7 | }, 8 | "connectors": { 9 | "msteams": { 10 | "renderToFile": "yes", 11 | "adaptiveCardTemplate": "/tmp/adaptive_card_template.json", 12 | "spoolDir": "/tmp", 13 | "useLookup": "no" 14 | } 15 | }, 16 | "qosmap": { 17 | "RAPID": 3600, 18 | "SHORT": 28800, 19 | "MEDIUM": 172800, 20 | "LONG": 1209600 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /test_e2e/cases/test_04/sacct/sacct: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | cwd=`dirname $0` 4 | cat ${cwd}/sacct.txt 5 | -------------------------------------------------------------------------------- /test_e2e/cases/test_04/sacct/sacct.txt: -------------------------------------------------------------------------------- 1 | endlyJobFail|petar.jager|account|c|stg-c2-0|1|FAILED|2022-05-24T07:43:07|2022-05-24T07:43:07|2022-05-24T07:43:37|08:00:00|00:00:30|00:00:30|00:00.007|00:00.002|00:00.005|4G|||||||| 2 | batch||account||stg-c2-0|1|FAILED|2022-05-24T07:43:07|2022-05-24T07:43:07|2022-05-24T07:43:37||00:00:30|00:00:30|00:00.006|00:00.002|00:00.003||344K|0.00M|0.01M|stg-c2-0|stg-c2-0|stg-c2-0|| 3 | extern||account||stg-c2-0|1|COMPLETED|2022-05-24T07:43:07|2022-05-24T07:43:07|2022-05-24T07:43:37||00:00:30|00:00:30|00:00.001|00:00:00|00:00.001||0|0|0.00M|stg-c2-0|stg-c2-0|stg-c2-0|| 4 | -------------------------------------------------------------------------------- /test_e2e/cases/test_04/slurm_env/slurmenv.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/bash 2 | 3 | export SLURM_ARRAY_JOB_ID="" 4 | export SLURM_ARRAY_TASK_COUNT="" 5 | export SLURM_ARRAY_TASK_ID="" 6 | export SLURM_ARRAY_TASK_MAX="" 7 | export SLURM_ARRAY_TASK_MIN="" 8 | export SLURM_ARRAY_TASK_STEP="" 9 | export SLURM_CLUSTER_NAME="clip" 10 | export SLURM_JOB_ACCOUNT="hpc" 11 | export SLURM_JOB_DERIVED_EC="0" 12 | export SLURM_JOB_EXIT_CODE="256" 13 | export SLURM_JOB_EXIT_CODE2="1:0" 14 | export SLURM_JOB_EXIT_CODE_MAX="1" 15 | export SLURM_JOB_EXIT_CODE_MIN="" 16 | export SLURM_JOB_GID="1999" 17 | export SLURM_JOB_GROUP="is.grp" 18 | export SLURM_JOBID="1052477" 19 | 
export SLURM_JOB_ID="1052477" 20 | export SLURM_JOB_MAIL_TYPE="Failed" 21 | export SLURM_JOB_NAME="endlyJobFail" 22 | export SLURM_JOB_NODELIST="stg-c2-0" 23 | export SLURM_JOB_PARTITION="c" 24 | export SLURM_JOB_QUEUED_TIME="" 25 | export SLURM_JOB_RUN_TIME="00:00:30" 26 | export SLURM_JOB_STATE="FAILED" 27 | export SLURM_JOB_STDIN="/dev/null" 28 | export SLURM_JOB_UID="58546" 29 | export SLURM_JOB_USER="petar.jager" 30 | export SLURM_JOB_WORK_DIR="/users/petar.jager" 31 | -------------------------------------------------------------------------------- /test_e2e/cases/test_04/test.yaml: -------------------------------------------------------------------------------- 1 | init: 2 | test_readme: '${twd}/README.md' 3 | 4 | defaults: 5 | message: "Running test $i from $twd" 6 | systempaths: 7 | - $bwd 8 | 9 | pipeline: 10 | 11 | print_welcome: 12 | description: "Current test" 13 | action: workflow:print 14 | style: 1 15 | 16 | deploy_conf_files: 17 | action: storage:copy 18 | source: 19 | URL: $twd/conf 20 | dest: 21 | URL: /tmp 22 | 23 | deploy_sacct_files: 24 | action: storage:copy 25 | source: 26 | URL: $twd/sacct 27 | dest: 28 | URL: /tmp 29 | 30 | run_goslmailer: 31 | action: exec:extract 32 | checkError: true 33 | env: 34 | GOSLMAILER_CONF: /tmp/goslmailer.conf 35 | commands: 36 | - command: source $twd/slurm_env/slurmenv.sh 37 | - command: goslmailer -s "Slurm Job_id=1052477 Name=endlyJobFail Failed, Run time 00:00:30, FAILED, ExitCode 1" petar.jager@imba.oeaw.ac.at 38 | extract: 39 | - key: rfile 40 | regExpr: 'Send successful to file: (rendered-1052477-petar.jager@imba.oeaw.ac.at-\d+.json)' 41 | required: true 42 | 43 | debug_extract: 44 | action: workflow:print 45 | message: "GOT: $rfile" 46 | 47 | test_diff: 48 | action: exec:run 49 | checkError: true 50 | commands: 51 | - sed -i -e '/"text":"Created /d' $WorkingDirectory()/$rfile 52 | - diff $WorkingDirectory()/$rfile $twd/results/*.json && echo RESULTS MATCH 53 | 54 | test_assert_goslmailer: 55 | action: validator:assert 56 | expect: 57 | - '~/Send successful to file: rendered-1052477-petar.jager@imba.oeaw.ac.at-\d+.json/' 58 | actual: 59 | - $run_goslmailer.Output 60 | 61 | # todo: 62 | # add test: 63 | # jq . 
rendered.json > /dev/null || echo FAILED -------------------------------------------------------------------------------- /test_e2e/cases/test_05/README.md: -------------------------------------------------------------------------------- 1 | ## test_05 2 | --- 3 | 4 | Test goslmailer on SLURM versions (<21.8.x) that don't set the job information in as env variables 5 | 6 | --- 7 | -------------------------------------------------------------------------------- /test_e2e/cases/test_05/conf/gobler.conf: -------------------------------------------------------------------------------- 1 | { 2 | "logfile": "", 3 | "defaultconnector": "msteams", 4 | "connectors": { 5 | "msteams": { 6 | "name": "dev channel", 7 | "renderToFile": "yes", 8 | "spoolDir": "/tmp", 9 | "adaptiveCardTemplate": "/tmp/adaptive_card_template.json", 10 | "url": "http://localhost:9999/", 11 | "useLookup": "no", 12 | "monitorT": "10000ms", 13 | "pickerT": "1000ms", 14 | "psBufLen": "3", 15 | "numSenders": "3", 16 | "maxMsgPU": "6" 17 | } 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /test_e2e/cases/test_05/conf/goslmailer.conf: -------------------------------------------------------------------------------- 1 | { 2 | "logfile": "", 3 | "defaultconnector": "msteams", 4 | "binpaths": { 5 | "sacct": "/tmp/sacct", 6 | "sstat": "/tmp/sstat" 7 | }, 8 | "connectors": { 9 | "msteams": { 10 | "renderToFile": "yes", 11 | "adaptiveCardTemplate": "/tmp/adaptive_card_template.json", 12 | "spoolDir": "/tmp", 13 | "useLookup": "no" 14 | } 15 | }, 16 | "qosmap": { 17 | "RAPID": 3600, 18 | "SHORT": 28800, 19 | "MEDIUM": 172800, 20 | "LONG": 1209600 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /test_e2e/cases/test_05/results/rendered-1052477-petar.jager@imba.oeaw.ac.at-1653378962712164702.json: -------------------------------------------------------------------------------- 1 | { 2 | "type":"message", 3 | "attachments":[ 4 | { 5 | "contentType":"application/vnd.microsoft.card.adaptive", 6 | "content":{ 7 | "type":"AdaptiveCard", 8 | "body":[ 9 | { 10 | "type":"TextBlock", 11 | "size":"medium", 12 | "weight":"bolder", 13 | "text":"CBE Slurm job info", 14 | "style":"heading" 15 | }, 16 | { 17 | "type":"ColumnSet", 18 | "columns":[ 19 | { 20 | "type":"Column", 21 | "items":[ 22 | { 23 | "type":"Image", 24 | "style":"person", 25 | "url":"https://upload.wikimedia.org/wikipedia/commons/thumb/3/3a/Slurm_logo.svg/590px-Slurm_logo.svg.png", 26 | "size":"small" 27 | } 28 | ], 29 | "width":"auto" 30 | }, 31 | { 32 | "type":"Column", 33 | "items":[ 34 | { 35 | "type":"TextBlock", 36 | "weight":"bolder", 37 | "text":"Job 1052477 Began", 38 | "wrap":true, 39 | "size":"Large", 40 | "color":"Good" 41 | }, 42 | { 43 | "type":"TextBlock", 44 | "spacing":"none", 45 | 46 | "isSubtle":true, 47 | "wrap":true 48 | } 49 | ], 50 | "width":"stretch" 51 | } 52 | ] 53 | }, 54 | 55 | { 56 | "type":"FactSet", 57 | "separator":true, 58 | "spacing":"large", 59 | "isVisible":"true", 60 | "id":"ExternalData", 61 | "facts":[ 62 | { 63 | "type":"Fact", 64 | "title":"Job name", 65 | "value":"endlyJobStart" 66 | }, 67 | { 68 | "type":"Fact", 69 | "title":"Job ID", 70 | "value":"1052477" 71 | }, 72 | { 73 | "type":"Fact", 74 | "title":"User", 75 | "value":"petar.jager" 76 | }, 77 | { 78 | "type":"Fact", 79 | "title":"Partition", 80 | "value":"c" 81 | }, 82 | { 83 | "type":"Fact", 84 | "title":"Compute Nodes Used", 85 | "value":"stg-c2-0" 86 | }, 87 | { 88 | "type":"Fact", 89 
| "title":"Cores", 90 | "value":"1" 91 | }, 92 | { 93 | "type":"Fact", 94 | "title":"Job state", 95 | "value":"RUNNING" 96 | }, 97 | 98 | { 99 | "type":"Fact", 100 | "title":"Submit", 101 | "value":"2022-05-24T07:43:07" 102 | }, 103 | { 104 | "type":"Fact", 105 | "title":"Start", 106 | "value":"2022-05-24T07:43:07" 107 | }, 108 | 109 | { 110 | "type":"Fact", 111 | "title":"Reserved Walltime", 112 | "value":"08:00:00" 113 | }, 114 | 115 | { 116 | "type":"Fact", 117 | "title":"Memory Requested", 118 | "value":"4.3 GB" 119 | }, 120 | 121 | ] 122 | }, 123 | 124 | { 125 | "type":"FactSet", 126 | "separator":true, 127 | "spacing":"large", 128 | "isVisible":"false", 129 | "id":"InternalData", 130 | "facts":[ 131 | { 132 | "type":"Fact", 133 | "title":"User", 134 | "value":"petar.jager" 135 | }, 136 | { 137 | "type":"Fact", 138 | "title":"JobStatus", 139 | "value":"Began" 140 | } 141 | ] 142 | } 143 | ], 144 | "actions":[ 145 | { 146 | "type":"Action.OpenUrl", 147 | "title":"View Google", 148 | "url":"https://www.youtube.com/watch?v=dQw4w9WgXcQ" 149 | } 150 | ], 151 | "$schema":"http://adaptivecards.io/schemas/adaptive-card.json", 152 | "version":"1.2", 153 | "msteams":{ 154 | "entities":[ 155 | { 156 | "type":"mention", 157 | "text":"petar.jager", 158 | "mentioned":{ 159 | "id":"petar.jager@imba.oeaw.ac.at", 160 | "name":"petar.jager" 161 | } 162 | } 163 | ] 164 | } 165 | } 166 | } 167 | ] 168 | } 169 | -------------------------------------------------------------------------------- /test_e2e/cases/test_05/sacct/sacct: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | cwd=`dirname $0` 3 | cat ${cwd}/sacct_$2.txt 4 | -------------------------------------------------------------------------------- /test_e2e/cases/test_05/sacct/sacct_1052477.txt: -------------------------------------------------------------------------------- 1 | endlyJobStart|petar.jager|account|c|stg-c2-0|1|RUNNING|2022-05-24T07:43:07|2022-05-24T07:43:07|Unknown|08:00:00|00:00:14|00:00:14|00:00:00|00:00:00|00:00:00|4G|||||||| 2 | batch||account||stg-c2-0|1|RUNNING|2022-05-24T07:43:07|2022-05-24T07:43:07|Unknown||00:00:14|00:00:14|00:00:00|00:00:00|00:00:00||||||||| 3 | extern||account||stg-c2-0|1|RUNNING|2022-05-24T07:43:07|2022-05-24T07:43:07|Unknown||00:00:14|00:00:14|00:00:00|00:00:00|00:00:00||||||||| 4 | -------------------------------------------------------------------------------- /test_e2e/cases/test_05/sacct/sstat: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | cwd=`dirname $0` 4 | cat ${cwd}/sstat_$2.txt 5 | -------------------------------------------------------------------------------- /test_e2e/cases/test_05/sacct/sstat_1052477.txt: -------------------------------------------------------------------------------- 1 | 1052477.extern|0|0|2012|stg-c2-0|stg-c2-0|stg-c2-0| 2 | 1052477.batch|344K|36|8267|stg-c2-0|stg-c2-0|stg-c2-0| 3 | -------------------------------------------------------------------------------- /test_e2e/cases/test_05/slurm_env/slurmenv.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/bash 2 | 3 | unset SLURM_JOB_NAME 4 | unset SLURM_JOB_GROUP 5 | unset SLURM_JOB_STATE 6 | unset SLURM_ARRAY_JOB_ID 7 | unset SLURM_JOB_WORK_DIR 8 | unset SLURM_JOB_MAIL_TYPE 9 | unset SLURM_JOBID 10 | unset SLURM_ARRAY_TASK_ID 11 | unset SLURM_JOB_RUN_TIME 12 | unset SLURM_ARRAY_TASK_COUNT 13 | unset SLURM_JOB_EXIT_CODE2 14 | unset 
SLURM_JOB_DERIVED_EC 15 | unset SLURM_JOB_ID 16 | unset SLURM_JOB_USER 17 | unset SLURM_ARRAY_TASK_MAX 18 | unset SLURM_JOB_EXIT_CODE 19 | unset SLURM_JOB_UID 20 | unset SLURM_JOB_NODELIST 21 | unset SLURM_ARRAY_TASK_MIN 22 | unset SLURM_JOB_STDIN 23 | unset SLURM_ARRAY_TASK_STEP 24 | unset SLURM_JOB_EXIT_CODE_MAX 25 | unset SLURM_JOB_GID 26 | unset SLURM_CLUSTER_NAME 27 | unset SLURM_JOB_PARTITION 28 | unset SLURM_JOB_ACCOUNT 29 | -------------------------------------------------------------------------------- /test_e2e/cases/test_05/test.yaml: -------------------------------------------------------------------------------- 1 | init: 2 | test_readme: '${twd}/README.md' 3 | 4 | defaults: 5 | message: "Running test $i from $twd" 6 | systempaths: 7 | - $bwd 8 | 9 | pipeline: 10 | 11 | print_welcome: 12 | description: "Current test" 13 | action: workflow:print 14 | style: 1 15 | 16 | deploy_conf_files: 17 | action: storage:copy 18 | source: 19 | URL: $twd/conf 20 | dest: 21 | URL: /tmp 22 | 23 | deploy_sacct_files: 24 | action: storage:copy 25 | source: 26 | URL: $twd/sacct 27 | dest: 28 | URL: /tmp 29 | 30 | run_goslmailer: 31 | action: exec:extract 32 | checkError: true 33 | env: 34 | GOSLMAILER_CONF: /tmp/goslmailer.conf 35 | commands: 36 | - command: source $twd/slurm_env/slurmenv.sh 37 | - command: goslmailer -s "Slurm Job_id=1052477 Name=endlyJobStart Began, Queued time 2-00:04:18" petar.jager@imba.oeaw.ac.at 38 | extract: 39 | - key: rfile 40 | regExpr: 'Send successful to file: (rendered-1052477-petar.jager@imba.oeaw.ac.at-\d+.json)' 41 | required: true 42 | 43 | debug_extract: 44 | action: workflow:print 45 | message: "GOT: $rfile" 46 | 47 | test_diff: 48 | action: exec:run 49 | checkError: true 50 | commands: 51 | - sed -i -e '/"text":"Created /d' $WorkingDirectory()/$rfile 52 | - diff $WorkingDirectory()/$rfile $twd/results/*.json && echo RESULTS MATCH 53 | 54 | test_assert_goslmailer: 55 | action: validator:assert 56 | expect: 57 | - '~/Send successful to file: rendered-1052477-petar.jager@imba.oeaw.ac.at-\d+.json/' 58 | actual: 59 | - $run_goslmailer.Output 60 | 61 | 62 | # todo: 63 | # add test: 64 | # jq . rendered.json >/dev/null || echo FAILED 65 | 66 | -------------------------------------------------------------------------------- /test_e2e/cases/test_06/README.md: -------------------------------------------------------------------------------- 1 | ## test_06 2 | --- 3 | 4 | 1. run goslmailer 3 times, save gobs (telegram,toml) 5 | 2. run gobler (telegram,toml), trim messages ("maxMsgPU": "1"), render to file and check output 6 | 7 | --- 8 | -------------------------------------------------------------------------------- /test_e2e/cases/test_06/conf/gobler.toml: -------------------------------------------------------------------------------- 1 | # 2 | # gobler annotated configuration file 3 | # 4 | # note: configuration file syntax is the same as for goslmailer, only the differences are commented here 5 | # 6 | 7 | logfile = "/tmp/gobler_test06.log" 8 | defaultconnector = "msteams" 9 | 10 | [connectors.msteams] 11 | name = "gobler.conf" 12 | renderToFile = "no" 13 | spoolDir = "/tmp/msteams" 14 | adaptiveCardTemplate = "/etc/slurm/adaptive_card_template.json" 15 | url = "https://msteams/webhook/url" 16 | useLookup = "GECOS" 17 | # 18 | # Gobler-specific configuration: set these in every connector config that supports spooling; they are used when spooling is enabled.
19 | # 20 | # monitor period, how often the `monitor` goroutine will scan the spoolDir for new gobs (if "ms" is omitted, default T is in seconds) 21 | monitorT = "20000ms" 22 | # 23 | # picker period, how often the `picker` goroutine will pick the next message to send to `sender` 24 | # Tune depending on the endpoint throughput capability. 25 | # note: sender picks up and tries to send the message immediately, so this determines how fast the messages are sent out. 26 | pickerT = "5000ms" 27 | # 28 | # picker-sender buffer length 29 | # How many undelivered messages can the `picker` send to `sender` without blocking. 30 | # Tune depending on the endpoint throughput capability. 31 | psBufLen = "3" 32 | # 33 | # number of `sender` goroutines 34 | # Multiple `sender`s can wait for messages from the `picker`. 35 | # Tune depending on the endpoint throughput capability. 36 | numSenders = "4" 37 | # 38 | # maximum messages per user 39 | # On receipt of new messages from the `monitor`, `picker` will scan the list and delete the latest messages above this limit. 40 | # The number of deleted messages is recorded in the messagepack structure and can be referenced like this: `{{ .Job.PrunedMessageCount }}` 41 | # in the template to notify the user that some of their messages were deleted. (A Go sketch of this monitor/picker/sender flow is appended at the end of this listing.) 42 | maxMsgPU = "3" 43 | 44 | [connectors.telegram] 45 | name = "telegram bot connector" 46 | url = "" 47 | token = "PasteHereTelegramBotToken" 48 | renderToFile = "yes" 49 | spoolDir = "/tmp" 50 | messageTemplate = "/tmp/telegramTemplate.html" 51 | useLookup = "no" 52 | format = "HTML" 53 | monitorT = "5000ms" 54 | pickerT = "1000ms" 55 | psBufLen = "3" 56 | numSenders = "3" 57 | maxMsgPU = "1" 58 | 59 | [connectors.textfile] 60 | path = "/tmp" 61 | -------------------------------------------------------------------------------- /test_e2e/cases/test_06/conf/goslmailer.toml: -------------------------------------------------------------------------------- 1 | # 2 | # Annotated goslmailer configuration file 3 | # 4 | 5 | # if specified, append logs to this file; else, dump to stderr 6 | logfile = "" 7 | 8 | # default connector to be used for message delivery for receivers without full 'connector:user' specification 9 | defaultconnector = "msteams" 10 | 11 | # paths to slurm binaries (optional, will default to these if not specified) 12 | [binpaths] 13 | sacct = "/tmp/sacct" 14 | sstat = "/tmp/sstat" 15 | 16 | # map of connector configurations, remove any connectors that you don't wish to expose to the users 17 | [connectors] 18 | 19 | # each connector has its own map of config attributes: 20 | [connectors.msteams] 21 | name = "dev channel" # unused 22 | renderToFile = "yes" # debug render of message to local file instead of sending ("yes" - render to file, "no" - send, "spool" - spool for gobler) 23 | spoolDir = "/tmp" # dir to use for spooling, remove if spooling not used 24 | url = "https://msteams/webhook/url" # ms teams webhook url 25 | adaptiveCardTemplate = "/path/template.json" # full path to adaptive card template file 26 | useLookup = "GECOS" # which function from lookup package the connector uses to map cmdline userid to end-system userid 27 | # available lookups ("GECOS", "none") 28 | 29 | [connectors.mailto] 30 | name = "original mail functionality, extended."
# unused 31 | mailCmd = "/usr/bin/mutt" # mail client to use 32 | # mailCmdParams: templateable command line to be passed to mailCmd 33 | mailCmdParams = "-s \"Job {{ .SlurmEnvironment.SLURM_JOB_ID }} ({{ .SlurmEnvironment.SLURM_JOB_NAME }}) {{ .SlurmEnvironment.SLURM_JOB_MAIL_TYPE }}\"" 34 | mailTemplate = "/etc/slurm/mailTemplate.tmpl" # message body template 35 | mailFormat = "HTML" # `HTML` or `text` (can use telegram html in templates/) 36 | allowList = ".+@(imp|imba.oeaw|gmi.oeaw).ac.at" # golang re2 expression : https://github.com/google/re2/wiki/Syntax 37 | 38 | [connectors.telegram] 39 | name = "telegram bot" # bot uses this in hello message "Welcome to "name"" 40 | url = "" # unused, leave empty, might change in the future 41 | token = "PasteHereTelegramBotToken" # token obtained when creating the bot with botfather 42 | renderToFile = "spool" # debug render of message to local file instead of sending ("yes" - render to file, "no" - send, "spool" - spool for gobler) 43 | spoolDir = "/tmp" # where to spool 44 | messageTemplate = "/tmp/telegramTemplate.html" # template file 45 | useLookup = "no" # remove if not using custom lookup functions 46 | format = "HTML" # options: "MarkdownV2", "HTML", depending on the template used 47 | 48 | [connectors.discord] 49 | name = "DiscoSlurmBot" # name that is used in the bot welcome message 50 | triggerString = "showmeslurm" # string (in channel or DM) that triggers the bot to respond with an instructional DM to the user 51 | token = "PasteBotTokenHere" # place to put the bot token 52 | messageTemplate = "/path/to/template.md" # template file to use 53 | 54 | [connectors.matrix] 55 | username = "@myuser:matrix.org" 56 | token = "syt_dGRpZG9ib3QXXXXXXXEyQMBEmvOVp_10Jm93" 57 | homeserver = "matrix.org" 58 | template = "/path/to/matrix_template.md" 59 | 60 | # fictitious "textfile" connector, package code for it doesn't exist, implementation left as an exercise for the reader 61 | [connectors.textfile] 62 | path = "/tmp" 63 | 64 | # map of your site's configured QoSes, with their timelimits (seconds), used for hint generation 65 | [qosmap] 66 | RAPID = 3600 67 | SHORT = 28800 68 | MEDIUM = 172800 69 | LONG = 1209600 -------------------------------------------------------------------------------- /test_e2e/cases/test_06/conf/telegramTemplate.html: -------------------------------------------------------------------------------- 1 | {{ .Job.MailSubject }} {{ .Job.SlurmEnvironment.SLURM_JOB_MAIL_TYPE }} 2 | Created {{ .Created }} 3 | {{ if ne .Job.PrunedMessageCount 0 }} 4 | WARNING: Rate limiting triggered. {{ .Job.PrunedMessageCount }} additional notifications have been suppressed 5 | {{ end }} 6 |
------------------------------
 7 | Job Name         : {{ .Job.SlurmEnvironment.SLURM_JOB_NAME }}
 8 | Job ID           : {{ .Job.SlurmEnvironment.SLURM_JOB_ID }}
 9 | User             : {{ .Job.SlurmEnvironment.SLURM_JOB_USER }}
10 | Partition        : {{ .Job.SlurmEnvironment.SLURM_JOB_PARTITION }}
11 | Nodes Used       : {{ .Job.SlurmEnvironment.SLURM_JOB_NODELIST }}
12 | Cores            : {{ .Job.JobStats.Ncpus }}
13 | Job state        : {{ .Job.SlurmEnvironment.SLURM_JOB_STATE }}
14 | {{- if ne .Job.SlurmEnvironment.SLURM_JOB_STATE "RUNNING" }}
15 | Exit Code        : {{ .Job.SlurmEnvironment.SLURM_JOB_EXIT_CODE_MAX }}
16 | {{- end }}
17 | Submit           : {{ .Job.JobStats.Submittime }}
18 | Start            : {{ .Job.JobStats.Starttime }}
19 | {{- if ne .Job.SlurmEnvironment.SLURM_JOB_STATE "RUNNING" }}
20 | End              : {{ .Job.JobStats.Endtime }}
21 | {{- end }}
22 | Res. Walltime    : {{ .Job.JobStats.WalltimeStr }}
23 | {{- if ne .Job.SlurmEnvironment.SLURM_JOB_MAIL_TYPE "Began" }}
24 | Used Walltime    : {{ .Job.SlurmEnvironment.SLURM_JOB_RUN_TIME }}
25 | {{- if ne .Job.SlurmEnvironment.SLURM_JOB_STATE "RUNNING" }}
26 | Used CPU time    : {{ .Job.JobStats.TotalCPUStr }}
27 | % User (Comp)    : {{ printf "%5.2f%%" .Job.JobStats.CalcUserComputePercentage }}
28 | % System (I/O)   : {{ printf "%5.2f%%" .Job.JobStats.CalcSystemComputePercentage }}
29 | {{- end }}
30 | {{- end }}
31 | Memory Requested : {{ .Job.JobStats.ReqMem | humanBytes }}
32 | {{- if ne .Job.SlurmEnvironment.SLURM_JOB_MAIL_TYPE "Began" }}
33 | Max Memory Used  : {{ .Job.JobStats.MaxRSS | humanBytes }}
34 | Max Disk Write   : {{ .Job.JobStats.MaxDiskWrite | humanBytes }}
35 | Max Disk Read    : {{ .Job.JobStats.MaxDiskRead | humanBytes }}
36 | {{- end }}
37 | ------------------------------
38 | {{- range .Job.Hints }} 39 | - {{ . }} 40 | 41 | {{- end }} 42 | -------------------------------------------------------------------------------- /test_e2e/cases/test_06/sacct/sacct: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | cwd=`dirname $0` 4 | cat ${cwd}/sacct.txt 5 | -------------------------------------------------------------------------------- /test_e2e/cases/test_06/sacct/sacct.txt: -------------------------------------------------------------------------------- 1 | endlyJobStart|petar.jager|account|c|stg-c2-0|1|RUNNING|2022-05-24T07:43:07|2022-05-24T07:43:07|Unknown|08:00:00|00:00:14|00:00:14|00:00:00|00:00:00|00:00:00|4G|||||||| 2 | batch||account||stg-c2-0|1|RUNNING|2022-05-24T07:43:07|2022-05-24T07:43:07|Unknown||00:00:14|00:00:14|00:00:00|00:00:00|00:00:00||||||||| 3 | extern||account||stg-c2-0|1|RUNNING|2022-05-24T07:43:07|2022-05-24T07:43:07|Unknown||00:00:14|00:00:14|00:00:00|00:00:00|00:00:00||||||||| 4 | -------------------------------------------------------------------------------- /test_e2e/cases/test_06/sacct/sstat: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | cwd=`dirname $0` 4 | cat ${cwd}/sstat.txt 5 | -------------------------------------------------------------------------------- /test_e2e/cases/test_06/sacct/sstat.txt: -------------------------------------------------------------------------------- 1 | 1052477.extern|0|0|2012|stg-c2-0|stg-c2-0|stg-c2-0| 2 | 1052477.batch|344K|36|8267|stg-c2-0|stg-c2-0|stg-c2-0| 3 | -------------------------------------------------------------------------------- /test_e2e/cases/test_06/slurm_env/slurmenv.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/bash 2 | 3 | export SLURM_ARRAY_JOB_ID="" 4 | export SLURM_ARRAY_TASK_COUNT="" 5 | export SLURM_ARRAY_TASK_ID="" 6 | export SLURM_ARRAY_TASK_MAX="" 7 | export SLURM_ARRAY_TASK_MIN="" 8 | export SLURM_ARRAY_TASK_STEP="" 9 | export SLURM_CLUSTER_NAME="clip" 10 | export SLURM_JOB_ACCOUNT="hpc" 11 | export SLURM_JOB_DERIVED_EC="" 12 | export SLURM_JOB_EXIT_CODE="" 13 | export SLURM_JOB_EXIT_CODE2="" 14 | export SLURM_JOB_EXIT_CODE_MAX="" 15 | export SLURM_JOB_EXIT_CODE_MIN="" 16 | export SLURM_JOB_GID="1999" 17 | export SLURM_JOB_GROUP="is.grp" 18 | export SLURM_JOBID="1052477" 19 | export SLURM_JOB_ID="1052477" 20 | export SLURM_JOB_MAIL_TYPE="Began" 21 | export SLURM_JOB_NAME="endlyJobStart" 22 | export SLURM_JOB_NODELIST="stg-c2-0" 23 | export SLURM_JOB_PARTITION="c" 24 | export SLURM_JOB_QUEUED_TIME="00:00:00" 25 | export SLURM_JOB_RUN_TIME="" 26 | export SLURM_JOB_STATE="RUNNING" 27 | export SLURM_JOB_STDIN="/dev/null" 28 | export SLURM_JOB_UID="58546" 29 | export SLURM_JOB_USER="petar.jager" 30 | export SLURM_JOB_WORK_DIR="/users/petar.jager" 31 | -------------------------------------------------------------------------------- /test_e2e/cases/test_06/test.yaml: -------------------------------------------------------------------------------- 1 | init: 2 | 3 | defaults: 4 | message: "Running test $i from $twd" 5 | systempaths: 6 | - $bwd 7 | 8 | pipeline: 9 | 10 | print_welcome: 11 | description: "Current test" 12 | action: workflow:print 13 | style: 1 14 | 15 | deploy_conf_files: 16 | action: storage:copy 17 | source: 18 | URL: $twd/conf 19 | dest: 20 | URL: /tmp 21 | 22 | deploy_sacct_files: 23 | action: storage:copy 24 | source: 25 | URL: $twd/sacct 26 | dest: 27 | URL: /tmp 28 | 29 | 
run_goslmailer: 30 | action: exec:run 31 | checkError: true 32 | env: 33 | GOSLMAILER_CONF: /tmp/goslmailer.toml 34 | commands: 35 | - source $twd/slurm_env/slurmenv.sh 36 | - goslmailer -s "Slurm Job_id=1052477 Name=endlyJobStart Began, Queued time 2-00:04:18" telegram:12345 37 | - goslmailer -s "Slurm Job_id=1052477 Name=endlyJobStart Began, Queued time 2-00:04:18" telegram:12345 38 | - goslmailer -s "Slurm Job_id=1052477 Name=endlyJobStart Began, Queued time 2-00:04:18" telegram:12345 39 | 40 | test_assert_goslmailer: 41 | action: validator:assert 42 | expect: 43 | - '/Deposit gob OK!/' 44 | - '/Deposit gob OK!/' 45 | - '/Deposit gob OK!/' 46 | actual: 47 | - $run_goslmailer.Cmd[1].Stdout 48 | - $run_goslmailer.Cmd[2].Stdout 49 | - $run_goslmailer.Cmd[3].Stdout 50 | 51 | clear_gobler_log: 52 | action: exec:run 53 | checkError: false 54 | commands: 55 | - truncate -s0 /tmp/gobler_test06.log 56 | 57 | run_gobler: 58 | action: process:start 59 | watch: true 60 | immuneToHangups: true 61 | command: gobler 62 | arguments: 63 | - -c 64 | - /tmp/gobler.toml 65 | 66 | run_sleep: 67 | action: exec:run 68 | checkError: true 69 | commands: 70 | - sleep 5 71 | 72 | stop_gobler: 73 | action: process:stop 74 | pid: $run_gobler.Pid 75 | 76 | 77 | # debug_gobler: 78 | # action: workflow:print 79 | # message: $AsJSON($run_gobler) 80 | 81 | read_gobler_log: 82 | action: exec:run 83 | checkError: true 84 | commands: 85 | - cat /tmp/gobler_test06.log 86 | 87 | 88 | # https://github.com/viant/assertly#validation 89 | test_assert_gobler: 90 | action: validator:assert 91 | expect: 92 | - '~/Send successful to file: rendered-1052477-12345-/' 93 | - '~/SENDER telegram#\d: Gob deleted/' 94 | actual: 95 | - $read_gobler_log.Cmd[0].Stdout 96 | - $read_gobler_log.Cmd[0].Stdout 97 | 98 | grep_suppression: 99 | action: exec:run 100 | checkError: true 101 | env: 102 | TERM: dumb 103 | commands: 104 | - grep "Rate limiting triggered. 2 additional notifications have been suppressed" $WorkingDirectory()/rendered-1052477-12345-*.md && echo "FOUND rate limiting" 105 | 106 | test_grep_suppression: 107 | action: validator:assert 108 | expect: 109 | - '~/FOUND rate limiting/' 110 | actual: 111 | - $grep_suppression.Cmd[0].Stdout 112 | 113 | # todo: 114 | # add test: 115 | # jq . rendered.json >/dev/null || echo FAILED 116 | 117 | # debug_gosl: 118 | # action: workflow:print 119 | # message: $AsJSON($run_goslmailer) 120 | 121 | # catch: 122 | # description: "ERROR CAUGHT BUT GOOD!" 123 | # action: workflow:print 124 | # message: CAUGHT $error.Error 125 | 126 | -------------------------------------------------------------------------------- /test_e2e/run.yaml: -------------------------------------------------------------------------------- 1 | # defaults node defines attributes that will be merged with every executable node.
2 | # here, message will be used throughout the workflow wherever it's not specified 3 | defaults: 4 | init: 5 | 6 | pipeline: 7 | loop_over_tests: 8 | range: 00..06 9 | subPath: cases/test_${index} 10 | template: 11 | 12 | setup_print: 13 | action: workflow:print 14 | message: "Running case ${index} on path $path" 15 | 16 | print_test_case: 17 | description: "Test $index README" 18 | action: workflow:print 19 | message: $Cat('cases/test_${index}/README.md') 20 | 21 | run_test: 22 | action: workflow:run 23 | request: '@cases/test_${index}/test' 24 | params: 25 | i: $index 26 | twd: cases/test_${index} # test working directory 27 | bwd: $WorkingDirectory(../) # binary working directory 28 | 29 | #do_assertions: 30 | # action: workflow:print 31 | # message: do_assertions 32 | 33 | #task_assert: 34 | # test_run_action: 35 | # action: exec:run 36 | # commands: 37 | # - /usr/bin/hostname 38 | # - echo "Hello!" 39 | 40 | #defer: 41 | # action: print 42 | # message: always run 43 | --------------------------------------------------------------------------------
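
The annotated gobler.toml in test_e2e/cases/test_06/conf above explains monitorT, pickerT, psBufLen, numSenders and maxMsgPU only in prose. The sketch below is a minimal, self-contained Go illustration of how such a monitor/picker/sender pipeline could fit together; every name and the pruning logic here are assumptions made for the example, not gobler's actual implementation (the real code lives in cmd/gobler/monitor.go, picker.go and sender.go).

```go
package main

import (
	"fmt"
	"sort"
	"time"
)

// gob is a spooled message picked up from spoolDir (illustrative only).
type gob struct {
	user    string
	created time.Time
	payload string
}

func main() {
	const (
		monitorT   = 20 * time.Second // how often the spool dir is scanned
		pickerT    = 5 * time.Second  // how often one gob is handed to a sender
		psBufLen   = 3                // picker -> sender buffer length
		numSenders = 4                // concurrent sender goroutines
		maxMsgPU   = 3                // newest gobs kept per user; the rest are pruned
	)

	found := make(chan []gob)         // monitor -> picker
	ready := make(chan gob, psBufLen) // picker -> senders

	// monitor: scan the spool dir every monitorT and hand new gobs to the picker.
	go func() {
		for range time.Tick(monitorT) {
			found <- scanSpool()
		}
	}()

	// picker: prune each user's backlog to maxMsgPU, then feed senders at pickerT pace.
	go func() {
		for gobs := range found {
			for _, g := range prunePerUser(gobs, maxMsgPU) {
				ready <- g // blocks once psBufLen undelivered messages are queued
				time.Sleep(pickerT)
			}
		}
	}()

	// senders: numSenders goroutines deliver (or render to file) and delete the gob.
	for i := 0; i < numSenders; i++ {
		go func(id int) {
			for g := range ready {
				fmt.Printf("SENDER #%d: sending %q for user %s\n", id, g.payload, g.user)
			}
		}(i)
	}

	select {} // run forever, daemon-style
}

// prunePerUser keeps only the `max` newest gobs per user and drops the rest.
func prunePerUser(gobs []gob, max int) []gob {
	sort.Slice(gobs, func(i, j int) bool { return gobs[i].created.After(gobs[j].created) })
	kept := []gob{}
	perUser := map[string]int{}
	for _, g := range gobs {
		if perUser[g.user] < max {
			kept = append(kept, g)
			perUser[g.user]++
		}
	}
	return kept
}

// scanSpool stands in for reading gob files from spoolDir.
func scanSpool() []gob { return nil }
```

In this reading, the buffered `ready` channel is what psBufLen tunes: once it is full, the picker blocks instead of flooding slow senders, and pickerT throttles how quickly messages leave the spool.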
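
The telegramTemplate.html fixture above pipes job fields through helpers such as `humanBytes` and `printf`. The fragment below shows the general mechanism with Go's text/template and a FuncMap; the `humanBytes` implementation and the field names are stand-ins for the example, not goslmailer's internal/renderer code.

```go
package main

import (
	"fmt"
	"os"
	"text/template"
)

// humanBytes is a stand-in helper that formats a byte count in binary units.
func humanBytes(b uint64) string {
	const unit = 1024
	if b < unit {
		return fmt.Sprintf("%d B", b)
	}
	div, exp := uint64(unit), 0
	for n := b / unit; n >= unit; n /= unit {
		div *= unit
		exp++
	}
	return fmt.Sprintf("%.1f %ciB", float64(b)/float64(div), "KMGTPE"[exp])
}

func main() {
	// Fake job stats standing in for the .Job.JobStats fields used in the template.
	stats := map[string]uint64{
		"ReqMem": 4 * 1024 * 1024 * 1024, // requested memory
		"MaxRSS": 344 * 1024,             // max memory used
	}

	// Register the helper before parsing, as any FuncMap-based renderer must.
	tmpl := template.Must(template.New("facts").Funcs(template.FuncMap{
		"humanBytes": humanBytes,
	}).Parse("Memory Requested : {{ .ReqMem | humanBytes }}\nMax Memory Used  : {{ .MaxRSS | humanBytes }}\n"))

	if err := tmpl.Execute(os.Stdout, stats); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```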
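
Several connector configs above set `useLookup = "GECOS"`, described as mapping the command-line userid to an end-system userid. A hypothetical sketch of such a lookup, reading the GECOS field from the passwd database, might look like the following; goslmailer's real lookups live in internal/lookup and may well differ.

```go
package main

import (
	"fmt"
	"os/user"
	"strings"
)

// gecosName maps a command-line userid to the full-name part of its GECOS
// field, falling back to the userid itself when no entry is found.
func gecosName(userid string) string {
	u, err := user.Lookup(userid)
	if err != nil || u.Name == "" {
		return userid
	}
	// Keep only the first comma-separated GECOS item (the full name).
	return strings.SplitN(u.Name, ",", 2)[0]
}

func main() {
	fmt.Println(gecosName("root")) // prints the account's full name, or "root" as fallback
}
```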