├── .dockerignore ├── .env.example ├── .gitignore ├── .gitmodules ├── Dockerfile ├── LICENSE ├── Makefile ├── README.md ├── consumer.go ├── docker-compose.hub.yml ├── docker-compose.yml ├── errors.go ├── go.mod ├── go.sum ├── handlers.go ├── main.go ├── queue.go └── task.go /.dockerignore: -------------------------------------------------------------------------------- 1 | go-llama.cpp/ -------------------------------------------------------------------------------- /.env.example: -------------------------------------------------------------------------------- 1 | ## required 2 | TG_TOKEN=6082407582:AFFS2uRCE-miM3tkKdxfW_EBTSdVL5_PV8g 3 | MODEL_PATH=/root/Wizard-Vicuna-13B-Uncensored.ggmlv3.q4_0.bin 4 | 5 | ## optional 6 | # N_TOKENS=1024 7 | # Q_SIZE=1000 8 | # N_CPU=8 9 | # SINGLE_MESSAGE_PROMPT="### User: Response to my next request. %s ### Assistant:" 10 | # REPLY_MESSAGE_PROMPT="### Assistant: %s ### User: %s \n### Assistant:" 11 | # STOP_WORD="###" -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.gguf 2 | *.ggml -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | 2 | [submodule "go-llama.cpp"] 3 | path = go-llama.cpp 4 | url = https://github.com/go-skynet/go-llama.cpp 5 | 6 | 7 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang as builder 2 | RUN apt-get update && apt-get install -y cmake build-essential 3 | WORKDIR /build 4 | COPY . ./ 5 | RUN git submodule update --init --recursive && make && C_INCLUDE_PATH=/build/go-llama.cpp LIBRARY_PATH=/build/go-llama.cpp go build -o app . 
6 | 7 | FROM debian:12 8 | RUN apt-get update && apt-get install -y ca-certificates 9 | WORKDIR /usr/local/bin/app 10 | COPY --from=builder /build/app . 11 | CMD ["./app"] -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Δıⲙα 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | all: 2 | $(MAKE) -C go-llama.cpp/ libbinding.a 3 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Docker Pulls](https://img.shields.io/docker/pulls/thedmdim/llama-telegram-bot)](https://hub.docker.com/r/thedmdim/llama-telegram-bot) 2 | [![Docker Image Size (tag)](https://img.shields.io/docker/image-size/thedmdim/llama-telegram-bot/latest)](https://hub.docker.com/r/thedmdim/llama-telegram-bot) 3 | 4 | 5 | # 🦙 llama-telegram-bot 6 | 7 | ## What? 8 | It's a chatbot for Telegram utilizing genius [llama.cpp](https://github.com/ggerganov/llama.cpp). Try live instance here [@telellamabot](https://t.me/telellamabot) 9 | 10 | ## How? 11 | [llama-telegram-bot](https://github.com/thedmdim/llama-telegram-bot) is written in Go and uses [go-llama.cpp](https://github.com/go-skynet/go-llama.cpp) which is binding to [llama.cpp](https://github.com/ggerganov/llama.cpp) 12 | 13 | ## Quick Start 14 | Let's start! Everything is simple! 15 | 16 | Parameters are passed as env variables. 17 | 18 | 1. `MODEL_PATH=/path/to/model` 19 | 2. `TG_TOKEN=your_telegram_bot_token_here` 20 | 3. `Q_SIZE=1000` - task queue limit (optional: default 1000) 21 | 4. `N_TOKENS=1024` - tokens to predict (optional: default 1024) 22 | 5. `N_CPU=4` - number of cpu to use (optional: default max available) 23 | 6. `SINGLE_MESSAGE_PROMPT` - a prompt template for a direct message to bot (default in [.env.example](.env.example)) 24 | 7. `REPLY_MESSAGE_PROMPT` - a prompt template when you are replying to bot's answer (default in [.env.example](.env.example)) 25 | 8. 
`STOP_WORD` - characters at which prediction stops (default in [.env.example](.env.example)) 26 | 27 | ### Docker Compose 28 | Local build (Preferred) 29 | 1. `git clone https://github.com/thedmdim/llama-telegram-bot` 30 | 2. `cp .env.example .env` and edit `.env` as you need 31 | 3. `docker compose up -d` 32 | 33 | Pull from Docker Hub 34 | 1. `git clone https://github.com/thedmdim/llama-telegram-bot` 35 | 2. `cp .env.example .env` and edit `.env` as you need 36 | 3. `docker compose -f docker-compose.hub.yml up -d` 37 | 38 | ### Build and run as binary 39 | You need to have Go and CMake installed 40 | 1. `git clone --recurse-submodules https://github.com/thedmdim/llama-telegram-bot` 41 | 2. `cd llama-telegram-bot && make` 42 | 3. `go build .` 43 | 4. `env TG_TOKEN= MODEL_PATH=/path/to/your/model ./llama-telegram-bot` -------------------------------------------------------------------------------- /consumer.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "log" 5 | "strings" 6 | "time" 7 | 8 | llama "github.com/go-skynet/go-llama.cpp" 9 | tgbotapi "github.com/go-telegram-bot-api/telegram-bot-api/v5" 10 | ) 11 | 12 | 13 | var stopButton = tgbotapi.NewInlineKeyboardMarkup( 14 | tgbotapi.NewInlineKeyboardRow( 15 | tgbotapi.NewInlineKeyboardButtonData("Stop", "/stop"), 16 | ), 17 | ) 18 | 19 | func ProcessQueue() { 20 | for { 21 | task, err := qu.Dequeue() 22 | currentTask = task 23 | if err == ErrQueueEmpty { 24 | time.Sleep(time.Second * 2) 25 | continue 26 | } 27 | ProcessTask(task) 28 | } 29 | } 30 | 31 | 32 | type Result struct { 33 | Text string 34 | Err error 35 | } 36 | 37 | 38 | func Predict(task *Task) (chan string, chan Result) { 39 | 40 | stream := make(chan string) 41 | result := make(chan Result) 42 | 43 | go func(){ 44 | callback := func(token string) bool { 45 | select { 46 | case stream <- token: 47 | return true 48 | case <- task.Stop: 49 | return false 50 | } 51 | } 52 | 53 | text, 
err := l.Predict( 54 | task.Question, 55 | llama.Debug, 56 | llama.SetTokenCallback(callback), 57 | llama.SetTokens(nTokens), 58 | llama.SetThreads(nCpu), 59 | llama.SetTopK(90), 60 | llama.SetTopP(0.86), 61 | llama.SetStopWords(StopWord), 62 | ) 63 | close(stream) 64 | result <- Result{text, err} 65 | }() 66 | 67 | return stream, result 68 | } 69 | 70 | // This function is a mess 71 | func ProcessTask(task *Task) { 72 | 73 | log.Printf("Start processing task from user %d\n", task.UserID) 74 | log.Printf("The prompt is:\n%s\n", task.Question) 75 | 76 | // Start prediction 77 | stream, result := Predict(task) 78 | 79 | // Resulting generated text 80 | var answer string 81 | 82 | var counter int 83 | var issent bool 84 | for { 85 | select { 86 | case token := <- stream: 87 | if !issent && strings.TrimSpace(token) != "" { 88 | answer += token 89 | msg := tgbotapi.NewMessage(task.UserID, answer) 90 | msg.ReplyMarkup = &stopButton 91 | sent, err := bot.Send(msg) 92 | if err != nil { 93 | log.Println("[ProcessTask] error sending answer:", err) 94 | continue 95 | } 96 | // Save answer message ID to stream tokens to it 97 | task.MessageID = sent.MessageID 98 | issent = true 99 | continue 100 | } 101 | 102 | answer += token 103 | counter++ 104 | if counter == 6 { 105 | edited := tgbotapi.NewEditMessageText(task.UserID, task.MessageID, answer) 106 | edited.ReplyMarkup = &stopButton 107 | _, err := bot.Send(edited) 108 | if err != nil { 109 | log.Println("[ProcessTask] error streaming answer:", err) 110 | } 111 | counter = 0 112 | 113 | } 114 | 115 | case prediction := <- result: 116 | 117 | delete := tgbotapi.NewDeleteMessage(task.UserID, task.AnnounceID) 118 | _, err := bot.Request(delete) 119 | if err != nil { 120 | log.Println("Couldn't delete announce message:", err) 121 | } 122 | 123 | 124 | if prediction.Err != nil || strings.TrimSpace(prediction.Text) == "" || task.MessageID == 0 { 125 | log.Println("[ProcessTask] prediction error:", prediction.Err, prediction.Text) 
126 | failure := tgbotapi.NewMessage(task.UserID, "Sorry, couldn't generate answer") 127 | _, err := bot.Send(failure) 128 | if err != nil { 129 | log.Println("[ProcessTask] error sending failure message:", err) 130 | } 131 | return 132 | } 133 | 134 | 135 | edited := tgbotapi.NewEditMessageText(task.UserID, task.MessageID, prediction.Text) 136 | // Set parse mode to Markdown if it's backticks there 137 | if nBackticks := strings.Count(prediction.Text, "`"); nBackticks > 0 && nBackticks % 2 == 0 { 138 | edited.ParseMode = "Markdown" 139 | } 140 | _, err = bot.Send(edited) 141 | if err != nil { 142 | log.Println("[ProcessTask] error sending answer:", err) 143 | } 144 | 145 | log.Printf("Generated answer is:\n%s\n", prediction.Text) 146 | 147 | return 148 | } 149 | } 150 | } -------------------------------------------------------------------------------- /docker-compose.hub.yml: -------------------------------------------------------------------------------- 1 | version: '3.9' 2 | 3 | services: 4 | bot: 5 | image: thedmdim/llama-telegram-bot 6 | privileged: true 7 | restart: always 8 | volumes: 9 | - ${MODEL_PATH}:${MODEL_PATH} 10 | env_file: 11 | - .env -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.9' 2 | 3 | services: 4 | bot: 5 | build: 6 | context: . 
7 | privileged: true 8 | container_name: llama-telegram-bot 9 | restart: always 10 | volumes: 11 | - ${MODEL_PATH}:${MODEL_PATH} 12 | env_file: 13 | - .env -------------------------------------------------------------------------------- /errors.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import "errors" 4 | 5 | var ErrQueueEmpty = errors.New("queue is empty") 6 | var ErrOnePerUser = errors.New("user already applied task") 7 | var ErrQueueLimit = errors.New("reached queue limit") 8 | var ErrNoUserTask = errors.New("user task not found") -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module llama-telegram-bot 2 | 3 | go 1.21 4 | 5 | require ( 6 | github.com/go-skynet/go-llama.cpp v0.0.0-20231009155254-aeba71ee8428 7 | github.com/go-telegram-bot-api/telegram-bot-api/v5 v5.5.1 8 | ) 9 | 10 | replace github.com/go-skynet/go-llama.cpp => ./go-llama.cpp -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= 2 | github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= 3 | github.com/go-skynet/go-llama.cpp v0.0.0-20231009155254-aeba71ee8428 h1:WYjkXL0Nw7dN2uDBMVCWQ8xLavrIhjF/DLczuh5L9TY= 4 | github.com/go-skynet/go-llama.cpp v0.0.0-20231009155254-aeba71ee8428/go.mod h1:iub0ugfTnflE3rcIuqV2pQSo15nEw3GLW/utm5gyERo= 5 | github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= 6 | github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= 7 | github.com/go-telegram-bot-api/telegram-bot-api/v5 v5.5.1 h1:wG8n/XJQ07TmjbITcGiUaOtXxdrINDz1b0J1w0SzqDc= 8 
| github.com/go-telegram-bot-api/telegram-bot-api/v5 v5.5.1/go.mod h1:A2S0CWkNylc2phvKXWBBdD3K0iGnDBGbzRpISP2zBl8= 9 | github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= 10 | github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 11 | github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 h1:yAJXTCF9TqKcTiHJAE8dj7HMvPfh66eeA2JYW7eFpSE= 12 | github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= 13 | github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= 14 | github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= 15 | github.com/onsi/gomega v1.28.0 h1:i2rg/p9n/UqIDAMFUJ6qIUUMcsqOuUHgbpbu235Vr1c= 16 | github.com/onsi/gomega v1.28.0/go.mod h1:A1H2JE76sI14WIP57LMKj7FVfCHx3g3BcZVjJG8bjX8= 17 | golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= 18 | golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= 19 | golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= 20 | golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 21 | golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= 22 | golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= 23 | golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss= 24 | golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= 25 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 26 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 27 | -------------------------------------------------------------------------------- /handlers.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | 7 | tgbotapi 
"github.com/go-telegram-bot-api/telegram-bot-api/v5" 8 | ) 9 | 10 | 11 | func ProcessUpdate(update tgbotapi.Update) { 12 | // If we've gotten a message update. 13 | if update.Message != nil { 14 | 15 | msg := tgbotapi.MessageConfig{ 16 | BaseChat: tgbotapi.BaseChat{ 17 | ChatID: update.Message.Chat.ID, 18 | }, 19 | DisableWebPagePreview: true, 20 | } 21 | 22 | if update.Message.Text == "/start" { 23 | msg.Text = "Just ask question" 24 | if _, err := bot.Send(msg); err != nil { 25 | log.Println(err) 26 | } 27 | return 28 | } 29 | 30 | if update.Message.Text == "/queue" { 31 | _, n := qu.Load(update.Message.From.ID) 32 | 33 | switch n { 34 | case -1: 35 | if currentTask != nil && currentTask.UserID == update.Message.From.ID { 36 | msg.Text = "It's your turn now!!!" 37 | } else { 38 | msg.Text = "Hey! You haven't asked question yet!" 39 | } 40 | case 0: 41 | msg.Text = "Hold a second, you're next" 42 | default: 43 | msg.Text = fmt.Sprintf("Hold on! Your queue is %d", n) 44 | } 45 | 46 | if _, err := bot.Send(msg); err != nil { 47 | log.Println(err) 48 | } 49 | return 50 | } 51 | 52 | if len(update.Message.Text) > 0 && update.Message.Text[0] == '/' { // length check first: Text is empty for non-text updates (photos, stickers), and indexing it would panic 53 | msg.Text = "There is no such command" 54 | if _, err := bot.Send(msg); err != nil { 55 | log.Println(err) 56 | } 57 | return 58 | } 59 | 60 | // Do enqueue task 61 | task := Task{ 62 | UserID: update.Message.From.ID, 63 | Stop: make(chan bool), 64 | } 65 | 66 | if reply := update.Message.ReplyToMessage; reply != nil && reply.From.ID == bot.Self.ID { 67 | task.WrapPrevContext(reply.Text, update.Message.Text) 68 | } else { 69 | task.WrapInRoles(update.Message.Text) 70 | } 71 | 72 | 73 | n, err := qu.Enqueue(&task) 74 | if err != nil { log.Println(err) } // log only real failures, not "<nil>" on every enqueue 75 | if err != nil { 76 | if err == ErrOnePerUser { 77 | msg.Text = "You've already asked your question. You can edit the existing one until it's your turn" 78 | } 79 | if err == ErrQueueLimit { 80 | msg.Text = fmt.Sprintf("Now queue is full %d/%d. 
Wait one slot to be free at least.\nCheck queue /stats", n, qu.Limit) 81 | } 82 | if _, err := bot.Send(msg); err != nil { 83 | log.Println(err) 84 | } 85 | return 86 | } 87 | msg.Text = fmt.Sprintf("Your question registered! Your queue is %d/%d.\nYou can edit your message until it's your turn", n, qu.Limit) 88 | sent, err := bot.Send(msg) 89 | if err != nil { 90 | log.Println(err) 91 | } 92 | task.AnnounceID = sent.MessageID 93 | } 94 | 95 | if update.EditedMessage != nil { 96 | task := Task{ 97 | UserID: update.EditedMessage.From.ID, 98 | MessageID: update.EditedMessage.MessageID, 99 | Question: update.EditedMessage.Text, 100 | } 101 | qu.Enqueue(&task) 102 | } 103 | 104 | 105 | if update.CallbackQuery != nil { 106 | if update.CallbackQuery.Data == "/stop" && currentTask != nil { 107 | if !currentTask.Stopped { 108 | callback := tgbotapi.NewCallback(update.CallbackQuery.ID, "Stopping") 109 | bot.Request(callback) 110 | currentTask.Stop <- true 111 | currentTask.Stopped = true 112 | } else { 113 | callback := tgbotapi.NewCallback(update.CallbackQuery.ID, "Already stopped") 114 | bot.Request(callback) 115 | } 116 | } 117 | } 118 | } -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "log" 5 | "os" 6 | "runtime" 7 | "strconv" 8 | 9 | llama "github.com/go-skynet/go-llama.cpp" 10 | tgbotapi "github.com/go-telegram-bot-api/telegram-bot-api/v5" 11 | ) 12 | 13 | 14 | var apiToken = os.Getenv("TG_TOKEN") 15 | var modelPath = os.Getenv("MODEL_PATH") 16 | var nTokens int 17 | var nCpu int 18 | 19 | var SingleMessagePrompt string 20 | var ReplyMessagePrompt string 21 | var StopWord = os.Getenv("STOP_WORD") 22 | 23 | var l *llama.LLama 24 | var bot *tgbotapi.BotAPI 25 | var qu *TaskQueue 26 | var currentTask *Task 27 | 28 | 29 | func main() { 30 | var err error 31 | 32 | if apiToken == "" || modelPath == "" { 33 | 
log.Fatalln("Please provide TG_TOKEN and MODEL_PATH env variables") 34 | } 35 | 36 | // Init queue 37 | var queueSize = 1000 38 | if s := os.Getenv("Q_SIZE"); s != "" { 39 | if n, err := strconv.Atoi(s); err == nil { 40 | queueSize = n 41 | } 42 | } 43 | qu = NewTaskQueue(queueSize) 44 | 45 | 46 | // N tokens 47 | nTokens = 1024 // default documented in README and .env.example 48 | if s := os.Getenv("N_TOKENS"); s != "" { 49 | if n, err := strconv.Atoi(s); err == nil { 50 | nTokens = n 51 | } 52 | } 53 | 54 | // N cores 55 | nCpu = runtime.NumCPU() 56 | if s := os.Getenv("N_CPU"); s != "" { 57 | if n, err := strconv.Atoi(s); err == nil { 58 | nCpu = n 59 | } 60 | } 61 | 62 | // Init Prompt templates 63 | SingleMessagePrompt = os.Getenv("SINGLE_MESSAGE_PROMPT") 64 | ReplyMessagePrompt = os.Getenv("REPLY_MESSAGE_PROMPT") 65 | if SingleMessagePrompt == "" { 66 | SingleMessagePrompt = "### User: Response to my next request. %s ### Assistant:" 67 | } 68 | if ReplyMessagePrompt == "" { 69 | ReplyMessagePrompt = "### Assistant: %s ### User: %s \n### Assistant:" 70 | } 71 | if StopWord == "" { 72 | StopWord = "###" 73 | } 74 | 75 | // Init LLAMA binding 76 | l, err = llama.New(modelPath, llama.SetContext(1024), llama.EnableEmbeddings, llama.EnableMLock) 77 | if err != nil { 78 | log.Fatalf("Loading the model failed: %s", err.Error()) 79 | } 80 | 81 | // Init Telegram API client 82 | bot, err = tgbotapi.NewBotAPI(apiToken) 83 | if err != nil { 84 | log.Fatal(err) 85 | } 86 | 87 | // Start iterating through queue 88 | go ProcessQueue() 89 | 90 | // Receive updates 91 | u := tgbotapi.NewUpdate(0) 92 | u.Timeout = 60 93 | 94 | updates := bot.GetUpdatesChan(u) 95 | for update := range updates { 96 | ProcessUpdate(update) 97 | } 98 | } 99 | -------------------------------------------------------------------------------- /queue.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import "sync" 4 | 5 | type TaskQueue struct { 6 | mu sync.Mutex 7 | tasks []*Task 8 | users 
map[int64]*Task 9 | Limit int 10 | Count int 11 | } 12 | 13 | func NewTaskQueue(limit int) *TaskQueue { 14 | return &TaskQueue{ 15 | tasks: make([]*Task, 0), 16 | users: make(map[int64]*Task, 0), 17 | Limit: limit, 18 | } 19 | } 20 | 21 | 22 | // Get task by UserID and its count in queue 23 | func (q *TaskQueue) Load(userId int64) (*Task, int) { 24 | q.mu.Lock() 25 | defer q.mu.Unlock() 26 | 27 | for n, task := range q.tasks { 28 | if task.UserID == userId { 29 | return task, n 30 | } 31 | } 32 | 33 | return nil, -1 34 | } 35 | 36 | 37 | func (q *TaskQueue) Enqueue(task *Task) (int, error) { 38 | q.mu.Lock() 39 | defer q.mu.Unlock() 40 | 41 | t, exists := q.users[task.UserID] 42 | if exists { 43 | // update existing 44 | if t.MessageID == task.MessageID { 45 | t.Question = task.Question 46 | return q.Count, nil 47 | } 48 | 49 | return q.Count, ErrOnePerUser 50 | } 51 | 52 | if q.Count == q.Limit { 53 | return q.Count, ErrQueueLimit 54 | } 55 | 56 | 57 | q.tasks = append(q.tasks, task) 58 | q.users[task.UserID] = task 59 | q.Count++ 60 | 61 | return q.Count, nil 62 | } 63 | 64 | func (q *TaskQueue) Dequeue() (*Task, error) { 65 | q.mu.Lock() 66 | defer q.mu.Unlock() 67 | 68 | if q.Count == 0 { 69 | return nil, ErrQueueEmpty 70 | } 71 | 72 | task := q.tasks[0] 73 | 74 | q.tasks[0] = nil 75 | q.tasks = q.tasks[1:] 76 | delete(q.users, task.UserID) 77 | 78 | q.Count-- 79 | 80 | return task, nil 81 | } -------------------------------------------------------------------------------- /task.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import "fmt" 4 | 5 | type Task struct { 6 | UserID int64 7 | MessageID int 8 | AnnounceID int 9 | Question string 10 | Stopped bool 11 | Stop chan bool 12 | } 13 | 14 | func (t *Task) WrapInRoles(question string) { 15 | t.Question = fmt.Sprintf(SingleMessagePrompt, question) 16 | } 17 | 18 | func (t *Task) WrapPrevContext(previous, question string) { 19 | t.Question = 
fmt.Sprintf(ReplyMessagePrompt, previous, question) 20 | } --------------------------------------------------------------------------------