├── .gitignore
├── model
├── embedding.go
└── document.go
├── go.mod
├── examples
├── tools
│ └── prompt_optimizer.go
├── prompts
│ ├── objective_refinement_prompt.go
│ ├── execution_prompt.go
│ ├── priorization_prompt.go
│ ├── evaluation_prompt.go
│ ├── model.go
│ ├── milestone_creation_prompt.go
│ └── optimize_prompt_prompt.go
└── agi
│ └── agi.go
├── go.sum
├── llmclient
├── client.go
└── provider
│ └── openai.go
├── LICENSE
├── services
└── chunks.go
├── prompt
├── runner.go
└── prompt.go
├── cmd
└── main.go
├── datastore
├── datastore.go
└── providers
│ └── inmemory
│ └── in_memory_vector_store.go
├── README.md
└── img
├── lightweight-agi-sequence-diagram.svg
└── lightweight-agi-flowchart.svg
/.gitignore:
--------------------------------------------------------------------------------
1 | .env*
2 |
3 | dump*
4 | tmp/
5 |
--------------------------------------------------------------------------------
/model/embedding.go:
--------------------------------------------------------------------------------
1 | package model
2 |
// Embedding is a vector representation of a piece of text, as returned by an
// embedding model.
type Embedding struct {
	// Embedding holds the raw vector components.
	Embedding []float32
}
6 |
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/zawakin/lightweight-agi
2 |
3 | go 1.20
4 |
5 | require (
6 | github.com/google/go-cmp v0.5.7
7 | github.com/google/uuid v1.3.0
8 | github.com/joho/godotenv v1.5.1
9 | github.com/sashabaranov/go-openai v1.5.7
10 | )
11 |
12 | require golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 // indirect
13 |
--------------------------------------------------------------------------------
/examples/tools/prompt_optimizer.go:
--------------------------------------------------------------------------------
1 | package tools
2 |
3 | import (
4 | "context"
5 |
6 | "github.com/zawakin/lightweight-agi/examples/prompts"
7 | "github.com/zawakin/lightweight-agi/prompt"
8 | )
9 |
10 | func OptimizePrompt(ctx context.Context, runner *prompt.PromptRunner, original *prompt.Prompt, iterations int) (*prompt.Prompt, error) {
11 | p := original
12 |
13 | for i := 0; i < iterations; i++ {
14 | var result prompts.OptimizePromptOutput
15 | err := runner.Run(ctx, prompts.OptimizePromptPrompt, prompts.OptimizePromptInput{
16 | Original: p,
17 | }, &result)
18 | if err != nil {
19 | return nil, err
20 | }
21 |
22 | p = result.OptimizedPrompt
23 | }
24 |
25 | return p, nil
26 | }
27 |
--------------------------------------------------------------------------------
/examples/prompts/objective_refinement_prompt.go:
--------------------------------------------------------------------------------
1 | package prompts
2 |
3 | import (
4 | "github.com/zawakin/lightweight-agi/prompt"
5 | )
6 |
// ObjectiveRefinementInput is the input payload for the objective
// refinement prompt.
type ObjectiveRefinementInput struct {
	Objective Objective `json:"objective"`
}

// ObjectiveRefinementOutput carries the refined objective produced by the
// model.
type ObjectiveRefinementOutput struct {
	RefinedObjective Objective `json:"refined_objective"`
}

var (
	// ObjectRefinementPrompt asks the model to turn a broad objective into
	// a more specific, actionable one.
	// NOTE(review): the name looks like a typo for "ObjectiveRefinementPrompt";
	// renaming would require updating all callers, so it is left as-is here.
	ObjectRefinementPrompt = &prompt.Prompt{
		Name:        "refinement of objective",
		Description: `Refine the objective to a more specific objective that can be used.`,
		Template: &prompt.Example{
			Input: &ObjectiveRefinementInput{
				Objective: Objective("original objective"),
			},
			Output: &ObjectiveRefinementOutput{
				RefinedObjective: Objective("refined objective"),
			},
		},
	}
)
29 |
--------------------------------------------------------------------------------
/go.sum:
--------------------------------------------------------------------------------
1 | github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o=
2 | github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
3 | github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
4 | github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
5 | github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
6 | github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
7 | github.com/sashabaranov/go-openai v1.5.7 h1:8DGgRG+P7yWixte5j720y6yiXgY3Hlgcd0gcpHdltfo=
8 | github.com/sashabaranov/go-openai v1.5.7/go.mod h1:lj5b/K+zjTSFxVLijLSTDZuP7adOgerWeFyZLUhAKRg=
9 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
10 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
11 |
--------------------------------------------------------------------------------
/llmclient/client.go:
--------------------------------------------------------------------------------
1 | package llmclient
2 |
3 | import (
4 | "context"
5 |
6 | "github.com/zawakin/lightweight-agi/model"
7 | )
8 |
9 | type EmbeddingClient interface {
10 | EmbedText(ctx context.Context, text string) (*model.Embedding, error)
11 | EmbedTexts(ctx context.Context, text []string) ([]model.Embedding, error)
12 | }
13 |
14 | // CompletionClient is an interface that provides a method to complete text.
15 | type CompletionClient interface {
16 | Complete(ctx context.Context, text string, maxTokens int) (string, error)
17 | }
18 |
19 | type ChatCompletionClient interface {
20 | Complete(ctx context.Context, messages ChatMessages, opt CompletionOption) (string, error)
21 | }
22 |
23 | type Role string
24 |
25 | const (
26 | RoleUser Role = "user"
27 | RoleAssistant Role = "assistant"
28 | RoleSystem Role = "system"
29 | )
30 |
31 | func (r Role) String() string {
32 | return string(r)
33 | }
34 |
35 | type ChatMessage struct {
36 | Role Role
37 | Content string
38 | }
39 |
40 | type ChatMessages []ChatMessage
41 |
42 | type CompletionOption struct {
43 | MaxTokens int
44 | }
45 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Yuki Miyake
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/examples/prompts/execution_prompt.go:
--------------------------------------------------------------------------------
1 | package prompts
2 |
3 | import (
4 | "github.com/zawakin/lightweight-agi/prompt"
5 | )
6 |
7 | type ExecutionInput struct {
8 | Objective Objective `json:"objective"`
9 | CurrentTask Task `json:"current_task"`
10 | SolvedTasks Tasks `json:"solved_tasks"`
11 | RelevantContext []TaskContext `json:"relevant_context"`
12 | }
13 |
14 | type ExecutionOutput struct {
15 | CurrentTaskResult TaskResult `json:"current_task_result"`
16 | }
17 |
18 | var (
19 | ExecutionPrompt = prompt.NewPrompt(
20 | "execution",
21 | `Execute the task (with the given context) to archieve the objective. Output the result of the task to result text.`,
22 | prompt.NewExample(
23 | &ExecutionInput{
24 | Objective: Objective("original objective"),
25 | CurrentTask: Task{Name: "current task"},
26 | SolvedTasks: Tasks{
27 | {Name: "solved task 1"},
28 | {Name: "solved task 2"},
29 | },
30 | RelevantContext: []TaskContext{
31 | {
32 | Text: "context text 1",
33 | },
34 | },
35 | },
36 | &ExecutionOutput{
37 | CurrentTaskResult: TaskResult{
38 | Task: Task{Name: "current task"},
39 | ResultText: "result text",
40 | },
41 | },
42 | ),
43 | nil,
44 | )
45 | )
46 |
--------------------------------------------------------------------------------
/services/chunks.go:
--------------------------------------------------------------------------------
1 | package services
2 |
3 | import (
4 | "context"
5 |
6 | "github.com/zawakin/lightweight-agi/llmclient"
7 | "github.com/zawakin/lightweight-agi/model"
8 | )
9 |
// GetTextChunks splits text into consecutive byte slices of at most
// chunkTokenSize bytes; the final chunk may be shorter. It returns nil for
// empty input or a non-positive chunk size (the latter previously caused an
// infinite loop because the loop index never advanced).
//
// NOTE(review): chunking is byte-based, so a multi-byte UTF-8 rune can be
// split across two chunks — confirm whether rune-safe splitting is needed.
func GetTextChunks(text string, chunkTokenSize int) []string {
	if chunkTokenSize <= 0 {
		return nil
	}
	var chunks []string
	for i := 0; i < len(text); i += chunkTokenSize {
		end := i + chunkTokenSize
		if end > len(text) {
			end = len(text)
		}
		chunks = append(chunks, text[i:end])
	}
	return chunks
}
21 |
22 | func CreateDocumentChunks(ctx context.Context, embeddingClient llmclient.EmbeddingClient, doc *model.Document, chunkTokenSize int) ([]model.DocumentChunk, error) {
23 | var chunks []model.DocumentChunk
24 |
25 | embeddings, err := embeddingClient.EmbedTexts(ctx, GetTextChunks(doc.Text, chunkTokenSize))
26 | if err != nil {
27 | return nil, err
28 | }
29 |
30 | for i, chunkText := range GetTextChunks(doc.Text, chunkTokenSize) {
31 | chunks = append(chunks, model.DocumentChunk{
32 | ID: model.NewDocumentChunkID(),
33 | DocumentID: doc.ID,
34 | Text: chunkText,
35 | Metadata: model.DocumentChunkMetadata{
36 | DocumentMetadata: doc.Metadata,
37 | DocumentID: doc.ID,
38 | },
39 | Embedding: embeddings[i].Embedding,
40 | })
41 | }
42 | return chunks, nil
43 | }
44 |
--------------------------------------------------------------------------------
/prompt/runner.go:
--------------------------------------------------------------------------------
1 | package prompt
2 |
3 | import (
4 | "context"
5 | "encoding/json"
6 | "log"
7 |
8 | "github.com/zawakin/lightweight-agi/llmclient"
9 | )
10 |
// PromptRunner is a struct that runs a prompt.
// It owns the completion client used to execute prompts and an optional
// verbose flag that logs every prompt/response pair.
type PromptRunner struct {
	llmClient llmclient.CompletionClient

	// verbose enables colored logging of each prompt and its raw result.
	verbose bool
}

// NewPromptRunner returns a PromptRunner backed by the given completion
// client. When verbose is true, every Run call logs its prompt and result.
func NewPromptRunner(llmClient llmclient.CompletionClient, verbose bool) *PromptRunner {
	return &PromptRunner{
		llmClient: llmClient,
		verbose:   verbose,
	}
}
24 |
25 | // Run runs a prompt.
26 | // It formats the input, sends it to the completion client, and unmarshals the output.
27 | func (a *PromptRunner) Run(ctx context.Context, prompter Prompter, input Input, out Output) error {
28 | prompt, err := prompter.Format(input)
29 | if err != nil {
30 | return err
31 | }
32 |
33 | if a.verbose {
34 | log.Println("--------------------------------")
35 | log.Printf("\033[33mPrompt:\n%s\033[0m\n", prompt)
36 | }
37 |
38 | result, err := a.llmClient.Complete(ctx, prompt, 1000)
39 | if err != nil {
40 | return err
41 | }
42 |
43 | if a.verbose {
44 | log.Printf("\033[32mResult:\n%s\033[0m\n", result)
45 | log.Println("--------------------------------")
46 | }
47 |
48 | err = json.Unmarshal([]byte(result), out)
49 | if err != nil {
50 | return err
51 | }
52 |
53 | return nil
54 | }
55 |
--------------------------------------------------------------------------------
/model/document.go:
--------------------------------------------------------------------------------
1 | package model
2 |
3 | import (
4 | "github.com/google/uuid"
5 | )
6 |
// DocumentID uniquely identifies a document.
type DocumentID uuid.UUID

// NewDocumentID returns a fresh random document ID.
func NewDocumentID() DocumentID {
	return DocumentID(uuid.New())
}

// Document is a piece of text with optional metadata.
type Document struct {
	ID       DocumentID
	Text     string
	Metadata *DocumentMetadata
}

// Source describes where a document came from.
type Source string

// DocumentMetadata holds document-level metadata. Currently empty.
type DocumentMetadata struct {
}

// DocumentChunkMetadata is chunk-level metadata: the parent document's
// metadata plus the parent document's ID.
type DocumentChunkMetadata struct {
	*DocumentMetadata
	DocumentID DocumentID
}

// DocumentChunkID uniquely identifies a chunk of a document.
type DocumentChunkID uuid.UUID

// NewDocumentChunkID returns a fresh random chunk ID.
func NewDocumentChunkID() DocumentChunkID {
	return DocumentChunkID(uuid.New())
}

// DocumentChunk is a slice of a document's text together with its embedding.
type DocumentChunk struct {
	ID         DocumentChunkID
	DocumentID DocumentID
	Text       string
	Metadata   DocumentChunkMetadata
	Embedding  []float32
}

// DocumentChunkWithScore is a chunk annotated with a similarity score.
type DocumentChunkWithScore struct {
	DocumentChunk
	Score float32
}

// DocumentWithChunks bundles a document with all of its chunks.
type DocumentWithChunks struct {
	Document
	Chunks []DocumentChunk
}

// DocumentMetadataFilter narrows a query to matching documents.
// Fields are pointers; presumably nil means "no constraint" — confirm
// against the provider implementations that apply the filter.
type DocumentMetadataFilter struct {
	DocumentID *DocumentID
	Source     *Source
	SourceID   *string
	// Author *string
}

// Query is a text search request; TopK caps the number of results.
type Query struct {
	Query  string
	Filter *DocumentMetadataFilter
	TopK   int
}

// QueryWithEmbedding is a Query plus the embedding of its query text.
type QueryWithEmbedding struct {
	Query
	Embedding Embedding
}

// QueryResult pairs a query string with its ranked matches.
type QueryResult struct {
	Query   string
	Results []DocumentChunkWithScore
}
75 |
--------------------------------------------------------------------------------
/examples/prompts/priorization_prompt.go:
--------------------------------------------------------------------------------
1 | package prompts
2 |
3 | import (
4 | "github.com/zawakin/lightweight-agi/prompt"
5 | )
6 |
// PrioritizationInput is the input payload for the task prioritization
// prompt: the global objective plus the tasks to reorder.
type PrioritizationInput struct {
	Objective Objective `json:"objective"`
	Tasks     Tasks     `json:"tasks"`
}

// PriorizationOutput carries the reordered task list.
// NOTE(review): the name is missing a syllable ("PrioritizationOutput");
// renaming would require updating all callers, so it is left as-is here.
type PriorizationOutput struct {
	Tasks []Task `json:"tasks"`
}

var (
	// PrioritizationPrompt asks the model to reorder the given tasks by
	// importance and urgency without dropping any of them.
	PrioritizationPrompt = &prompt.Prompt{
		Name: "task prioritization",
		Description: `You are a task prioritization AI responsible for organizing the following tasks in a higher-priority order.

To prioritize these tasks, please follow the steps below:

1. Determine the importance of each task based on the ultimate objective.
2. Consider any dependencies between tasks or any external constraints (e.g., deadlines, resources) that may impact the order of execution.
3. Reorder the tasks accordingly, with the most important and urgent tasks at the top.

Do not remove any tasks. Return the tasks as an array in higher-priority order.
`,
		Template: &prompt.Example{
			Input: &PrioritizationInput{
				Objective: Objective("Objective"),
				Tasks: Tasks{
					{Name: "Task 1"},
					{Name: "Task 2"},
					{Name: "Task 3"},
				},
			},
			Output: &PriorizationOutput{
				Tasks: []Task{
					{Name: "Task 2"},
					{Name: "Task 1"},
					{Name: "Task 3"},
				},
			},
		},
	}
)
50 |
--------------------------------------------------------------------------------
/examples/prompts/evaluation_prompt.go:
--------------------------------------------------------------------------------
1 | package prompts
2 |
3 | import (
4 | "github.com/zawakin/lightweight-agi/prompt"
5 | )
6 |
// EvaluationTaskInput is the input payload for the task evaluation prompt.
type EvaluationTaskInput struct {
	Objective  Objective  `json:"objective"`
	Task       Task       `json:"task"`
	TaskResult TaskResult `json:"task_result"`
}

// EvaluationTaskOutput is the model's 0-100 score and its justification.
type EvaluationTaskOutput struct {
	Score  int    `json:"score"`
	Reason string `json:"reason"`
}

var (
	// EvaluationTasksPrompt asks the model to score a task result from 0 to
	// 100 against the task description. Two few-shot examples anchor the
	// low and high ends of the scale.
	EvaluationTasksPrompt = &prompt.Prompt{
		Name:        "evaluation tasks",
		Description: `Evaluate the following task result with score(0-100) based on the task description.`,
		Template: &prompt.Example{
			Input: &EvaluationTaskInput{
				Objective:  Objective("objective"),
				Task:       Task{Name: "task"},
				TaskResult: TaskResult{ResultText: "result text"},
			},
			Output: &EvaluationTaskOutput{
				Score:  50,
				Reason: "reason",
			},
		},
		Examples: prompt.Examples{
			{
				Input: &EvaluationTaskInput{
					Objective:  Objective("objective"),
					Task:       Task{Name: "task-1"},
					TaskResult: TaskResult{ResultText: "bad result which does not achieve the objective"},
				},
				Output: &EvaluationTaskOutput{
					Score:  0,
					Reason: "This is a bad result because...",
				},
			},
			{
				Input: &EvaluationTaskInput{
					Objective:  Objective("objective"),
					Task:       Task{Name: "task"},
					TaskResult: TaskResult{ResultText: "good result which achieves the objective completely"},
				},
				Output: &EvaluationTaskOutput{
					Score:  100,
					Reason: "This is a good result because...",
				},
			},
		},
	}
)
59 |
--------------------------------------------------------------------------------
/cmd/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "log"
6 | "os"
7 |
8 | "github.com/joho/godotenv"
9 | "github.com/sashabaranov/go-openai"
10 |
11 | "github.com/zawakin/lightweight-agi/datastore"
12 | "github.com/zawakin/lightweight-agi/datastore/providers/inmemory"
13 | "github.com/zawakin/lightweight-agi/examples/agi"
14 | "github.com/zawakin/lightweight-agi/examples/prompts"
15 | "github.com/zawakin/lightweight-agi/llmclient/provider"
16 | "github.com/zawakin/lightweight-agi/prompt"
17 | )
18 |
var (
	// defaultOpenAICompletionModel selects the completion model; switch to
	// openai.GPT3Dot5Turbo for a cheaper run.
	// defaultOpenAICompletionModel = openai.GPT3Dot5Turbo
	defaultOpenAICompletionModel = openai.GPT4

	// verbose makes the prompt runner log every prompt/response pair.
	verbose = true
)
25 |
26 | func init() {
27 | // load dotenv
28 | if err := godotenv.Load(); err != nil {
29 | panic(err)
30 | }
31 |
32 | // disable log prefix
33 | log.SetFlags(0)
34 | }
35 |
func main() {
	// read env; the API key is mandatory, the org ID optional.
	openAIAPIKey := os.Getenv("OPENAI_API_KEY")
	if openAIAPIKey == "" {
		log.Fatal("OPENAI_API_KEY is required")
	}
	openAIOrgID := os.Getenv("OPENAI_ORG_ID")

	ctx := context.Background()

	// ==== Create OpenAI client ====
	openAIConfig := openai.DefaultConfig(openAIAPIKey)
	if openAIOrgID != "" {
		openAIConfig.OrgID = openAIOrgID
	}
	openaiClient := openai.NewClientWithConfig(openAIConfig)
	completionClient := provider.NewOpenAICompletionClient(openaiClient, defaultOpenAICompletionModel)
	embeddingClient := provider.NewOpenAIEmbeddingClient(openaiClient)

	// ==== Create data store provider ====
	// In-memory vector store; nothing is persisted across runs.
	dataStore := datastore.NewDataStore(inmemory.NewInMemoryDataStore(), embeddingClient)

	// === Create Prompt Runner ====
	runner := prompt.NewPromptRunner(completionClient, verbose)

	// ==== Run AGI agent ====
	agiAgent := agi.NewAGIAgent(runner, dataStore)

	// Define Global Objective of this AGI and run until it returns.
	objective := prompts.Objective("Define the feature of good GPT prompt.")
	if err := agiAgent.RunAGIByObjective(ctx, objective); err != nil {
		log.Fatal(err)
	}
}
70 |
--------------------------------------------------------------------------------
/datastore/datastore.go:
--------------------------------------------------------------------------------
1 | package datastore
2 |
import (
	"context"
	"errors"

	"github.com/zawakin/lightweight-agi/llmclient"
	"github.com/zawakin/lightweight-agi/model"
	"github.com/zawakin/lightweight-agi/services"
)
10 |
// DataStore combines a storage provider with an embedding client: documents
// are embedded here, then persisted through the provider.
type DataStore struct {
	provider        DataStoreProvider
	embeddingClient llmclient.EmbeddingClient
}

// DataStoreProvider is the storage backend interface for document chunks.
type DataStoreProvider interface {
	Upsert(ctx context.Context, chunks map[model.DocumentID][]model.DocumentChunk, chunkTokenSize *int) ([]model.DocumentID, error)
	Query(ctx context.Context, queries []model.QueryWithEmbedding) ([]model.QueryResult, error)
}

// NewDataStore wires a provider together with an embedding client.
func NewDataStore(provider DataStoreProvider, embeddingClient llmclient.EmbeddingClient) *DataStore {
	return &DataStore{
		provider:        provider,
		embeddingClient: embeddingClient,
	}
}
27 |
28 | func (d *DataStore) Upsert(ctx context.Context, documents []model.Document, chunkTokenSize *int) ([]model.DocumentID, error) {
29 | chunks := make(map[model.DocumentID][]model.DocumentChunk)
30 | for _, doc := range documents {
31 | documentChunks, err := services.CreateDocumentChunks(ctx, d.embeddingClient, &doc, *chunkTokenSize)
32 | if err != nil {
33 | return nil, err
34 | }
35 | chunks[doc.ID] = documentChunks
36 | }
37 |
38 | ids, err := d.provider.Upsert(ctx, chunks, chunkTokenSize)
39 | if err != nil {
40 | return nil, err
41 | }
42 |
43 | return ids, nil
44 | }
45 |
46 | func (d *DataStore) Query(ctx context.Context, queries []model.Query) ([]model.QueryResult, error) {
47 | queryTexts := make([]string, 0, len(queries))
48 | for _, query := range queries {
49 | queryTexts = append(queryTexts, query.Query)
50 | }
51 |
52 | queryEmbeddings, err := d.embeddingClient.EmbedTexts(ctx, queryTexts)
53 | if err != nil {
54 | return nil, err
55 | }
56 |
57 | queryWithEmbeddings := make([]model.QueryWithEmbedding, len(queries))
58 | for i, query := range queries {
59 | queryWithEmbeddings[i] = model.QueryWithEmbedding{
60 | Query: query,
61 | Embedding: queryEmbeddings[i],
62 | }
63 | }
64 |
65 | return d.provider.Query(ctx, queryWithEmbeddings)
66 | }
67 |
--------------------------------------------------------------------------------
/examples/prompts/model.go:
--------------------------------------------------------------------------------
1 | package prompts
2 |
3 | import (
4 | "fmt"
5 | "strings"
6 | )
7 |
// Objective is the objective of the task.
// It is used to determine the type of task to create.
type Objective string

// String returns the objective as a plain string.
func (o Objective) String() string {
	return string(o)
}

// Milestone is an intermediate objective on the way to the global objective.
type Milestone struct {
	Objective Objective `json:"objective"`
	Name      string    `json:"name"`
}

// Milestones is an ordered list of milestones.
type Milestones []Milestone

// String renders the milestones as a numbered list, one per line.
func (ms Milestones) String() string {
	var sb strings.Builder
	for i, m := range ms {
		sb.WriteString(fmt.Sprintf("%d. %s\n", i+1, m.Name))
	}
	return sb.String()
}

var (
	// MaxContextLength is the maximum length of the context that can be
	// returned by the model.
	MaxContextLength = 2000
)
36 |
// Task is a struct that contains the base task information.
// It is used to identify the task and to provide a name for the task.
type Task struct {
	Name string `json:"name"`
}

// String renders the task name wrapped in double quotes.
func (t Task) String() string {
	return fmt.Sprintf(`"%s"`, t.Name)
}

// Tasks is an ordered queue of tasks.
type Tasks []Task

// Add appends task to the end of the queue.
func (ts *Tasks) Add(task Task) {
	*ts = append(*ts, task)
}

// PopLeft returns the first task together with the remaining queue.
// It panics on an empty queue, matching slice-indexing semantics.
func (ts Tasks) PopLeft() (Task, Tasks) {
	head, rest := ts[0], ts[1:]
	return head, rest
}

// String renders the queue as a comma-separated list of quoted names.
func (ts Tasks) String() string {
	var quoted []string
	for _, task := range ts {
		quoted = append(quoted, task.String())
	}
	return "[" + strings.Join(quoted, ",") + "]"
}
64 |
// SubTask represents a smaller unit of work within a Task.
type SubTask struct {
	Name string
}

// TaskResult pairs a task with the free-text result of executing it.
type TaskResult struct {
	Task       Task   `json:"task"`
	ResultText string `json:"result_text"`
}

// TaskEvaluation is the model's judgement of a task result.
type TaskEvaluation struct {
	// Score has the range of 0 to 100.
	Score  int    `json:"score"`
	Reason string `json:"reason"`
}

// String renders the evaluation as "<score>%: <reason>".
func (t TaskEvaluation) String() string {
	return fmt.Sprintf(`%d%%: %s`, t.Score, t.Reason)
}

// TaskContext is a struct that contains the base task and the context
// for that task.
type TaskContext struct {
	Text string `json:"text"`
}
90 |
--------------------------------------------------------------------------------
/examples/prompts/milestone_creation_prompt.go:
--------------------------------------------------------------------------------
1 | package prompts
2 |
3 | import (
4 | "github.com/zawakin/lightweight-agi/prompt"
5 | )
6 |
// MilestoneCreationInput is the input payload for the milestone creation
// prompt.
type MilestoneCreationInput struct {
	Objective Objective `json:"objective"`
}

// MilestoneCreationOutput carries the milestones proposed by the model.
type MilestoneCreationOutput struct {
	Milestones Milestones `json:"milestones"`
}

var (
	// MilestoneCreationPrompt asks the model to break an objective down
	// into a list of milestones.
	MilestoneCreationPrompt = &prompt.Prompt{
		Name: "milestone creation",
		Description: `You are an AI tasked with creating a milestone for the following objective.

Please provide a milestone that can be used to achieve the objective.`,
		Template: &prompt.Example{
			Input: &MilestoneCreationInput{
				Objective: Objective("original objective"),
			},
			Output: &MilestoneCreationOutput{
				Milestones: Milestones{
					{Objective: Objective("milestone1"), Name: "milestone1"},
					{Objective: Objective("milestone2"), Name: "milestone2"},
					{Objective: Objective("milestone3"), Name: "milestone3"},
				},
			},
		},
	}
)

// TaskCreationInput is the input payload for the task creation prompt:
// the objective, the tasks still queued, and the most recent task result.
type TaskCreationInput struct {
	Objective      Objective   `json:"objective"`
	QueuedTasks    Tasks       `json:"queued_tasks"`
	LastTaskResult *TaskResult `json:"last_task_result"`
}

// TaskCreationOutput carries the (possibly extended) task queue.
type TaskCreationOutput struct {
	Tasks Tasks `json:"tasks"`
}

var (
	// TaskCreationPrompt asks the model to extend the queue with any new
	// tasks needed to reach the objective; the example shows the queued
	// tasks being kept and one new task appended.
	TaskCreationPrompt = &prompt.Prompt{
		Name:        "task creation",
		Description: `Create new tasks to be completed in order to achieve the objective if necessary.`,
		Template: &prompt.Example{
			Input: &TaskCreationInput{
				Objective: Objective("original objective"),
				QueuedTasks: Tasks{
					Task{
						Name: "task-10",
					},
					Task{
						Name: "task-11",
					},
				},
				LastTaskResult: &TaskResult{
					Task: Task{
						Name: "task-9",
					},
					ResultText: "task-9 result",
				},
			},
			Output: &TaskCreationOutput{
				Tasks: Tasks{
					Task{
						Name: "task-10",
					},
					Task{
						Name: "task-11",
					},
					Task{
						Name: "task-12",
					},
				},
			},
		},
	}
)
84 |
--------------------------------------------------------------------------------
/datastore/providers/inmemory/in_memory_vector_store.go:
--------------------------------------------------------------------------------
1 | package inmemory
2 |
3 | import (
4 | "context"
5 | "math"
6 | "sort"
7 |
8 | "github.com/zawakin/lightweight-agi/datastore"
9 | "github.com/zawakin/lightweight-agi/model"
10 | )
11 |
// Compile-time check that InMemoryDataStore satisfies DataStoreProvider.
var _ datastore.DataStoreProvider = (*InMemoryDataStore)(nil)

// InMemoryDataStore keeps every chunk in a plain map. Nothing is persisted,
// and the unguarded map makes it unsafe for concurrent use.
type InMemoryDataStore struct {
	data map[model.DocumentChunkID]model.DocumentChunk
}

// NewInMemoryDataStore returns an empty in-memory store.
func NewInMemoryDataStore() *InMemoryDataStore {
	return &InMemoryDataStore{
		data: make(map[model.DocumentChunkID]model.DocumentChunk),
	}
}
23 |
24 | func (s *InMemoryDataStore) Upsert(ctx context.Context, chunks map[model.DocumentID][]model.DocumentChunk, chunkTokenSize *int) ([]model.DocumentID, error) {
25 | var result []model.DocumentID
26 | for docID, v := range chunks {
27 | for _, chunk := range v {
28 | s.data[chunk.ID] = chunk
29 | }
30 | result = append(result, docID)
31 | }
32 | return result, nil
33 | }
34 |
35 | func (s *InMemoryDataStore) Query(ctx context.Context, queries []model.QueryWithEmbedding) ([]model.QueryResult, error) {
36 | var result []model.QueryResult
37 | for _, query := range queries {
38 | r, err := s.query(ctx, query)
39 | if err != nil {
40 | return nil, err
41 | }
42 | result = append(result, *r)
43 | }
44 | return result, nil
45 | }
46 |
47 | func (s *InMemoryDataStore) query(ctx context.Context, query model.QueryWithEmbedding) (*model.QueryResult, error) {
48 | var heap []model.DocumentChunkWithScore
49 | topK := query.TopK
50 |
51 | for _, chunk := range s.data {
52 | score := cosineSimilarity(query.Embedding.Embedding, chunk.Embedding)
53 | heap = append(heap, model.DocumentChunkWithScore{
54 | DocumentChunk: chunk,
55 | Score: score,
56 | })
57 | }
58 |
59 | sort.Slice(heap, func(i, j int) bool {
60 | return heap[i].Score > heap[j].Score
61 | })
62 | if len(heap) > topK {
63 | heap = heap[:topK]
64 | }
65 |
66 | var result []model.DocumentChunkWithScore
67 | result = append(result, heap...)
68 |
69 | return &model.QueryResult{
70 | Query: query.Query.Query,
71 | Results: result,
72 | }, nil
73 |
74 | }
75 |
// cosineSimilarity returns the cosine of the angle between a and b.
// It compares only the overlapping prefix when the lengths differ
// (previously a longer a caused an index-out-of-range panic) and returns 0
// when either vector has zero magnitude instead of dividing by zero (NaN).
func cosineSimilarity(a, b []float32) float32 {
	n := len(a)
	if len(b) < n {
		n = len(b)
	}

	var dot, aMag, bMag float32
	for i := 0; i < n; i++ {
		dot += a[i] * b[i]
		aMag += a[i] * a[i]
		bMag += b[i] * b[i]
	}

	if aMag == 0 || bMag == 0 {
		return 0
	}
	return dot / (float32(math.Sqrt(float64(aMag))) * float32(math.Sqrt(float64(bMag))))
}
90 |
--------------------------------------------------------------------------------
/prompt/prompt.go:
--------------------------------------------------------------------------------
1 | package prompt
2 |
3 | import (
4 | "encoding/json"
5 | "fmt"
6 | )
7 |
type (
	// Input is any value that can be JSON-encoded as a prompt's input.
	Input any
	// Output is the destination the model's JSON reply is decoded into;
	// callers pass a pointer (see PromptRunner.Run's json.Unmarshal).
	Output any
)

// Prompter interface defines a common method for generating formatted prompts.
type Prompter interface {
	Format(input Input) (string, error)
}

// Compile-time check that Prompt implements Prompter.
var _ Prompter = (*Prompt)(nil)

// Prompt is a struct that contains the information of a prompt.
type Prompt struct {
	Name        string `json:"name"`
	Description string `json:"description"`
	// InputFormat and OutputFormat are not referenced by Format; presumably
	// reserved for future use — confirm before removing.
	InputFormat  string   `json:"input_format"`
	OutputFormat string   `json:"output_format"`
	Template     *Example `json:"template"`
	Examples     Examples `json:"examples"`
}
29 |
// NewPrompt constructs a Prompt from its name, description, a template
// example, and optional few-shot examples.
func NewPrompt(name string, description string, template *Example, examples []Example) *Prompt {
	return &Prompt{
		Name:        name,
		Description: description,
		Template:    template,
		Examples:    examples,
	}
}

// NewSimplePrompt is a convenience constructor for a prompt whose template
// is a plain input/output string pair, with no extra examples.
func NewSimplePrompt(name string, description string, input string, output string) *Prompt {
	return NewPrompt(name, description, NewExample(input, output), nil)
}
42 |
43 | func (c *Prompt) Format(input Input) (string, error) {
44 | formattedInput, err := toJson(input)
45 | if err != nil {
46 | return "", err
47 | }
48 | formattedTemplate, err := c.Template.Format()
49 | if err != nil {
50 | return "", err
51 | }
52 | formattedExamples, err := c.Examples.Format()
53 | if err != nil {
54 | return "", err
55 | }
56 |
57 | return fmt.Sprintf(`You are an AI named "%s".
58 | %s
59 |
60 | Output a JSON-formatted string without outputting any other strings.
61 |
62 | Template:
63 | %s
64 |
65 | %s
66 |
67 | Input: %s
68 | Output:`, c.Name, c.Description, formattedTemplate, formattedExamples, formattedInput), nil
69 | }
70 |
71 | type Example struct {
72 | Input Input `json:"input"`
73 | Output Output `json:"output"`
74 | }
75 |
76 | func NewExample(input Input, output Output) *Example {
77 | return &Example{
78 | Input: input,
79 | Output: output,
80 | }
81 | }
82 |
83 | func (p *Example) Format() (string, error) {
84 | input, err := toJson(p.Input)
85 | if err != nil {
86 | return "", err
87 | }
88 |
89 | output, err := toJson(p.Output)
90 | if err != nil {
91 | return "", err
92 | }
93 |
94 | return fmt.Sprintf("Input: %s\nOutput: %s", input, output), nil
95 | }
96 |
97 | type Examples []Example
98 |
99 | func (p Examples) Format() (string, error) {
100 | if len(p) == 0 {
101 | return "", nil
102 | }
103 |
104 | s := ""
105 | for _, e := range p {
106 | s += "Example:\n"
107 | ds, err := e.Format()
108 | if err != nil {
109 | return "", err
110 | }
111 |
112 | s += ds + "\n"
113 | }
114 | return s, nil
115 | }
116 |
// toJson marshals v to its compact JSON string form.
func toJson(v any) (string, error) {
	encoded, err := json.Marshal(v)
	if err != nil {
		return "", err
	}
	return string(encoded), nil
}
124 |
--------------------------------------------------------------------------------
/llmclient/provider/openai.go:
--------------------------------------------------------------------------------
1 | package provider
2 |
3 | import (
4 | "context"
5 | "errors"
6 | "log"
7 | "time"
8 |
9 | "github.com/sashabaranov/go-openai"
10 |
11 | "github.com/zawakin/lightweight-agi/llmclient"
12 | "github.com/zawakin/lightweight-agi/model"
13 | )
14 |
// Compile-time check that OpenAIEmbeddingClient satisfies EmbeddingClient.
var _ llmclient.EmbeddingClient = (*OpenAIEmbeddingClient)(nil)

// OpenAIEmbeddingClient implements EmbeddingClient on top of the OpenAI
// embeddings API (Ada v2).
type OpenAIEmbeddingClient struct {
	openAIClient *openai.Client
}

// NewOpenAIEmbeddingClient wraps the given OpenAI client.
func NewOpenAIEmbeddingClient(openAIClient *openai.Client) *OpenAIEmbeddingClient {
	return &OpenAIEmbeddingClient{
		openAIClient: openAIClient,
	}
}
26 |
27 | func (e *OpenAIEmbeddingClient) EmbedText(ctx context.Context, text string) (*model.Embedding, error) {
28 | result, err := e.openAIClient.CreateEmbeddings(ctx, openai.EmbeddingRequest{
29 | Input: []string{text},
30 | Model: openai.AdaEmbeddingV2,
31 | })
32 | if err != nil {
33 | return nil, err
34 | }
35 | if len(result.Data) == 0 {
36 | return nil, errors.New("no embeddings returned")
37 | }
38 | return &model.Embedding{
39 | Embedding: result.Data[0].Embedding,
40 | }, nil
41 | }
42 |
43 | func (e *OpenAIEmbeddingClient) EmbedTexts(ctx context.Context, texts []string) ([]model.Embedding, error) {
44 | result, err := e.openAIClient.CreateEmbeddings(ctx, openai.EmbeddingRequest{
45 | Input: texts,
46 | Model: openai.AdaEmbeddingV2,
47 | })
48 | if err != nil {
49 | return nil, err
50 | }
51 | embeddings := make([]model.Embedding, len(result.Data))
52 | for i, data := range result.Data {
53 | embeddings[i] = model.Embedding{
54 | Embedding: data.Embedding,
55 | }
56 | }
57 | return embeddings, nil
58 | }
59 |
// Compile-time check that OpenAICompletionClient implements llmclient.CompletionClient.
var _ llmclient.CompletionClient = (*OpenAICompletionClient)(nil)

// OpenAICompletionClient generates chat completions through the OpenAI API
// using a fixed model name chosen at construction time.
type OpenAICompletionClient struct {
	openAIClient *openai.Client
	model        string // e.g. a GPT-4 or GPT-3.5 model identifier
}
66 |
67 | func NewOpenAICompletionClient(openAIClient *openai.Client, model string) *OpenAICompletionClient {
68 | return &OpenAICompletionClient{
69 | openAIClient: openAIClient,
70 | model: model,
71 | }
72 | }
73 |
74 | func (c *OpenAICompletionClient) Complete(ctx context.Context, text string, maxTokens int) (string, error) {
75 | result, err := c.complete(ctx, text, maxTokens)
76 | if err != nil {
77 | log.Println("retrying completion after 1 second", err)
78 | time.Sleep(1 * time.Second)
79 |
80 | result, err = c.complete(ctx, text, maxTokens)
81 | if err != nil {
82 | return "", err
83 | }
84 | }
85 | return result, nil
86 | }
87 |
88 | func (c *OpenAICompletionClient) complete(ctx context.Context, text string, maxTokens int) (string, error) {
89 | resp, err := c.openAIClient.CreateChatCompletion(ctx, openai.ChatCompletionRequest{
90 | Model: c.model,
91 | Messages: []openai.ChatCompletionMessage{
92 | {
93 | Role: "user",
94 | Content: text,
95 | },
96 | },
97 | MaxTokens: maxTokens,
98 | Temperature: 0.7,
99 | })
100 | if err != nil {
101 | return "", err
102 | }
103 | if len(resp.Choices) == 0 {
104 | return "", errors.New("no choices returned")
105 | }
106 | result := resp.Choices[0].Message.Content
107 | return result, nil
108 | }
109 |
--------------------------------------------------------------------------------
/examples/prompts/optimize_prompt_prompt.go:
--------------------------------------------------------------------------------
1 | package prompts
2 |
3 | import "github.com/zawakin/lightweight-agi/prompt"
4 |
// OptimizePromptInput is the input payload for OptimizePromptPrompt:
// the prompt to be improved.
type OptimizePromptInput struct {
	Original *prompt.Prompt `json:"original_prompt"`
}

// OptimizePromptOutput is the corresponding output payload: the improved
// version of the original prompt.
type OptimizePromptOutput struct {
	OptimizedPrompt *prompt.Prompt `json:"optimized_prompt"`
}
12 |
var (
	// OptimizePromptPrompt instructs the LLM to rewrite a given prompt —
	// name, description, input/output formats, template, and examples —
	// into a more specific, better-illustrated version. The JSON shapes it
	// exchanges are defined by OptimizePromptInput and OptimizePromptOutput.
	OptimizePromptPrompt = &prompt.Prompt{
		Name:        "AI-Powered Prompt Optimizer",
		Description: `You are an advanced AI assistant whose goal is to optimize a given prompt. You should focus on improving the prompt's title, description, format, and examples. If necessary, feel free to modify input and output parameters. The aim is to provide a more comprehensive and detailed version of the original prompt, complete with a more specific title, a more elaborate description, and richer examples. Add or modify examples as required to better illustrate the prompt.`,
		InputFormat:  "JSON object representing the original prompt details, including the name, description, input and output template, and examples.",
		OutputFormat: "JSON object representing the optimized prompt details, including the revised name, description, input and output template, and examples.",
		// Template: a placeholder-filled skeleton showing the expected JSON shape.
		Template: &prompt.Example{
			Input: &OptimizePromptInput{
				Original: &prompt.Prompt{
					Name:         "prompt name",
					Description:  "prompt description",
					InputFormat:  "prompt input format",
					OutputFormat: "prompt output format",
					Template: prompt.NewExample(
						"prompt input",
						"prompt output",
					),
					Examples: prompt.Examples{
						*prompt.NewExample(
							"prompt example input 1",
							"prompt example output 1",
						),
					},
				},
			},
			Output: &OptimizePromptOutput{
				OptimizedPrompt: &prompt.Prompt{
					Name:         "optimized prompt name",
					Description:  "optimized prompt description",
					InputFormat:  "optimized prompt input format",
					OutputFormat: "optimized prompt output format",
					Template: prompt.NewExample(
						"optimized prompt input",
						"optimized prompt output",
					),
					Examples: prompt.Examples{
						*prompt.NewExample(
							"optimized prompt example input 1",
							"optimized prompt example output 1",
						),
						*prompt.NewExample(
							"optimized prompt example input 2",
							"optimized prompt example output 2",
						),
					},
				},
			},
		},
		// Examples: one concrete few-shot demonstration of an optimization.
		// NOTE(review): these use map[string]any instead of the typed
		// OptimizePromptInput/Output structs — presumably equivalent after
		// JSON marshaling; confirm the prompt runner treats them identically.
		Examples: prompt.Examples{
			prompt.Example{
				Input: map[string]any{
					"original_prompt": prompt.Prompt{
						Name:        "Animal Facts",
						Description: "Generate a fact about an animal",
						Template: prompt.NewExample(
							"Animal name",
							"Animal fact",
						),
						Examples: prompt.Examples{
							*prompt.NewExample(
								"Elephant",
								"Elephants can communicate using infrasound, which is too low for humans to hear.",
							),
						},
					},
				},
				Output: map[string]any{
					"optimized_prompt": prompt.Prompt{
						Name:        "Intriguing Animal Facts",
						Description: "Provide an intriguing fact about the specified animal",
						Template: prompt.NewExample(
							"Name of the animal",
							"An intriguing fact about the animal",
						),
						Examples: prompt.Examples{
							*prompt.NewExample(
								"Elephant",
								"Elephants can communicate using infrasound, which is too low for humans to hear.",
							),
							*prompt.NewExample(
								"Giraffe",
								"Giraffes have a unique walking pattern, moving both legs on one side of their body at the same time.",
							),
						},
					},
				},
			},
		},
	}
)
103 |
--------------------------------------------------------------------------------
/examples/agi/agi.go:
--------------------------------------------------------------------------------
1 | package agi
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "log"
7 | "time"
8 |
9 | "github.com/zawakin/lightweight-agi/datastore"
10 | "github.com/zawakin/lightweight-agi/examples/prompts"
11 | "github.com/zawakin/lightweight-agi/model"
12 | "github.com/zawakin/lightweight-agi/prompt"
13 | )
14 |
const (
	// maxTaskIterationCount bounds the execute/evaluate/prioritize loop
	// per milestone so a run cannot spin indefinitely.
	maxTaskIterationCount = 20
)

var (
	// chunkSize is passed (by pointer) to DataStore.Upsert when storing
	// task results; declared as a var so its address can be taken.
	chunkSize = 200
)
22 |
// AGIAgent orchestrates the objective → milestones → tasks loop, using a
// PromptRunner for all LLM calls and a DataStore for storing and retrieving
// task results by similarity.
type AGIAgent struct {
	runner *prompt.PromptRunner

	dataStore *datastore.DataStore
}
28 |
29 | func NewAGIAgent(
30 | runner *prompt.PromptRunner,
31 | dataStore *datastore.DataStore,
32 | ) *AGIAgent {
33 | return &AGIAgent{
34 | runner: runner,
35 | dataStore: dataStore,
36 | }
37 | }
38 |
39 | func (a *AGIAgent) RunAGIByObjective(ctx context.Context, objective prompts.Objective) error {
40 | var objectiveRefinementOutput prompts.ObjectiveRefinementOutput
41 | err := a.runner.Run(ctx, prompts.ObjectRefinementPrompt, &prompts.ObjectiveRefinementInput{
42 | Objective: objective,
43 | }, &objectiveRefinementOutput)
44 | if err != nil {
45 | return err
46 | }
47 |
48 | var milestoneCreationOutput prompts.MilestoneCreationOutput
49 | err = a.runner.Run(ctx, prompts.MilestoneCreationPrompt, &prompts.MilestoneCreationInput{
50 | Objective: objective,
51 | }, &milestoneCreationOutput)
52 | if err != nil {
53 | return err
54 | }
55 |
56 | milestones := milestoneCreationOutput.Milestones
57 |
58 | for _, milestone := range milestones {
59 | err := a.RunAGIByMilestone(ctx, milestone)
60 | if err != nil {
61 | return err
62 | }
63 | }
64 |
65 | log.Printf("Finished executing tasks for objective %s", objective)
66 | return nil
67 | }
68 |
// RunAGIByMilestone drives one milestone to completion: it refines the
// milestone's objective, generates an initial task list, then repeatedly
// executes the front task, evaluates the result, persists it to the vector
// store, and re-prioritizes the remaining tasks. The loop runs at most
// maxTaskIterationCount iterations.
func (a *AGIAgent) RunAGIByMilestone(ctx context.Context, milestone prompts.Milestone) error {
	objective := milestone.Objective

	// Refine the milestone objective into a more actionable one.
	var objectiveRefinementOutput prompts.ObjectiveRefinementOutput
	err := a.runner.Run(ctx, prompts.ObjectRefinementPrompt, &prompts.ObjectiveRefinementInput{
		Objective: objective,
	}, &objectiveRefinementOutput)
	if err != nil {
		return err
	}

	objective = objectiveRefinementOutput.RefinedObjective

	// Ask the LLM for an initial set of tasks for the refined objective.
	var taskCreationOutput prompts.TaskCreationOutput
	err = a.runner.Run(ctx, prompts.TaskCreationPrompt, &prompts.TaskCreationInput{
		Objective: objective,
	}, &taskCreationOutput)
	if err != nil {
		return err
	}

	tasks := taskCreationOutput.Tasks

	// Completed tasks, passed to the execution prompt as history.
	// NOTE(review): nothing is ever appended to solvedTasks in this loop,
	// so the execution prompt always sees an empty history — confirm intent.
	var solvedTasks prompts.Tasks

	for i := 0; i < maxTaskIterationCount; i++ {
		if len(tasks) == 0 {
			log.Printf("No more tasks to execute for objective %s\n", objective)
			break
		}

		// Take the highest-priority task from the front of the queue.
		task, remain := tasks.PopLeft()
		taskName := task.Name

		tasks = remain

		// Retrieve the 5 stored chunks most similar to the task name.
		queryResults, err := a.dataStore.Query(ctx, []model.Query{
			{
				Query: taskName,
				TopK:  5,
			},
		})
		if err != nil {
			return err
		}
		// One query in, exactly one result set expected back.
		if len(queryResults) != 1 {
			return fmt.Errorf("unexpected chunks length: %d", len(queryResults))
		}
		queryResult := queryResults[0]

		var relevantContext []prompts.TaskContext
		for _, chunk := range queryResult.Results {
			relevantContext = append(relevantContext, prompts.TaskContext{
				Text: chunk.Text,
			})
		}

		// Execute the task given the objective, history, and retrieved context.
		var executionOutput prompts.ExecutionOutput
		err = a.runner.Run(ctx, prompts.ExecutionPrompt, &prompts.ExecutionInput{
			Objective:       objective,
			CurrentTask:     task,
			SolvedTasks:     solvedTasks,
			RelevantContext: relevantContext,
		}, &executionOutput)
		if err != nil {
			return err
		}

		result := executionOutput.CurrentTaskResult

		// Evaluate the result against the objective.
		// NOTE(review): the evaluation output is not consumed afterwards —
		// confirm whether it should gate persistence or task completion.
		var evaluationTaskOutput prompts.EvaluationTaskOutput
		err = a.runner.Run(ctx, prompts.EvaluationTasksPrompt, &prompts.EvaluationTaskInput{
			Objective:  objective,
			Task:       task,
			TaskResult: result,
		}, &evaluationTaskOutput)
		if err != nil {
			return err
		}

		// Persist the result text so later tasks can retrieve it via
		// similarity search, chunked according to chunkSize.
		documentID := model.NewDocumentID()
		_, err = a.dataStore.Upsert(ctx, []model.Document{
			{
				ID:   documentID,
				Text: result.ResultText,
			},
		}, &chunkSize)
		if err != nil {
			return err
		}

		// Re-prioritize the remaining tasks given the latest progress.
		var prioritizationOutput prompts.PriorizationOutput
		err = a.runner.Run(ctx, prompts.PrioritizationPrompt, &prompts.PrioritizationInput{
			Objective: objective,
			Tasks:     tasks,
		}, &prioritizationOutput)
		if err != nil {
			return err
		}

		tasks = prioritizationOutput.Tasks

		// Crude fixed-delay rate limiting between LLM iterations.
		time.Sleep(1 * time.Second)
	}

	log.Printf("Finished executing tasks for objective %s", objective)

	return nil
}
178 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Lightweight AGI (LW-AGI)
2 |
## LW-AGI is a Golang-based General AI with Flexible Language Models
4 |
5 |
6 | Lightweight AGI is a project aimed at creating a simple and effective Artificial General Intelligence (AGI) agent using Golang and versatile Large Language Models (LLMs) compatible with the LLMClient interface. The agent is designed to handle a wide range of objectives by refining objectives, executing tasks, evaluating results, and prioritizing further tasks. With its adaptable architecture, Lightweight AGI can be applied to various domains, including but not limited to gaming, problem-solving, and knowledge acquisition.
7 |
**This project is currently under development, and its effectiveness has not yet been thoroughly tested at the individual level. However, given the growing interest in developing general artificial intelligence, we believe it is important to share the potential value of this project and evolve it together with the community. With that in mind, we kindly ask for your understanding as you read this document.**
9 |
10 | ## Key Features
11 |
12 | - **Objective Refinement**: A unique and innovative approach to refining the main objective into smaller, more manageable objectives, enabling the AGI agent to tackle complex problems effectively.
13 | - Task Creation: Generates tasks and milestones based on the refined objectives.
14 | - Execution Agent: Executes tasks using the OpenAI GPT-4 model (or GPT-3.5 model).
15 | - Evaluation Agent: Evaluates the results of tasks and their effectiveness.
16 | - Prioritization Agent: Prioritizes tasks based on their relevance and importance.
17 | - Task Context Agent: Stores the context of tasks for future reference.
18 | - **In-Memory Vector Store**: Supports an efficient in-memory vector store to save embedding vectors, which can be queried for similarity, enabling faster access and improved performance.
19 |
20 | ## Note: Currently Supported LLM Provider
21 |
22 | While Lightweight AGI is designed to be flexible and work with various Large Language Models (LLMs) that satisfy the LLMClient interface, please be aware that, as of now, the project only supports OpenAI's GPT-4 or GPT-3.5 as the LLM provider.
23 |
24 | Future updates may include support for additional LLM providers. Stay tuned for further developments and enhancements to the Lightweight AGI project.
25 |
26 |
27 | ## Installation
28 | 1. Install Go and set up your Go workspace.
29 | 2. Clone the repository:
30 |
31 | ```bash
32 | git clone https://github.com/zawakin/lightweight-agi.git
33 | ```
34 |
35 | 3. Navigate to the repository:
36 | ```bash
37 | cd lightweight-agi
38 | ```
39 |
40 | 4. Install the required packages:
41 | ```bash
42 | go mod download
43 | ```
44 |
45 | 5. Create a .env file with your OpenAI API key:
46 | ```makefile
47 | OPENAI_API_KEY=your_openai_api_key_here
48 | ```
49 |
50 | ## Usage
51 |
52 | Run the main program:
53 |
54 | ```bash
55 | go run ./cmd/main.go
56 | ```
57 |
58 | The AGI agent will start learning how to play chess by executing tasks, evaluating results, and refining its objectives.
59 |
60 | ## Sequence Diagram
61 |
62 | 
63 |
64 | ## Flowchart
65 |
66 | 
67 |
68 | ## Example Output
69 |
70 | ```
71 | ======= Objective ======
72 | I want to learn how to play chess.
73 |
74 |
75 | ======= Refined Objective ======
76 | I want to learn the basic rules, strategies, and tactics of chess to play at an intermediate level.
77 |
78 | ======= Milestones ======
79 | 1. Familiarize with the chessboard layout and the movement of each piece.
80 | 2. Learn the basic rules of chess, including check, checkmate, and stalemate.
81 | 3. Study and practice basic opening principles and strategies.
82 | 4. Understand and apply the concept of piece value and material advantage in gameplay.
83 | 5. Learn common tactical patterns, such as forks, pins, and skewers.
84 | 6. Develop an understanding of basic endgame strategies and techniques.
85 | 7. Regularly practice playing chess games against opponents of varying skill levels.
86 | 8. Analyze your own games and learn from your mistakes.
87 | 9. Study famous chess games and learn from the strategies and tactics of renowned players.
88 | 10. Participate in local chess tournaments or online matches to gain experience and improve your skills.
89 |
90 |
91 | ======= Milestone Objective ======
92 | Familiarize with the chessboard layout and the movement of each piece.
93 |
94 |
95 | ======= Refined Milestone Objective ======
96 | Learn the chessboard layout, including the initial positioning of each chess piece, and understand the rules governing their movements and captures.
97 |
98 |
99 | ======= Task ======
100 | Identify and label each chess piece and its corresponding starting position on the chessboard.
101 |
102 |
103 | ======= Relevant Context ======
104 | ...
105 |
106 |
107 | ======= Task Result ======
108 | In a standard chess game, the chessboard consists of 8x8 squares, alternating between light and dark colors. Each player starts with 16 chess pieces, and they are positioned as follows:
109 | ....
110 |
111 | ======= Task Evaluation ======
112 | 100%: This is a good result because it accurately identifies and labels each chess piece, provides their corresponding starting positions on the chessboard, and describes the rules governing their movements and captures.
113 | ```
114 |
115 | ## Contributing
116 |
117 | Pull requests are welcome. For major changes, please open an issue first to discuss what you would like to change.
118 |
119 | ## License
120 |
121 | MIT
122 |
--------------------------------------------------------------------------------
/img/lightweight-agi-sequence-diagram.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/img/lightweight-agi-flowchart.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------