├── .dockerignore
├── .env.example
├── .gitignore
├── .gitmodules
├── Brazen
│   ├── Brazen.csproj
│   ├── Dockerfile
│   ├── Program.cs
│   ├── Properties
│   │   └── launchSettings.json
│   ├── Services
│   │   └── ChatService.cs
│   ├── Startup.cs
│   ├── appsettings.Development.json
│   └── appsettings.json
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── ETHICS.md
├── Eve
│   ├── Dockerfile
│   ├── Eve.pyproj
│   ├── agents
│   │   ├── AutoGPT.py
│   │   ├── AutonomousAgent.py
│   │   └── GenerativeAgent.py
│   ├── chains
│   │   ├── api
│   │   │   └── meteo
│   │   │       └── chain.json
│   │   ├── llm-bash
│   │   │   └── chain.json
│   │   ├── llm-checker
│   │   │   └── chain.json
│   │   ├── llm-math
│   │   │   └── chain.json
│   │   ├── llm-requests
│   │   │   └── chain.json
│   │   ├── pal
│   │   │   └── math
│   │   │       └── chain.json
│   │   ├── qa-sources
│   │   │   ├── refine
│   │   │   │   └── chain.json
│   │   │   └── rerank
│   │   │       └── chain.json
│   │   ├── qa
│   │   │   ├── chain.json
│   │   │   ├── reduce
│   │   │   │   └── chain.json
│   │   │   ├── refine
│   │   │   │   └── chain.json
│   │   │   └── rerank
│   │   │       └── chain.json
│   │   ├── sentiment
│   │   │   └── chain.json
│   │   ├── summarize
│   │   │   ├── concise
│   │   │   │   └── chain.json
│   │   │   ├── reduce
│   │   │   │   └── chain.json
│   │   │   └── refine
│   │   │       └── chain.json
│   │   └── vectordb
│   │       ├── reduce
│   │       │   └── chain.json
│   │       └── single
│   │           └── chain.json
│   ├── characters
│   │   └── Ryan.py
│   ├── data
│   │   └── dolly.jsonl
│   ├── experiments
│   │   ├── LANGCHAN_COMPLETION_EVALS.py
│   │   ├── grpc.py
│   │   ├── timeweigh.py
│   │   └── x.py
│   ├── main.py
│   ├── old
│   │   ├── Eve.py
│   │   ├── main.py
│   │   ├── text_processing.py
│   │   ├── wikipedia_api_wrapper.py
│   │   └── wolfram_alpha_api_wrapper.py
│   ├── parsers
│   │   └── CustomOutputParser.py
│   ├── prompts
│   │   ├── CustomPromptTemplate.py
│   │   ├── ryan.json
│   │   ├── ryan.txt
│   │   └── useful.txt
│   ├── protos
│   │   ├── chat.proto
│   │   ├── google
│   │   │   └── api
│   │   │       ├── annotations.proto
│   │   │       └── http.proto
│   │   └── protobuf
│   │       └── descriptor.proto
│   ├── requirements.txt
│   ├── toddleragi
│   │   ├── agents
│   │   │   ├── context_agent.py
│   │   │   ├── execution_agent.py
│   │   │   ├── openai_connector.py
│   │   │   ├── prioritzation_agent.py
│   │   │   └── task_creation_agent.py
│   │   ├── components
│   │   │   ├── IContextStorage.py
│   │   │   ├── pinecone.py
│   │   │   └── weaviate.py
│   │   └── toddleragi.py
│   ├── tools.py
│   ├── vectorstores
│   │   └── WeaviateWrapper.py
│   └── weaviate_schema.py
├── LICENSE.txt
├── Protos
│   ├── Protos.csproj
│   ├── chat.proto
│   └── google
│       ├── api
│       │   ├── annotations.proto
│       │   └── http.proto
│       └── protobuf
│           └── descriptor.proto
├── Putty.sln
├── SECURITY.md
├── docker-compose.brazen.yml
├── docker-compose.weaviate-gpu.yml
├── docker-compose.weaviate.yml
├── docker-compose.yml
├── docs
│   └── adr
│       ├── .markdownlint.yml
│       ├── 0002-weaviate-for-knowledge-graph-and-vector-storage.md
│       ├── Analysis of roles-latest-4.md
│       ├── Analysis of roles-latest.md
│       ├── Analysis of roles.md
│       ├── first.md
│       ├── template.md
│       └── weights_visualisation.puml
├── humans.txt
├── readme.md
└── searxng
    ├── settings.yml
    └── uwsgi.ini
/.dockerignore:
--------------------------------------------------------------------------------
1 | **/.classpath
2 | **/.dockerignore
3 | **/.env
4 | **/.git
5 | **/.gitignore
6 | **/.project
7 | **/.settings
8 | **/.toolstarget
9 | **/.vs
10 | **/.vscode
11 | **/*.*proj.user
12 | **/*.dbmdl
13 | **/*.jfm
14 | **/azds.yaml
15 | **/bin
16 | **/charts
17 | **/docker-compose*
18 | **/Dockerfile*
19 | **/node_modules
20 | **/npm-debug.log
21 | **/obj
22 | **/secrets.dev.yaml
23 | **/values.dev.yaml
24 | LICENSE
25 | README.md
--------------------------------------------------------------------------------
/.env.example:
--------------------------------------------------------------------------------
1 | OPENAI_API_KEY= # Your API key
2 | OPENAI_API_MODEL= # gpt-3.5-turbo, text-davinci-002, text-davinci-003, etc.
3 | OPENAI_API_PROXY= # str, dict
4 |
5 | LANGCHAIN_HANDLER=langchain
6 | LANGCHAIN_ENDPOINT=http://langchain-backend:8000
7 |
8 | PINECONE_API_KEY=
9 | PINECONE_ENVIRONMENT=
10 |
11 | WEAVIATE_HOST=http://weaviate:9001
12 | WEAVIATE_VECTORIZER=text2vec-transformers
13 | WEAVIATE_API_KEY=
14 | WEAVIATE_API_URL=
15 |
16 | CONTEXT_STORAGE_TYPE=weaviate
17 |
18 | SEARXNG_URL=http://searxng:8080/search
19 |
--------------------------------------------------------------------------------
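
A minimal sketch of how the Python side might consume these variables (python-dotenv is an assumption; plain exported environment variables work the same, and the fallback values mirror the example file above):

```python
# minimal sketch, assuming python-dotenv; defaults mirror the example values above
import os

from dotenv import load_dotenv

load_dotenv()  # populate os.environ from a local .env file

OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
WEAVIATE_HOST = os.getenv("WEAVIATE_HOST", "http://weaviate:9001")
SEARXNG_URL = os.getenv("SEARXNG_URL", "http://searxng:8080/search")
```
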
/.gitignore:
--------------------------------------------------------------------------------
1 | ## Ignore Visual Studio temporary files, build results, and
2 | ## files generated by popular Visual Studio add-ons.
3 | ##
4 | ## Get latest from https://github.com/github/gitignore/blob/main/VisualStudio.gitignore
5 |
6 | # User-specific files
7 | *.rsuser
8 | *.suo
9 | *.user
10 | *.userosscache
11 | *.sln.docstates
12 |
13 | # User-specific files (MonoDevelop/Xamarin Studio)
14 | *.userprefs
15 |
16 | # Mono auto generated files
17 | mono_crash.*
18 |
19 | # Build results
20 | [Dd]ebug/
21 | [Dd]ebugPublic/
22 | [Rr]elease/
23 | [Rr]eleases/
24 | x64/
25 | x86/
26 | [Ww][Ii][Nn]32/
27 | [Aa][Rr][Mm]/
28 | [Aa][Rr][Mm]64/
29 | bld/
30 | [Bb]in/
31 | [Oo]bj/
32 | [Ll]og/
33 | [Ll]ogs/
34 |
35 | # Visual Studio 2015/2017 cache/options directory
36 | .vs/
37 | # Uncomment if you have tasks that create the project's static files in wwwroot
38 | #wwwroot/
39 |
40 | # Visual Studio 2017 auto generated files
41 | Generated\ Files/
42 |
43 | # MSTest test Results
44 | [Tt]est[Rr]esult*/
45 | [Bb]uild[Ll]og.*
46 |
47 | # NUnit
48 | *.VisualState.xml
49 | TestResult.xml
50 | nunit-*.xml
51 |
52 | # Build Results of an ATL Project
53 | [Dd]ebugPS/
54 | [Rr]eleasePS/
55 | dlldata.c
56 |
57 | # Benchmark Results
58 | BenchmarkDotNet.Artifacts/
59 |
60 | # .NET Core
61 | project.lock.json
62 | project.fragment.lock.json
63 | artifacts/
64 |
65 | # ASP.NET Scaffolding
66 | ScaffoldingReadMe.txt
67 |
68 | # StyleCop
69 | StyleCopReport.xml
70 |
71 | # Files built by Visual Studio
72 | *_i.c
73 | *_p.c
74 | *_h.h
75 | *.ilk
76 | *.meta
77 | *.obj
78 | *.iobj
79 | *.pch
80 | *.pdb
81 | *.ipdb
82 | *.pgc
83 | *.pgd
84 | *.rsp
85 | *.sbr
86 | *.tlb
87 | *.tli
88 | *.tlh
89 | *.tmp
90 | *.tmp_proj
91 | *_wpftmp.csproj
92 | *.log
93 | *.tlog
94 | *.vspscc
95 | *.vssscc
96 | .builds
97 | *.pidb
98 | *.svclog
99 | *.scc
100 |
101 | # Chutzpah Test files
102 | _Chutzpah*
103 |
104 | # Visual C++ cache files
105 | ipch/
106 | *.aps
107 | *.ncb
108 | *.opendb
109 | *.opensdf
110 | *.sdf
111 | *.cachefile
112 | *.VC.db
113 | *.VC.VC.opendb
114 |
115 | # Visual Studio profiler
116 | *.psess
117 | *.vsp
118 | *.vspx
119 | *.sap
120 |
121 | # Visual Studio Trace Files
122 | *.e2e
123 |
124 | # TFS 2012 Local Workspace
125 | $tf/
126 |
127 | # Guidance Automation Toolkit
128 | *.gpState
129 |
130 | # ReSharper is a .NET coding add-in
131 | _ReSharper*/
132 | *.[Rr]e[Ss]harper
133 | *.DotSettings.user
134 |
135 | # TeamCity is a build add-in
136 | _TeamCity*
137 |
138 | # DotCover is a Code Coverage Tool
139 | *.dotCover
140 |
141 | # AxoCover is a Code Coverage Tool
142 | .axoCover/*
143 | !.axoCover/settings.json
144 |
145 | # Coverlet is a free, cross platform Code Coverage Tool
146 | coverage*.json
147 | coverage*.xml
148 | coverage*.info
149 |
150 | # Visual Studio code coverage results
151 | *.coverage
152 | *.coveragexml
153 |
154 | # NCrunch
155 | _NCrunch_*
156 | .*crunch*.local.xml
157 | nCrunchTemp_*
158 |
159 | # MightyMoose
160 | *.mm.*
161 | AutoTest.Net/
162 |
163 | # Web workbench (sass)
164 | .sass-cache/
165 |
166 | # Installshield output folder
167 | [Ee]xpress/
168 |
169 | # DocProject is a documentation generator add-in
170 | DocProject/buildhelp/
171 | DocProject/Help/*.HxT
172 | DocProject/Help/*.HxC
173 | DocProject/Help/*.hhc
174 | DocProject/Help/*.hhk
175 | DocProject/Help/*.hhp
176 | DocProject/Help/Html2
177 | DocProject/Help/html
178 |
179 | # Click-Once directory
180 | publish/
181 |
182 | # Publish Web Output
183 | *.[Pp]ublish.xml
184 | *.azurePubxml
185 | # Note: Comment the next line if you want to checkin your web deploy settings,
186 | # but database connection strings (with potential passwords) will be unencrypted
187 | *.pubxml
188 | *.publishproj
189 |
190 | # Microsoft Azure Web App publish settings. Comment the next line if you want to
191 | # checkin your Azure Web App publish settings, but sensitive information contained
192 | # in these scripts will be unencrypted
193 | PublishScripts/
194 |
195 | # NuGet Packages
196 | *.nupkg
197 | # NuGet Symbol Packages
198 | *.snupkg
199 | # The packages folder can be ignored because of Package Restore
200 | **/[Pp]ackages/*
201 | # except build/, which is used as an MSBuild target.
202 | !**/[Pp]ackages/build/
203 | # Uncomment if necessary however generally it will be regenerated when needed
204 | #!**/[Pp]ackages/repositories.config
205 | # NuGet v3's project.json files produces more ignorable files
206 | *.nuget.props
207 | *.nuget.targets
208 |
209 | # Microsoft Azure Build Output
210 | csx/
211 | *.build.csdef
212 |
213 | # Microsoft Azure Emulator
214 | ecf/
215 | rcf/
216 |
217 | # Windows Store app package directories and files
218 | AppPackages/
219 | BundleArtifacts/
220 | Package.StoreAssociation.xml
221 | _pkginfo.txt
222 | *.appx
223 | *.appxbundle
224 | *.appxupload
225 |
226 | # Visual Studio cache files
227 | # files ending in .cache can be ignored
228 | *.[Cc]ache
229 | # but keep track of directories ending in .cache
230 | !?*.[Cc]ache/
231 |
232 | # Others
233 | ClientBin/
234 | ~$*
235 | *~
236 | *.dbmdl
237 | *.dbproj.schemaview
238 | *.jfm
239 | *.pfx
240 | *.publishsettings
241 | orleans.codegen.cs
242 |
243 | # Including strong name files can present a security risk
244 | # (https://github.com/github/gitignore/pull/2483#issue-259490424)
245 | #*.snk
246 |
247 | # Since there are multiple workflows, uncomment next line to ignore bower_components
248 | # (https://github.com/github/gitignore/pull/1529#issuecomment-104372622)
249 | #bower_components/
250 |
251 | # RIA/Silverlight projects
252 | Generated_Code/
253 |
254 | # Backup & report files from converting an old project file
255 | # to a newer Visual Studio version. Backup files are not needed,
256 | # because we have git ;-)
257 | _UpgradeReport_Files/
258 | Backup*/
259 | UpgradeLog*.XML
260 | UpgradeLog*.htm
261 | ServiceFabricBackup/
262 | *.rptproj.bak
263 |
264 | # SQL Server files
265 | *.mdf
266 | *.ldf
267 | *.ndf
268 |
269 | # Business Intelligence projects
270 | *.rdl.data
271 | *.bim.layout
272 | *.bim_*.settings
273 | *.rptproj.rsuser
274 | *- [Bb]ackup.rdl
275 | *- [Bb]ackup ([0-9]).rdl
276 | *- [Bb]ackup ([0-9][0-9]).rdl
277 |
278 | # Microsoft Fakes
279 | FakesAssemblies/
280 |
281 | # GhostDoc plugin setting file
282 | *.GhostDoc.xml
283 |
284 | # Node.js Tools for Visual Studio
285 | .ntvs_analysis.dat
286 | node_modules/
287 |
288 | # Visual Studio 6 build log
289 | *.plg
290 |
291 | # Visual Studio 6 workspace options file
292 | *.opt
293 |
294 | # Visual Studio 6 auto-generated workspace file (contains which files were open etc.)
295 | *.vbw
296 |
297 | # Visual Studio 6 auto-generated project file (contains which files were open etc.)
298 | *.vbp
299 |
300 | # Visual Studio 6 workspace and project file (working project files containing files to include in project)
301 | *.dsw
302 | *.dsp
303 |
304 | # Visual Studio 6 technical files
305 | *.ncb
306 | *.aps
307 |
308 | # Visual Studio LightSwitch build output
309 | **/*.HTMLClient/GeneratedArtifacts
310 | **/*.DesktopClient/GeneratedArtifacts
311 | **/*.DesktopClient/ModelManifest.xml
312 | **/*.Server/GeneratedArtifacts
313 | **/*.Server/ModelManifest.xml
314 | _Pvt_Extensions
315 |
316 | # Paket dependency manager
317 | .paket/paket.exe
318 | paket-files/
319 |
320 | # FAKE - F# Make
321 | .fake/
322 |
323 | # CodeRush personal settings
324 | .cr/personal
325 |
326 | # Python Tools for Visual Studio (PTVS)
327 | __pycache__/
328 | *.pyc
329 |
330 | # Cake - Uncomment if you are using it
331 | # tools/**
332 | # !tools/packages.config
333 |
334 | # Tabs Studio
335 | *.tss
336 |
337 | # Telerik's JustMock configuration file
338 | *.jmconfig
339 |
340 | # BizTalk build output
341 | *.btp.cs
342 | *.btm.cs
343 | *.odx.cs
344 | *.xsd.cs
345 |
346 | # OpenCover UI analysis results
347 | OpenCover/
348 |
349 | # Azure Stream Analytics local run output
350 | ASALocalRun/
351 |
352 | # MSBuild Binary and Structured Log
353 | *.binlog
354 |
355 | # NVidia Nsight GPU debugger configuration file
356 | *.nvuser
357 |
358 | # MFractors (Xamarin productivity tool) working folder
359 | .mfractor/
360 |
361 | # Local History for Visual Studio
362 | .localhistory/
363 |
364 | # Visual Studio History (VSHistory) files
365 | .vshistory/
366 |
367 | # BeatPulse healthcheck temp database
368 | healthchecksdb
369 |
370 | # Backup folder for Package Reference Convert tool in Visual Studio 2017
371 | MigrationBackup/
372 |
373 | # Ionide (cross platform F# VS Code tools) working folder
374 | .ionide/
375 |
376 | # Fody - auto-generated XML schema
377 | FodyWeavers.xsd
378 |
379 | # VS Code files for those working on multiple tools
380 | .vscode/*
381 | !.vscode/settings.json
382 | !.vscode/tasks.json
383 | !.vscode/launch.json
384 | !.vscode/extensions.json
385 | *.code-workspace
386 |
387 | # Local History for Visual Studio Code
388 | .history/
389 |
390 | # Windows Installer files from build outputs
391 | *.cab
392 | *.msi
393 | *.msix
394 | *.msm
395 | *.msp
396 |
397 | # JetBrains Rider
398 | *.sln.iml
399 |
400 |
401 | # Byte-compiled / optimized / DLL files
402 | __pycache__/
403 | *.py[cod]
404 | *$py.class
405 |
406 | # C extensions
407 | *.so
408 |
409 | # Distribution / packaging
410 | .Python
411 | build/
412 | develop-eggs/
413 | dist/
414 | downloads/
415 | eggs/
416 | .eggs/
417 | lib/
418 | lib64/
419 | parts/
420 | sdist/
421 | var/
422 | wheels/
423 | share/python-wheels/
424 | *.egg-info/
425 | .installed.cfg
426 | *.egg
427 | MANIFEST
428 |
429 | # PyInstaller
430 | # Usually these files are written by a python script from a template
431 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
432 | *.manifest
433 | *.spec
434 |
435 | # Installer logs
436 | pip-log.txt
437 | pip-delete-this-directory.txt
438 |
439 | # Unit test / coverage reports
440 | htmlcov/
441 | .tox/
442 | .nox/
443 | .coverage
444 | .coverage.*
445 | .cache
446 | nosetests.xml
447 | coverage.xml
448 | *.cover
449 | *.py,cover
450 | .hypothesis/
451 | .pytest_cache/
452 | cover/
453 |
454 | # Translations
455 | *.mo
456 | *.pot
457 |
458 | # Django stuff:
459 | *.log
460 | local_settings.py
461 | db.sqlite3
462 | db.sqlite3-journal
463 |
464 | # Flask stuff:
465 | instance/
466 | .webassets-cache
467 |
468 | # Scrapy stuff:
469 | .scrapy
470 |
471 | # Sphinx documentation
472 | docs/_build/
473 |
474 | # PyBuilder
475 | .pybuilder/
476 | target/
477 |
478 | # Jupyter Notebook
479 | .ipynb_checkpoints
480 |
481 | # IPython
482 | profile_default/
483 | ipython_config.py
484 |
485 | # pyenv
486 | # For a library or package, you might want to ignore these files since the code is
487 | # intended to run in multiple environments; otherwise, check them in:
488 | # .python-version
489 |
490 | # pipenv
491 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
492 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
493 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
494 | # install all needed dependencies.
495 | #Pipfile.lock
496 |
497 | # poetry
498 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
499 | # This is especially recommended for binary packages to ensure reproducibility, and is more
500 | # commonly ignored for libraries.
501 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
502 | #poetry.lock
503 |
504 | # pdm
505 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
506 | #pdm.lock
507 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
508 | # in version control.
509 | # https://pdm.fming.dev/#use-with-ide
510 | .pdm.toml
511 |
512 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
513 | __pypackages__/
514 |
515 | # Celery stuff
516 | celerybeat-schedule
517 | celerybeat.pid
518 |
519 | # SageMath parsed files
520 | *.sage.py
521 |
522 | # Environments
523 | .env
524 | .venv
525 | env/
526 | venv/
527 | ENV/
528 | env.bak/
529 | venv.bak/
530 |
531 | # Spyder project settings
532 | .spyderproject
533 | .spyproject
534 |
535 | # Rope project settings
536 | .ropeproject
537 |
538 | # mkdocs documentation
539 | /site
540 |
541 | # mypy
542 | .mypy_cache/
543 | .dmypy.json
544 | dmypy.json
545 |
546 | # Pyre type checker
547 | .pyre/
548 |
549 | # pytype static type analyzer
550 | .pytype/
551 |
552 | # Cython debug symbols
553 | cython_debug/
554 |
555 | # PyCharm
556 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
557 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
558 | # and can be added to the global gitignore or merged into this file. For a more nuclear
559 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
560 | #.idea/
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "babyagi"]
2 | path = babyagi
3 | url = https://github.com/webgrip/babyagi
4 |
--------------------------------------------------------------------------------
/Brazen/Brazen.csproj:
--------------------------------------------------------------------------------
 1 | <Project Sdk="Microsoft.NET.Sdk.Web">
 2 |
 3 |   <PropertyGroup>
 4 |     <TargetFramework>net7.0</TargetFramework>
 5 |     <ImplicitUsings>enable</ImplicitUsings>
 6 |     <Nullable>enable</Nullable>
 7 |     <UserSecretsId>9f9706b5-eb2e-4f57-9d30-633ee8a213da</UserSecretsId>
 8 |     <DockerDefaultTargetOS>Linux</DockerDefaultTargetOS>
 9 |   </PropertyGroup>
10 |
11 | </Project>
--------------------------------------------------------------------------------
/Brazen/Dockerfile:
--------------------------------------------------------------------------------
1 | #See https://aka.ms/customizecontainer to learn how to customize your debug container and how Visual Studio uses this Dockerfile to build your images for faster debugging.
2 |
3 | FROM mcr.microsoft.com/dotnet/aspnet:7.0 AS base
4 | WORKDIR /app
5 | EXPOSE 80
6 | EXPOSE 443
7 |
8 | FROM mcr.microsoft.com/dotnet/sdk:7.0 AS build
9 | WORKDIR /src
10 | COPY ["Brazen/Brazen.csproj", "Brazen/"]
11 | RUN dotnet restore "Brazen/Brazen.csproj"
12 | COPY . .
13 | WORKDIR "/src/Brazen"
14 | RUN dotnet build "Brazen.csproj" -c Release -o /app/build
15 |
16 | FROM build AS publish
17 | RUN dotnet publish "Brazen.csproj" -c Release -o /app/publish /p:UseAppHost=false
18 |
19 | FROM base AS final
20 | WORKDIR /app
21 | COPY --from=publish /app/publish .
22 | ENTRYPOINT ["dotnet", "Brazen.dll"]
--------------------------------------------------------------------------------
/Brazen/Program.cs:
--------------------------------------------------------------------------------
1 | namespace WebGrip.Putty.Brazen
2 | {
3 | public class Program
4 | {
5 | public static void Main(string[] args)
6 | {
7 | CreateHostBuilder(args).Build().Run();
8 | }
9 |
10 | public static IHostBuilder CreateHostBuilder(string[] args) =>
11 | Host.CreateDefaultBuilder(args)
12 | .ConfigureWebHostDefaults(webBuilder =>
13 | {
14 | webBuilder.UseStartup<Startup>();
15 | });
16 | }
17 | }
18 |
--------------------------------------------------------------------------------
/Brazen/Properties/launchSettings.json:
--------------------------------------------------------------------------------
1 | {
2 | "profiles": {
3 | "http": {
4 | "commandName": "Project",
5 | "environmentVariables": {
6 | "ASPNETCORE_ENVIRONMENT": "Development"
7 | },
8 | "dotnetRunMessages": true,
9 | "applicationUrl": "http://localhost:5056"
10 | },
11 | "https": {
12 | "commandName": "Project",
13 | "environmentVariables": {
14 | "ASPNETCORE_ENVIRONMENT": "Development"
15 | },
16 | "dotnetRunMessages": true,
17 | "applicationUrl": "https://localhost:7082;http://localhost:5056"
18 | },
19 | "Docker": {
20 | "commandName": "Docker",
21 | "launchUrl": "{Scheme}://{ServiceHost}:{ServicePort}",
22 | "publishAllPorts": true
23 | },
24 | "WSL": {
25 | "commandName": "WSL2",
26 | "launchUrl": "https://localhost:7082",
27 | "environmentVariables": {
28 | "ASPNETCORE_ENVIRONMENT": "Development",
29 | "ASPNETCORE_URLS": "https://localhost:7082;http://localhost:5056"
30 | },
31 | "distributionName": ""
32 | }
33 | }
34 | }
--------------------------------------------------------------------------------
/Brazen/Services/ChatService.cs:
--------------------------------------------------------------------------------
1 | using Grpc.Core;
2 | using Standard.AI.OpenAI.Clients.OpenAIs;
3 | using Standard.AI.OpenAI.Models.Configurations;
4 | using Standard.AI.OpenAI.Models.Services.Foundations.ChatCompletions;
5 | using WebGrip.Protos;
6 |
7 | namespace WebGrip.Putty.Brazen.Services
8 | {
9 | public class ChatService : Protos.ChatService.ChatServiceBase
10 | {
11 | private readonly ILogger _logger;
12 |
13 | public ChatService(ILogger logger)
14 | {
15 | _logger = logger;
16 | }
17 |
18 | public override async Task AskQuestion(QuestionRequest request, ServerCallContext context)
19 | {
20 | _logger.LogDebug(request.ToString());
21 |
22 |
23 | var response = new QuestionResponse();
24 |
25 | try
26 | {
27 | // do request
28 |
29 | response.Status = "1";
30 | response.Message = "result";
31 | }
32 | catch (Exception ex)
33 | {
34 | response.Status = "2";
35 |
36 | _logger.LogError(ex.ToString());
37 | _logger.LogError($"Error doing request for question: {ex.Message}");
38 |
39 | var openAIConfigurations = new OpenAIConfigurations
40 | {
41 | ApiKey = "YOUR_API_KEY_HERE", // add config
42 | OrganizationId = "YOUR_OPTIONAL_ORG_ID_HERE" //optional
43 | };
44 |
45 | var openAIClient = new OpenAIClient(openAIConfigurations);
46 |
47 | var chatCompletion = new ChatCompletion
48 | {
49 | Request = new ChatCompletionRequest
50 | {
51 | Model = "gpt-3.5-turbo",
52 | Messages = new ChatCompletionMessage[]
53 | {
54 | new ChatCompletionMessage
55 | {
56 |
57 | Content = "What is c#?",
58 | Role = "user",
59 | }
60 | },
61 | }
62 | };
63 |
64 | ChatCompletion resultChatCompletion = await openAIClient.ChatCompletions.SendChatCompletionAsync(chatCompletion);
65 |
66 | Array.ForEach(
67 | resultChatCompletion.Response.Choices,
68 | choice =>
69 | Console.WriteLine(value: $"{choice.Message.Role}: {choice.Message.Content}")
70 | );
71 |
72 | //var errorMessages = new Dictionary
73 | //{
74 | // { GraphErrorCode.InvalidRequest.ToString(), "Invalid request. Please check the provided user data." },
75 | // { GraphErrorCode.AuthenticationFailure.ToString(), "Authentication failed. Check the credentials and required scopes." },
76 | // { GraphErrorCode.GeneralException.ToString(), "A network error or service outage occurred. Please try again later." },
77 | // { GraphErrorCode.ServiceNotAvailable.ToString(), "A network error or service outage occurred. Please try again later." }
78 | //};
79 |
80 | //response.Message = errorMessages.TryGetValue(ex.ResponseStatusCode.ToString(), out var message) ? message : $"An unknown error occurred: {ex.Message}";
81 |
82 | response.Message = $"Unexpected error: {ex.Message}";
83 | }
84 |
85 | return response;
86 | }
87 | }
88 | }
--------------------------------------------------------------------------------
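
A hypothetical Python client for the AskQuestion RPC above (the chat_pb2 module and request field names are assumptions; they depend on protos/chat.proto, which is not shown here, and the port comes from launchSettings.json):

```python
# hypothetical client sketch; generated module and request field names are assumptions
import grpc

import chat_pb2
import chat_pb2_grpc

channel = grpc.insecure_channel("localhost:5056")  # http profile port from launchSettings.json
stub = chat_pb2_grpc.ChatServiceStub(channel)

# QuestionRequest's field name is assumed; status/message mirror QuestionResponse above
reply = stub.AskQuestion(chat_pb2.QuestionRequest(question="What is C#?"))
print(reply.status, reply.message)
```
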
/Brazen/Startup.cs:
--------------------------------------------------------------------------------
1 | using Microsoft.OpenApi.Models;
2 | using Microsoft.IdentityModel.Logging;
3 | using Microsoft.Identity.Web;
4 | using Microsoft.AspNetCore.Authorization;
5 |
6 | namespace WebGrip.Putty.Brazen
7 | {
8 | public class Startup
9 | {
10 | public IConfiguration Configuration { get; }
11 |
12 | public Startup(IConfiguration configuration)
13 | {
14 | Configuration = configuration;
15 | }
16 |
17 | public void ConfigureServices(IServiceCollection services)
18 | {
19 | services.AddOptions();
20 |
21 | //var instance = Configuration["AzureAd:Instance"];
22 | //var tenantId = Configuration["AzureAd:TenantId"];
23 | //var clientId = Configuration["AzureAd:ClientId"];
24 | //var clientSecret = Configuration["AzureAd:ClientSecret"];
25 |
26 | services.Configure<CookiePolicyOptions>(options => // TODO look into this more, what can we put in here?
27 | {
28 | // This lambda determines whether user consent for non-essential cookies is needed for a given request.
29 | options.CheckConsentNeeded = context => true;
30 | options.MinimumSameSitePolicy = SameSiteMode.Unspecified;
31 | // Handling SameSite cookie according to https://docs.microsoft.com/en-us/aspnet/core/security/samesite?view=aspnetcore-3.1
32 | options.HandleSameSiteCookieCompatibility();
33 | });
34 |
35 |
36 | ConfigureSwagger(services);
37 | }
38 |
39 |
40 | private void ConfigureSwagger(IServiceCollection services)
41 | {
42 | Dictionary<string, string> scopes = new Dictionary<string, string> // TODO this needs to go into appsettings.json
43 | {
44 | //{ "https://graph.microsoft.com/.default", "Graph" },
45 | { "User.Read", "Reading user" }
46 | };
47 |
48 |
49 | services.AddGrpc().AddJsonTranscoding();
50 | services.AddGrpcSwagger();
51 |
52 | services.AddSwaggerGen(c =>
53 | {
54 | c.SwaggerDoc(
55 | "v1",
56 | new OpenApiInfo
57 | {
58 | Title = "gRPC transcoding",
59 | Version = "v1"
60 | }
61 | );
62 | });
63 | }
64 |
65 | public void Configure(IApplicationBuilder app, IWebHostEnvironment env)
66 | {
67 | if (env.IsDevelopment())
68 | {
69 | IdentityModelEventSource.ShowPII = true;
70 | IdentityModelEventSource.HeaderWritten = true;
71 | app.UseDeveloperExceptionPage();
72 | }
73 |
74 | app.UseHttpsRedirection();
75 | app.UseRouting();
76 |
77 | app.UseAuthentication();
78 | app.UseAuthorization();
79 |
80 | app.UseSwagger();
81 |
82 | app.UseSwaggerUI(c => {
83 | c.OAuthClientId(Configuration["AzureAd:ClientId"]);
84 | c.SwaggerEndpoint("/swagger/v1/swagger.json", "My API V1");
85 | });
86 |
87 |
88 | app.UseEndpoints(endpoints =>
89 | {
90 | endpoints.MapGrpcService<Services.ChatService>();
91 | endpoints.MapGet("/", async context =>
92 | {
93 | await context.Response.WriteAsync("Communication with gRPC endpoints must be made through a gRPC client. To learn how to create a client, visit: https://go.microsoft.com/fwlink/?linkid=2086909");
94 | });
95 | });
96 | }
97 | public class HasScopeRequirement : IAuthorizationRequirement
98 | {
99 | public string Scope { get; }
100 | public string Issuer { get; }
101 |
102 | public HasScopeRequirement(string scope, string issuer)
103 | {
104 | Scope = scope;
105 | Issuer = issuer;
106 | }
107 | }
108 | }
109 | }
110 |
--------------------------------------------------------------------------------
/Brazen/appsettings.Development.json:
--------------------------------------------------------------------------------
1 | {
2 | "Logging": {
3 | "LogLevel": {
4 | "Default": "Information",
5 | "Microsoft.AspNetCore": "Warning"
6 | }
7 | }
8 | }
9 |
--------------------------------------------------------------------------------
/Brazen/appsettings.json:
--------------------------------------------------------------------------------
1 | {
2 | "Logging": {
3 | "LogLevel": {
4 | "Default": "Information",
5 | "Microsoft.AspNetCore": "Warning"
6 | }
7 | },
8 | "AllowedHosts": "*",
9 | "Kestrel": {
10 | "EndpointDefaults": {
11 | "Protocols": "Http2"
12 | }
13 | }
14 | }
15 |
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Code of Conduct
2 |
3 | ## Our Pledge
4 |
5 | In the interest of fostering a professional and respectful environment, we as contributors and maintainers pledge to make participation in our project and our community a positive experience for everyone, regardless of their background or level of experience.
6 |
7 | ## Our Standards
8 |
9 | Examples of behavior that contributes to creating a positive environment include:
10 |
11 | * Using professional and respectful language
12 | * Being open to differing viewpoints and experiences
13 | * Accepting constructive criticism
14 | * Focusing on what is best for the project and the community
15 | * Respecting the time and effort of community members
16 |
17 | Examples of unacceptable behavior by participants include:
18 |
19 | * Insulting, derogatory comments, or personal attacks
20 | * Public or private harassment
21 | * Publishing others' private information, such as physical or electronic addresses, without explicit permission
22 | * Other conduct that could reasonably be considered inappropriate in a professional setting
23 |
24 | ## Our Responsibilities
25 |
26 | Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
27 |
28 | Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
29 |
30 | ## Scope
31 |
32 | This Code of Conduct applies within all project spaces, and it also applies when an individual is representing the project or its community in public spaces. Representation of a project may be further defined and clarified by project maintainers.
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | CONTRIBUTING.md
2 | ===============
3 |
4 | First of all, thank you for your interest in contributing to PuttyGPT! We appreciate your time and effort to help make this project better.
5 |
6 | Please note that this is a **very early version** of the project, and we are still in the process of wrapping things up, wiring components together, and experimenting with new ideas. As such, the project may undergo significant changes and updates as we continue to evolve and refine our vision.
7 |
8 | With that said, we still welcome contributions in various forms. Please follow these guidelines to ensure a smooth and effective contribution process.
9 |
10 | How to Contribute
11 | -------------------
12 |
13 | 1. **Fork the repository**: Start by forking the [PuttyGPT repository](https://github.com/yourusername/puttygpt) and creating a local clone on your computer.
14 | 2. **Create a branch**: Create a new branch based on the `main` branch. Use a descriptive name for your branch, such as `feature/your-feature-name` or `fix/your-fix-name`.
15 | 3. **Commit your changes**: Make your changes or additions to the code, ensuring that your changes adhere to the project's code style and guidelines. Commit your changes with a clear and concise commit message.
16 | 4. **Test your changes**: Before submitting your contribution, ensure that your changes pass all tests and do not introduce any new bugs or issues.
17 | 5. **Create a pull request**: Once your changes are ready, push your branch to your forked repository and create a pull request against the `main` branch of the PuttyGPT repository. In the pull request, provide a detailed description of your changes and the reasoning behind them.
18 | 6. **Wait for a review**: Maintainers will review your pull request, and they may request changes or provide feedback. Please be patient and responsive to their comments.
19 |
20 | Important Notice
21 | -------------------
22 |
23 | While we appreciate all contributions, please keep in mind that we are in the early stages of the project, and significant changes may occur. Therefore, please understand that your contribution might not be merged immediately, or it might be affected by subsequent updates to the project. We still encourage you to contribute, as your ideas and expertise can help shape the future of PuttyGPT.
24 |
25 | Code of Conduct
26 | -----------------
27 |
28 | As a contributor, you are expected to uphold our [Code of Conduct](./CODE_OF_CONDUCT.md). Please be respectful, inclusive, and professional in all interactions with the project and its community.
29 |
30 | Questions or Concerns
31 | -----------------------
32 |
33 | If you have any questions or concerns, feel free to reach out to the project maintainers or open an issue on the repository.
34 |
35 | Again, thank you for your interest in contributing to PuttyGPT! Your efforts can help make this project even more successful.
36 |
37 |
--------------------------------------------------------------------------------
/ETHICS.md:
--------------------------------------------------------------------------------
1 | # Ethics
2 |
3 | This project aims to adhere to ethical principles in its development and usage. As contributors and maintainers, we strive to ensure that the project:
4 |
5 | 1. Respects user privacy and does not collect personal information without explicit consent.
6 | 2. Does not promote or facilitate illegal activities.
7 | 3. Aims to minimize any potential harm or negative impact on individuals, communities, or the environment.
8 | 4. Encourages responsible use and application of the technology.
9 |
10 | Users and contributors are expected to use the project in a manner that aligns with these ethical principles. The project maintainers reserve the right to take appropriate action in response to any unethical use of the project or its components.
--------------------------------------------------------------------------------
/Eve/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:alpine3.17 AS builder
2 |
3 | RUN apk update && \
4 | apk upgrade
5 |
6 | RUN apk add --no-cache --virtual .build-dependencies python3 py3-pip build-base gcc musl-dev python3-dev openblas-dev libffi-dev openssl-dev g++ gfortran freetype-dev pkgconfig dumb-init musl libc6-compat linux-headers build-base bash git ca-certificates freetype libgfortran libgcc libstdc++ openblas tcl tk
7 | RUN apk add --virtual build-runtime openssh git
8 | RUN ln -s /usr/include/locale.h /usr/include/xlocale.h
9 | RUN pip3 install --upgrade pip setuptools
10 | RUN ln -sf /usr/bin/python3 /usr/bin/python
11 | RUN ln -sf pip3 /usr/bin/pip
12 | RUN rm -r /root/.cache
13 | RUN rm -rf /var/cache/apk/*
14 |
15 | ENV VIRTUAL_ENV=/opt/venv
16 | RUN python3 -m venv $VIRTUAL_ENV
17 | ENV PATH="$VIRTUAL_ENV/bin:$PATH"
18 |
19 | COPY Eve/requirements.txt .
20 |
21 | RUN pip3 install --upgrade pip
22 | RUN pip3 install --no-cache-dir -r requirements.txt
23 |
24 | FROM builder AS final
25 |
26 | COPY --from=builder /opt/venv /opt/venv
27 | COPY --from=builder /usr/include/xlocale.h /usr/include/xlocale.h
28 | ENV PATH=/opt/venv/bin:$PATH
29 |
30 | WORKDIR /app
31 |
32 | COPY ./Eve .
33 |
34 | #CMD [ "/opt/venv/bin/python", "main.py" ]
35 | ENTRYPOINT ["tail", "-f", "/dev/null"]
--------------------------------------------------------------------------------
/Eve/Eve.pyproj:
--------------------------------------------------------------------------------
  1 | <Project DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003" ToolsVersion="4.0">
  2 |   <PropertyGroup>
  3 |     <Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
  4 |     <SchemaVersion>2.0</SchemaVersion>
  5 |     <ProjectGuid>7b277655-816a-4454-bec6-d0cc75996cf6</ProjectGuid>
  6 |     <ProjectHome>.</ProjectHome>
  7 |     <StartupFile>Eve.py</StartupFile>
  8 |     <SearchPath>.</SearchPath>
  9 |     <WorkingDirectory>.</WorkingDirectory>
 10 |     <Name>Eve</Name>
 11 |     <RootNamespace>Eve</RootNamespace>
 12 |     <InterpreterId>MSBuild|env|$(MSBuildProjectFullPath)</InterpreterId>
 13 |     <IsWindowsApplication>True</IsWindowsApplication>
 14 |   </PropertyGroup>
 15 |   <PropertyGroup Condition=" '$(Configuration)' == 'Debug' ">
 16 |     <DebugSymbols>true</DebugSymbols>
 17 |     <EnableUnmanagedDebugging>false</EnableUnmanagedDebugging>
 18 |   </PropertyGroup>
 19 |   <PropertyGroup Condition=" '$(Configuration)' == 'Release' ">
 20 |     <DebugSymbols>true</DebugSymbols>
 21 |     <EnableUnmanagedDebugging>false</EnableUnmanagedDebugging>
 22 |   </PropertyGroup>
 23 |   <ItemGroup>
 24 |     <Interpreter Include="env1\">
 25 |       <Id>env1</Id>
 26 |       <Version>3.9</Version>
 27 |       <Description>env1 (Python 3.9 (64-bit))</Description>
 28 |       <InterpreterPath>Scripts\python.exe</InterpreterPath>
 29 |       <WindowsInterpreterPath>Scripts\pythonw.exe</WindowsInterpreterPath>
 30 |       <PathEnvironmentVariable>PYTHONPATH</PathEnvironmentVariable>
 31 |       <Architecture>X64</Architecture>
 32 |     </Interpreter>
 33 |     <Interpreter Include="env\">
 34 |       <Id>env</Id>
 35 |       <Version>3.11</Version>
 36 |       <Description>env (Python 3.11)</Description>
 37 |       <InterpreterPath>Scripts\python.exe</InterpreterPath>
 38 |       <WindowsInterpreterPath>Scripts\pythonw.exe</WindowsInterpreterPath>
 39 |       <PathEnvironmentVariable>PYTHONPATH</PathEnvironmentVariable>
 40 |       <Architecture>X64</Architecture>
 41 |     </Interpreter>
 42 |   </ItemGroup>
 43 |   <Import Project="$(MSBuildExtensionsPath32)\Microsoft\VisualStudio\v$(VisualStudioVersion)\Python Tools\Microsoft.PythonTools.targets" />
 44 | </Project>
--------------------------------------------------------------------------------
/Eve/agents/AutoGPT.py:
--------------------------------------------------------------------------------
1 | # General
2 | import pandas as pd
3 | from langchain.experimental.autonomous_agents.autogpt.agent import AutoGPT
4 | from langchain.chat_models import ChatOpenAI
5 |
6 | from langchain.agents.agent_toolkits.pandas.base import create_pandas_dataframe_agent
7 | from langchain.docstore.document import Document
8 | from langchain.chains import RetrievalQA
9 | import asyncio
10 | import json
11 | from duckduckgo_search import ddg
12 |
13 |
14 | llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=1.0)
15 |
16 |
17 | # Tools
18 | from typing import Optional
19 | from langchain.agents import tool
20 | from langchain.tools.file_management.read import ReadFileTool
21 | from langchain.tools.file_management.write import WriteFileTool
22 |
23 | @tool
24 | def process_csv(csv_file_path: str, instructions: str, output_path: Optional[str] = None) -> str:
25 | """Process a CSV by with pandas in a limited REPL. Only use this after writing data to disk as a csv file. Any figures must be saved to disk to be viewed by the human. Instructions should be written in natural language, not code. Assume the dataframe is already loaded."""
26 | try:
27 | df = pd.read_csv(csv_file_path)
28 | except Exception as e:
29 | return f"Error: {e}"
30 | agent = create_pandas_dataframe_agent(llm, df, max_iterations=30, verbose=True)
31 | if output_path is not None:
32 | instructions += f" Save output to disk at {output_path}"
33 | try:
34 | return agent.run(instructions)
35 | except Exception as e:
36 | return f"Error: {e}"
37 |
38 |
39 |
40 |
41 |
42 | @tool
43 | def web_search(query: str, num_results: int = 8) -> str:
44 | """Useful for general internet search queries."""
45 | search_results = []
46 | if not query:
47 | return json.dumps(search_results)
48 |
49 | results = ddg(query, max_results=num_results)
50 | if not results:
51 | return json.dumps(search_results)
52 |
53 | for j in results:
54 | search_results.append(j)
55 |
56 | return json.dumps(search_results, ensure_ascii=False, indent=4)
57 |
58 |
59 | #async def async_load_playwright(url: str) -> str:
60 | # """Load the specified URLs using Playwright and parse using BeautifulSoup."""
61 | # from bs4 import BeautifulSoup
62 | # from playwright.async_api import async_playwright
63 |
64 | # results = ""
65 | # async with async_playwright() as p:
66 | # browser = await p.chromium.launch(headless=True)
67 | # try:
68 | # page = await browser.new_page()
69 | # await page.goto(url)
70 |
71 | # page_source = await page.content()
72 | # soup = BeautifulSoup(page_source, "html.parser")
73 |
74 | # for script in soup(["script", "style"]):
75 | # script.extract()
76 |
77 | # text = soup.get_text()
78 | # lines = (line.strip() for line in text.splitlines())
79 | # chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
80 | # results = "\n".join(chunk for chunk in chunks if chunk)
81 | # except Exception as e:
82 | # results = f"Error: {e}"
83 | # await browser.close()
84 | # return results
85 |
86 | def run_async(coro):
87 | event_loop = asyncio.get_event_loop()
88 | return event_loop.run_until_complete(coro)
89 |
90 | #@tool
91 | #def browse_web_page(url: str) -> str:
92 | # """Verbose way to scrape a whole webpage. Likely to cause issues parsing."""
93 | # return run_async(async_load_playwright(url))
94 |
95 |
96 | from langchain.tools.base import BaseTool
97 | from langchain.text_splitter import RecursiveCharacterTextSplitter
98 |
99 | from langchain.document_loaders import WebBaseLoader
100 | from pydantic import Field
101 | from langchain.chains.qa_with_sources.loading import load_qa_with_sources_chain, BaseCombineDocumentsChain
102 |
103 | def _get_text_splitter():
104 | return RecursiveCharacterTextSplitter(
105 | # Set a really small chunk size, just to show.
106 | chunk_size = 500,
107 | chunk_overlap = 20,
108 | length_function = len,
109 | )
110 |
111 |
112 | class WebpageQATool(BaseTool):
113 | name = "query_webpage"
114 | description = "Browse a webpage and retrieve the information relevant to the question."
115 | text_splitter: RecursiveCharacterTextSplitter = Field(default_factory=_get_text_splitter)
116 | qa_chain: BaseCombineDocumentsChain
117 |
118 | def _run(self, url: str, question: str) -> str:
119 | """Useful for browsing websites and scraping the text information."""
120 | result = browse_web_page.run(url)
121 | docs = [Document(page_content=result, metadata={"source": url})]
122 | web_docs = self.text_splitter.split_documents(docs)
123 | results = []
124 | # TODO: Handle this with a MapReduceChain
125 | for i in range(0, len(web_docs), 4):
126 | input_docs = web_docs[i:i+4]
127 | window_result = self.qa_chain({"input_documents": input_docs, "question": question}, return_only_outputs=True)
128 | results.append(f"Response from window {i} - {window_result}")
129 | results_docs = [Document(page_content="\n".join(results), metadata={"source": url})]
130 | return self.qa_chain({"input_documents": results_docs, "question": question}, return_only_outputs=True)
131 |
132 | async def _arun(self, url: str, question: str) -> str:
133 | raise NotImplementedError
134 |
135 |
136 |
137 | import faiss
138 | from langchain.vectorstores import FAISS
139 | from langchain.docstore import InMemoryDocstore
140 | from langchain.embeddings import OpenAIEmbeddings
141 | from langchain.tools.human.tool import HumanInputRun
142 |
143 | embeddings_model = OpenAIEmbeddings()
144 | embedding_size = 1536  # dimensionality of OpenAI's text-embedding-ada-002 embeddings
145 | index = faiss.IndexFlatL2(embedding_size)
146 | vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})
147 | query_website_tool = WebpageQATool(qa_chain=load_qa_with_sources_chain(llm))
148 | tools = [
149 | web_search,
150 | WriteFileTool(),
151 | ReadFileTool(),
152 | process_csv,
153 | query_website_tool,
154 | # HumanInputRun(), # Activate if you want the permit asking for help from the human
155 | ]
156 |
157 |
158 | agent = AutoGPT.from_llm_and_tools(
159 | ai_name="Tom",
160 | ai_role="Assistant",
161 | tools=tools,
162 | llm=llm,
163 | memory=vectorstore.as_retriever(search_kwargs={"k": 8}),
164 | # human_in_the_loop=True, # Set to True if you want to add feedback at each step.
165 | )
166 | # agent.chain.verbose = True
167 |
168 |
169 |
170 | agent.run(["What were the winning boston marathon times for the past 5 years? Generate a table of the names, countries of origin, and times."])
--------------------------------------------------------------------------------
/Eve/agents/AutonomousAgent.py:
--------------------------------------------------------------------------------
1 | import math
2 | from array import array
3 | from langchain.llms.base import BaseLLM
4 |
5 | from typing import Sequence
6 |
7 | from .GenerativeAgent import GenerativeAgent
8 |
9 | from langchain.vectorstores import Weaviate
10 | from langchain.retrievers import TimeWeightedVectorStoreRetriever
11 | from langchain.embeddings import OpenAIEmbeddings
12 | from langchain.schema import BaseRetriever
13 | from langchain import PromptTemplate
14 | from langchain.tools.base import BaseTool
15 | from langchain.agents import ZeroShotAgent
16 | from langchain.prompts import load_prompt
17 |
18 |
19 | import os
20 |
21 | import weaviate
22 |
23 |
24 | WEAVIATE_HOST = os.getenv("WEAVIATE_HOST", "")
25 | WEAVIATE_VECTORIZER = os.getenv("WEAVIATE_VECTORIZER", "")
26 |
27 | def relevance_score_fn(score: float) -> float:
28 | """Return a similarity score on a scale [0, 1]."""
29 | # This will differ depending on a few things:
30 | # - the distance / similarity metric used by the VectorStore
31 | # - the scale of your embeddings (OpenAI's are unit norm. Many others are not!)
32 | # This function converts the euclidean norm of normalized embeddings
33 | # (0 is most similar, sqrt(2) most dissimilar)
34 | # to a similarity function (0 to 1)
35 | return 1.0 - score / math.sqrt(2)
36 |
37 | def create_new_memory_retriever_default():
38 | """Create a new vector store retriever unique to the agent."""
39 |
40 | client = weaviate.Client(
41 | url=WEAVIATE_HOST,
42 | additional_headers={"X-OpenAI-Api-Key": os.getenv("OPENAI_API_KEY")},
43 | # auth_client_secret: Optional[AuthCredentials] = None,
44 | # timeout_config: Union[Tuple[Real, Real], Real] = (10, 60),
45 | # proxies: Union[dict, str, None] = None,
46 | # trust_env: bool = False,
47 | # additional_headers: Optional[dict] = None,
48 | # startup_period: Optional[int] = 5,
49 | # embedded_options=[],
50 | )
51 |
52 | embeddings_model = OpenAIEmbeddings(
53 | #deployment="your-embeddings-deployment-name",
54 | model="text-embedding-ada-002"
55 | )
56 |
57 | vectorstore = Weaviate(client, "Paragraph", "content", embedding=embeddings_model, relevance_score_fn=relevance_score_fn)
58 |
59 | return TimeWeightedVectorStoreRetriever(vectorstore=vectorstore, other_score_keys=["importance"], k=15)
60 |
61 | class AutonomousAgent():
62 |
63 | def make(
64 | self,
65 | name: str,
66 | age: int,
67 | traits: str,
68 | status: str,
69 | llm: BaseLLM,
70 | daily_summaries: array,
71 | reflection_threshold: int = 8,
72 | memory_retriever: BaseRetriever = None,  # built lazily below; a call here would connect to Weaviate at import time
73 | verbose: bool = False
74 | )->GenerativeAgent:
75 |
76 | return GenerativeAgent( # TODO current_plan
77 | name=name,
78 | age=age,
79 | traits=traits,
80 | status=status,
81 | reflection_threshold=reflection_threshold,
82 | memory_retriever=memory_retriever or create_new_memory_retriever_default(),
83 | llm=llm,
84 | daily_summaries=daily_summaries,
85 | verbose=verbose,
86 | )
87 |
88 | def getPrompt(generativeAgent: GenerativeAgent, objective, operating_system, tool_names, tools_summary)->PromptTemplate:
89 |
90 | prompt = load_prompt("prompts/ryan.json")
91 | prompt.partial(agent_summary=generativeAgent.get_summary(True))
92 | prompt.format(
93 | task = objective,
94 | objective = objective,
95 | agent_name = "Ryan",
96 | operating_system = operating_system,
97 | tool_names = tool_names,
98 | tools_summary = tools_summary,
99 | agent_summary = generativeAgent.get_summary(True)
100 | )
101 |
102 | return prompt
103 |
104 | #return prompt.format(adjective="funny")
105 |
106 |
107 | #if input_variables is None:
108 | # input_variables = ["input", "agent_scratchpad"]
109 | #return PromptTemplate(template=template, input_variables=input_variables)
110 |
111 |
112 |
113 | #ZeroShotAgent.create_prompt(
114 | # tools=tools,
115 | # prefix=template,
116 | # suffix="",
117 | # input_variables=["objective", "task", "context", "agent_scratchpad"],
118 | #)
119 |
120 | #template="""You are {name}, an instance of an autonomous AGI agent, running on {operating_system}. This is a recent summary of you: {agent_summary}. You have been given a single task: {task}, based on the overarching objective: {objective}. The tools I can use are: {tools}. Think smart.\n{agent_scratchpad}""",
121 | # input_variables=["operating_system", "tools", "objective", "task", "agent_scratchpad"],
122 | # partial_variables={"agent_summary": generativeAgent.get_summary(), "agent_name": generativeAgent.name},
123 | # tools=tools,
124 | # # prefix="You are an AI who performs one task based on the following objective: {objective}. Take into account these previously completed tasks: {context}.",
125 | # # suffix="Question: {task}",
126 |
127 |
128 |
129 |
130 |
131 |
132 |
--------------------------------------------------------------------------------
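
A hypothetical usage sketch for AutonomousAgent.make (the import path, model choice, and character values are illustrative; a reachable Weaviate instance is required for the default memory retriever):

```python
# hypothetical usage; character values are illustrative
from langchain.chat_models import ChatOpenAI

from agents.AutonomousAgent import AutonomousAgent

llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.7)

ryan = AutonomousAgent().make(
    name="Ryan",
    age=32,
    traits="curious, methodical",
    status="idle",
    llm=llm,
    daily_summaries=[],
    verbose=True,
)
print(ryan.get_summary(force_refresh=True))
```
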
/Eve/chains/api/meteo/chain.json:
--------------------------------------------------------------------------------
1 | {
2 | "memory": null,
3 | "verbose": true,
4 | "api_request_chain": {
5 | "memory": null,
6 | "verbose": false,
7 | "prompt": {
8 | "input_variables": [
9 | "api_docs",
10 | "question"
11 | ],
12 | "output_parser": null,
13 | "template": "You are given the below API Documentation:\n{api_docs}\nUsing this documentation, generate the full API url to call for answering the user question.\nYou should build the API url in order to get a response that is as short as possible, while still getting the necessary information to answer the question. Pay attention to deliberately exclude any unnecessary pieces of data in the API call.\n\nQuestion:{question}\nAPI url:",
14 | "template_format": "f-string",
15 | "_type": "prompt"
16 | },
17 | "llm": {
18 | "model_name": "text-davinci-003",
19 | "temperature": 0.0,
20 | "max_tokens": 256,
21 | "top_p": 1,
22 | "frequency_penalty": 0,
23 | "presence_penalty": 0,
24 | "n": 1,
25 | "best_of": 1,
26 | "request_timeout": null,
27 | "logit_bias": {},
28 | "_type": "openai"
29 | },
30 | "output_key": "text",
31 | "_type": "llm_chain"
32 | },
33 | "api_answer_chain": {
34 | "memory": null,
35 | "verbose": false,
36 | "prompt": {
37 | "input_variables": [
38 | "api_docs",
39 | "question",
40 | "api_url",
41 | "api_response"
42 | ],
43 | "output_parser": null,
44 | "template": "You are given the below API Documentation:\n{api_docs}\nUsing this documentation, generate the full API url to call for answering the user question.\nYou should build the API url in order to get a response that is as short as possible, while still getting the necessary information to answer the question. Pay attention to deliberately exclude any unnecessary pieces of data in the API call.\n\nQuestion:{question}\nAPI url: {api_url}\n\nHere is the response from the API:\n\n{api_response}\n\nSummarize this response to answer the original question.\n\nSummary:",
45 | "template_format": "f-string",
46 | "_type": "prompt"
47 | },
48 | "llm": {
49 | "model_name": "text-davinci-003",
50 | "temperature": 0.0,
51 | "max_tokens": 256,
52 | "top_p": 1,
53 | "frequency_penalty": 0,
54 | "presence_penalty": 0,
55 | "n": 1,
56 | "best_of": 1,
57 | "request_timeout": null,
58 | "logit_bias": {},
59 | "_type": "openai"
60 | },
61 | "output_key": "text",
62 | "_type": "llm_chain"
63 | },
64 | "api_docs": "BASE URL: https://api.open-meteo.com/\n\nAPI Documentation\nThe API endpoint /v1/forecast accepts a geographical coordinate, a list of weather variables and responds with a JSON hourly weather forecast for 7 days. Time always starts at 0:00 today and contains 168 hours. All URL parameters are listed below:\n\nParameter\tFormat\tRequired\tDefault\tDescription\nlatitude, longitude\tFloating point\tYes\t\tGeographical WGS84 coordinate of the location\nhourly\tString array\tNo\t\tA list of weather variables which should be returned. Values can be comma separated, or multiple &hourly= parameter in the URL can be used.\ndaily\tString array\tNo\t\tA list of daily weather variable aggregations which should be returned. Values can be comma separated, or multiple &daily= parameter in the URL can be used. If daily weather variables are specified, parameter timezone is required.\ncurrent_weather\tBool\tNo\tfalse\tInclude current weather conditions in the JSON output.\ntemperature_unit\tString\tNo\tcelsius\tIf fahrenheit is set, all temperature values are converted to Fahrenheit.\nwindspeed_unit\tString\tNo\tkmh\tOther wind speed speed units: ms, mph and kn\nprecipitation_unit\tString\tNo\tmm\tOther precipitation amount units: inch\ntimeformat\tString\tNo\tiso8601\tIf format unixtime is selected, all time values are returned in UNIX epoch time in seconds. Please note that all timestamp are in GMT+0! For daily values with unix timestamps, please apply utc_offset_seconds again to get the correct date.\ntimezone\tString\tNo\tGMT\tIf timezone is set, all timestamps are returned as local-time and data is returned starting at 00:00 local-time. Any time zone name from the time zone database is supported. If auto is set as a time zone, the coordinates will be automatically resolved to the local time zone.\npast_days\tInteger (0-2)\tNo\t0\tIf past_days is set, yesterday or the day before yesterday data are also returned.\nstart_date\nend_date\tString (yyyy-mm-dd)\tNo\t\tThe time interval to get weather data. A day must be specified as an ISO8601 date (e.g. 2022-06-30).\nmodels\tString array\tNo\tauto\tManually select one or more weather models. Per default, the best suitable weather models will be combined.\n\nHourly Parameter Definition\nThe parameter &hourly= accepts the following values. Most weather variables are given as an instantaneous value for the indicated hour. Some variables like precipitation are calculated from the preceding hour as an average or sum.\n\nVariable\tValid time\tUnit\tDescription\ntemperature_2m\tInstant\t\u00b0C (\u00b0F)\tAir temperature at 2 meters above ground\nsnowfall\tPreceding hour sum\tcm (inch)\tSnowfall amount of the preceding hour in centimeters. For the water equivalent in millimeter, divide by 7. E.g. 7 cm snow = 10 mm precipitation water equivalent\nrain\tPreceding hour sum\tmm (inch)\tRain from large scale weather systems of the preceding hour in millimeter\nshowers\tPreceding hour sum\tmm (inch)\tShowers from convective precipitation in millimeters from the preceding hour\nweathercode\tInstant\tWMO code\tWeather condition as a numeric code. Follow WMO weather interpretation codes. See table below for details.\nsnow_depth\tInstant\tmeters\tSnow depth on the ground\nfreezinglevel_height\tInstant\tmeters\tAltitude above sea level of the 0\u00b0C level\nvisibility\tInstant\tmeters\tViewing distance in meters. Influenced by low clouds, humidity and aerosols. Maximum visibility is approximately 24 km.",
65 | "question_key": "question",
66 | "output_key": "output",
67 | "_type": "api_chain"
68 | }
--------------------------------------------------------------------------------
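
These serialized configs follow LangChain's chain-serialization format; a minimal loading sketch (assumes the installed LangChain version's load_chain supports the api_chain type, and that OPENAI_API_KEY is set; the question is illustrative):

```python
# minimal sketch: load a serialized chain and run it
from langchain.chains import load_chain

chain = load_chain("Eve/chains/api/meteo/chain.json")
print(chain.run("What is the current temperature in Amsterdam in Celsius?"))
```
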
/Eve/chains/llm-bash/chain.json:
--------------------------------------------------------------------------------
1 | {
2 | "memory": null,
3 | "verbose": true,
4 | "llm": {
5 | "model_name": "text-davinci-003",
6 | "temperature": 0.0,
7 | "max_tokens": 256,
8 | "top_p": 1,
9 | "frequency_penalty": 0,
10 | "presence_penalty": 0,
11 | "n": 1,
12 | "best_of": 1,
13 | "request_timeout": null,
14 | "logit_bias": {},
15 | "_type": "openai"
16 | },
17 | "input_key": "question",
18 | "output_key": "answer",
19 | "prompt": {
20 | "input_variables": [
21 | "question"
22 | ],
23 | "output_parser": null,
24 | "template": "If someone asks you to perform a task, your job is to come up with a series of bash commands that will perform the task. There is no need to put \"#!/bin/bash\" in your answer. Make sure to reason step by step, using this format:\n\nQuestion: \"copy the files in the directory named 'target' into a new directory at the same level as target called 'myNewDirectory'\"\n\nI need to take the following actions:\n- List all files in the directory\n- Create a new directory\n- Copy the files from the first directory into the second directory\n```bash\nls\nmkdir myNewDirectory\ncp -r target/* myNewDirectory\n```\n\nThat is the format. Begin!\n\nQuestion: {question}",
25 | "template_format": "f-string",
26 | "_type": "prompt"
27 | },
28 | "_type": "llm_bash_chain"
29 | }
--------------------------------------------------------------------------------
/Eve/chains/llm-checker/chain.json:
--------------------------------------------------------------------------------
1 | {
2 | "memory": null,
3 | "verbose": true,
4 | "llm": {
5 | "model_name": "text-davinci-003",
6 | "temperature": 0.7,
7 | "max_tokens": 256,
8 | "top_p": 1,
9 | "frequency_penalty": 0,
10 | "presence_penalty": 0,
11 | "n": 1,
12 | "best_of": 1,
13 | "request_timeout": null,
14 | "logit_bias": {},
15 | "_type": "openai"
16 | },
17 | "create_draft_answer_prompt": {
18 | "input_variables": [
19 | "question"
20 | ],
21 | "output_parser": null,
22 | "template": "{question}\n\n",
23 | "template_format": "f-string",
24 | "_type": "prompt"
25 | },
26 | "list_assertions_prompt": {
27 | "input_variables": [
28 | "statement"
29 | ],
30 | "output_parser": null,
31 | "template": "Here is a statement:\n{statement}\nMake a bullet point list of the assumptions you made when producing the above statement.\n\n",
32 | "template_format": "f-string",
33 | "_type": "prompt"
34 | },
35 | "check_assertions_prompt": {
36 | "input_variables": [
37 | "assertions"
38 | ],
39 | "output_parser": null,
40 | "template": "Here is a bullet point list of assertions:\n{assertions}\nFor each assertion, determine whether it is true or false. If it is false, explain why.\n\n",
41 | "template_format": "f-string",
42 | "_type": "prompt"
43 | },
44 | "revised_answer_prompt": {
45 | "input_variables": [
46 | "checked_assertions",
47 | "question"
48 | ],
49 | "output_parser": null,
50 | "template": "{checked_assertions}\n\nQuestion: In light of the above assertions and checks, how would you answer the question '{question}'?\n\nAnswer:",
51 | "template_format": "f-string",
52 | "_type": "prompt"
53 | },
54 | "input_key": "query",
55 | "output_key": "result",
56 | "_type": "llm_checker_chain"
57 | }
--------------------------------------------------------------------------------
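The four prompts above define a draft -> list assertions -> check assertions -> revise pipeline. A stub sketch of that control flow, with fake_llm standing in for the serialized OpenAI call (the returned strings are placeholders, not real completions):

```python
def fake_llm(prompt: str) -> str:
    """Stand-in for the serialized OpenAI call."""
    return f"<completion for: {prompt[:40]}...>"

def llm_checker(question: str) -> str:
    draft = fake_llm(question)  # create_draft_answer_prompt
    assertions = fake_llm(f"Here is a statement:\n{draft}\n"
                          "Make a bullet point list of the assumptions...")  # list_assertions_prompt
    checked = fake_llm(f"Here is a bullet point list of assertions:\n{assertions}\n"
                       "For each assertion, determine whether it is true or false...")  # check_assertions_prompt
    return fake_llm(f"{checked}\n\nQuestion: In light of the above assertions and checks, "
                    f"how would you answer the question '{question}'?\n\nAnswer:")  # revised_answer_prompt

print(llm_checker("Is the blue whale the largest animal?"))
```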
/Eve/chains/llm-math/chain.json:
--------------------------------------------------------------------------------
1 | {
2 | "memory": null,
3 | "verbose": true,
4 | "llm": {
5 | "model_name": "text-davinci-003",
6 | "temperature": 0.0,
7 | "max_tokens": 256,
8 | "top_p": 1,
9 | "frequency_penalty": 0,
10 | "presence_penalty": 0,
11 | "n": 1,
12 | "best_of": 1,
13 | "request_timeout": null,
14 | "logit_bias": {},
15 | "_type": "openai"
16 | },
17 | "prompt": {
18 | "input_variables": [
19 | "question"
20 | ],
21 | "output_parser": null,
22 | "template": "You are GPT-3, and you can't do math.\n\nYou can do basic math, and your memorization abilities are impressive, but you can't do any complex calculations that a human could not do in their head. You also have an annoying tendency to just make up highly specific, but wrong, answers.\n\nSo we hooked you up to a Python 3 kernel, and now you can execute code. If anyone gives you a hard math problem, just use this format and we\u2019ll take care of the rest:\n\nQuestion: ${{Question with hard calculation.}}\n```python\n${{Code that prints what you need to know}}\n```\n```output\n${{Output of your code}}\n```\nAnswer: ${{Answer}}\n\nOtherwise, use this simpler format:\n\nQuestion: ${{Question without hard calculation}}\nAnswer: ${{Answer}}\n\nBegin.\n\nQuestion: What is 37593 * 67?\n\n```python\nprint(37593 * 67)\n```\n```output\n2518731\n```\nAnswer: 2518731\n\nQuestion: {question}\n",
23 | "template_format": "f-string",
24 | "_type": "prompt"
25 | },
26 | "input_key": "question",
27 | "output_key": "answer",
28 | "_type": "llm_math_chain"
29 | }
--------------------------------------------------------------------------------
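The prompt above asks the model to emit hard calculations as a fenced Python block, which the chain executes and whose printed output becomes the answer. A minimal reproduction of that extract-and-execute step, using a canned completion instead of a live model:

```python
import contextlib
import io
import re

# Canned model output in the exact format the prompt above requests.
# The fence marker is built programmatically so this example stays readable.
FENCE = "`" * 3
completion = f"{FENCE}python\nprint(37593 * 67)\n{FENCE}"

match = re.search(rf"{FENCE}python\n(.*?){FENCE}", completion, re.DOTALL)
code = match.group(1)

buf = io.StringIO()
with contextlib.redirect_stdout(buf):
    exec(code)  # the real chain evaluates this via a Python REPL utility
print("Answer:", buf.getvalue().strip())  # -> Answer: 2518731
```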
/Eve/chains/llm-requests/chain.json:
--------------------------------------------------------------------------------
1 | {
2 | "memory": null,
3 | "verbose": false,
4 | "llm_chain": {
5 | "memory": null,
6 | "verbose": false,
7 | "prompt": {
8 | "input_variables": [
9 | "query",
10 | "requests_result"
11 | ],
12 | "output_parser": null,
13 | "template": "Between >>> and <<< are the raw search result text from google.\nExtract the answer to the question '{query}' or say \"not found\" if the information is not contained.\nUse the format\nExtracted:\n>>> {requests_result} <<<\nExtracted:",
14 | "template_format": "f-string",
15 | "_type": "prompt"
16 | },
17 | "llm": {
18 | "model_name": "text-davinci-003",
19 | "temperature": 0.0,
20 | "max_tokens": 256,
21 | "top_p": 1,
22 | "frequency_penalty": 0,
23 | "presence_penalty": 0,
24 | "n": 1,
25 | "best_of": 1,
26 | "request_timeout": null,
27 | "logit_bias": {},
28 | "_type": "openai"
29 | },
30 | "output_key": "text",
31 | "_type": "llm_chain"
32 | },
33 | "text_length": 8000,
34 | "requests_key": "requests_result",
35 | "input_key": "url",
36 | "output_key": "output",
37 | "_type": "llm_requests_chain"
38 | }
--------------------------------------------------------------------------------
/Eve/chains/pal/math/chain.json:
--------------------------------------------------------------------------------
1 | {
2 | "memory": null,
3 | "verbose": true,
4 | "llm": {
5 | "model_name": "code-davinci-002",
6 | "temperature": 0.0,
7 | "max_tokens": 512,
8 | "top_p": 1,
9 | "frequency_penalty": 0,
10 | "presence_penalty": 0,
11 | "n": 1,
12 | "best_of": 1,
13 | "request_timeout": null,
14 | "logit_bias": {},
15 | "_type": "openai"
16 | },
17 | "prompt": {
18 | "input_variables": [
19 | "question"
20 | ],
21 | "output_parser": null,
22 | "template": "Q: Olivia has $23. She bought five bagels for $3 each. How much money does she have left?\n\n# solution in Python:\n\n\ndef solution():\n \"\"\"Olivia has $23. She bought five bagels for $3 each. How much money does she have left?\"\"\"\n money_initial = 23\n bagels = 5\n bagel_cost = 3\n money_spent = bagels * bagel_cost\n money_left = money_initial - money_spent\n result = money_left\n return result\n\n\n\n\n\nQ: Michael had 58 golf balls. On tuesday, he lost 23 golf balls. On wednesday, he lost 2 more. How many golf balls did he have at the end of wednesday?\n\n# solution in Python:\n\n\ndef solution():\n \"\"\"Michael had 58 golf balls. On tuesday, he lost 23 golf balls. On wednesday, he lost 2 more. How many golf balls did he have at the end of wednesday?\"\"\"\n golf_balls_initial = 58\n golf_balls_lost_tuesday = 23\n golf_balls_lost_wednesday = 2\n golf_balls_left = golf_balls_initial - golf_balls_lost_tuesday - golf_balls_lost_wednesday\n result = golf_balls_left\n return result\n\n\n\n\n\nQ: There were nine computers in the server room. Five more computers were installed each day, from monday to thursday. How many computers are now in the server room?\n\n# solution in Python:\n\n\ndef solution():\n \"\"\"There were nine computers in the server room. Five more computers were installed each day, from monday to thursday. How many computers are now in the server room?\"\"\"\n computers_initial = 9\n computers_per_day = 5\n num_days = 4 # 4 days between monday and thursday\n computers_added = computers_per_day * num_days\n computers_total = computers_initial + computers_added\n result = computers_total\n return result\n\n\n\n\n\nQ: Shawn has five toys. For Christmas, he got two toys each from his mom and dad. How many toys does he have now?\n\n# solution in Python:\n\n\ndef solution():\n \"\"\"Shawn has five toys. For Christmas, he got two toys each from his mom and dad. How many toys does he have now?\"\"\"\n toys_initial = 5\n mom_toys = 2\n dad_toys = 2\n total_received = mom_toys + dad_toys\n total_toys = toys_initial + total_received\n result = total_toys\n return result\n\n\n\n\n\nQ: Jason had 20 lollipops. He gave Denny some lollipops. Now Jason has 12 lollipops. How many lollipops did Jason give to Denny?\n\n# solution in Python:\n\n\ndef solution():\n \"\"\"Jason had 20 lollipops. He gave Denny some lollipops. Now Jason has 12 lollipops. How many lollipops did Jason give to Denny?\"\"\"\n jason_lollipops_initial = 20\n jason_lollipops_after = 12\n denny_lollipops = jason_lollipops_initial - jason_lollipops_after\n result = denny_lollipops\n return result\n\n\n\n\n\nQ: Leah had 32 chocolates and her sister had 42. If they ate 35, how many pieces do they have left in total?\n\n# solution in Python:\n\n\ndef solution():\n \"\"\"Leah had 32 chocolates and her sister had 42. 
If they ate 35, how many pieces do they have left in total?\"\"\"\n leah_chocolates = 32\n sister_chocolates = 42\n total_chocolates = leah_chocolates + sister_chocolates\n chocolates_eaten = 35\n chocolates_left = total_chocolates - chocolates_eaten\n result = chocolates_left\n return result\n\n\n\n\n\nQ: If there are 3 cars in the parking lot and 2 more cars arrive, how many cars are in the parking lot?\n\n# solution in Python:\n\n\ndef solution():\n \"\"\"If there are 3 cars in the parking lot and 2 more cars arrive, how many cars are in the parking lot?\"\"\"\n cars_initial = 3\n cars_arrived = 2\n total_cars = cars_initial + cars_arrived\n result = total_cars\n return result\n\n\n\n\n\nQ: There are 15 trees in the grove. Grove workers will plant trees in the grove today. After they are done, there will be 21 trees. How many trees did the grove workers plant today?\n\n# solution in Python:\n\n\ndef solution():\n \"\"\"There are 15 trees in the grove. Grove workers will plant trees in the grove today. After they are done, there will be 21 trees. How many trees did the grove workers plant today?\"\"\"\n trees_initial = 15\n trees_after = 21\n trees_added = trees_after - trees_initial\n result = trees_added\n return result\n\n\n\n\n\nQ: {question}\n\n# solution in Python:\n\n\n",
23 | "template_format": "f-string",
24 | "_type": "prompt"
25 | },
26 | "stop": "\n\n",
27 | "get_answer_expr": "print(solution())",
28 | "output_key": "result",
29 | "_type": "pal_chain"
30 | }
--------------------------------------------------------------------------------
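pal_chain stops generation at the first blank-line pair ("stop": "\n\n"), executes the generated program, and then evaluates "get_answer_expr" to surface the result. A sketch of that final step, with a hand-written stand-in for the model's output:

```python
# Canned stand-in for the model's completion (generation stops at "\n\n").
generated = (
    "def solution():\n"
    "    \"\"\"Olivia has $23. She bought five bagels for $3 each.\"\"\"\n"
    "    money_initial = 23\n"
    "    money_spent = 5 * 3\n"
    "    return money_initial - money_spent\n"
)

namespace = {}
exec(generated, namespace)            # define solution() exactly as generated
exec("print(solution())", namespace)  # "get_answer_expr" from the config -> 8
```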
/Eve/chains/qa-sources/refine/chain.json:
--------------------------------------------------------------------------------
1 | {
2 | "memory": null,
3 | "verbose": false,
4 | "input_key": "input_documents",
5 | "output_key": "output_text",
6 | "initial_llm_chain": {
7 | "memory": null,
8 | "verbose": false,
9 | "prompt": {
10 | "input_variables": [
11 | "context_str",
12 | "question"
13 | ],
14 | "output_parser": null,
15 | "template": "Context information is below. \n---------------------\n{context_str}\n---------------------\nGiven the context information and not prior knowledge, answer the question: {question}\n",
16 | "template_format": "f-string",
17 | "_type": "prompt"
18 | },
19 | "llm": {
20 | "model_name": "text-davinci-003",
21 | "temperature": 0.0,
22 | "max_tokens": 256,
23 | "top_p": 1,
24 | "frequency_penalty": 0,
25 | "presence_penalty": 0,
26 | "n": 1,
27 | "best_of": 1,
28 | "request_timeout": null,
29 | "logit_bias": {},
30 | "_type": "openai"
31 | },
32 | "output_key": "text",
33 | "_type": "llm_chain"
34 | },
35 | "refine_llm_chain": {
36 | "memory": null,
37 | "verbose": false,
38 | "prompt": {
39 | "input_variables": [
40 | "question",
41 | "existing_answer",
42 | "context_str"
43 | ],
44 | "output_parser": null,
45 | "template": "The original question is as follows: {question}\nWe have provided an existing answer, including sources: {existing_answer}\nWe have the opportunity to refine the existing answer(only if needed) with some more context below.\n------------\n{context_str}\n------------\nGiven the new context, refine the original answer to better answer the question. If you do update it, please update the sources as well. If the context isn't useful, return the original answer.",
46 | "template_format": "f-string",
47 | "_type": "prompt"
48 | },
49 | "llm": {
50 | "model_name": "text-davinci-003",
51 | "temperature": 0.0,
52 | "max_tokens": 256,
53 | "top_p": 1,
54 | "frequency_penalty": 0,
55 | "presence_penalty": 0,
56 | "n": 1,
57 | "best_of": 1,
58 | "request_timeout": null,
59 | "logit_bias": {},
60 | "_type": "openai"
61 | },
62 | "output_key": "text",
63 | "_type": "llm_chain"
64 | },
65 | "document_variable_name": "context_str",
66 | "initial_response_name": "existing_answer",
67 | "document_prompt": {
68 | "input_variables": [
69 | "page_content",
70 | "source"
71 | ],
72 | "output_parser": null,
73 | "template": "Content: {page_content}\nSource: {source}",
74 | "template_format": "f-string",
75 | "_type": "prompt"
76 | },
77 | "return_intermediate_steps": false,
78 | "_type": "refine_documents_chain"
79 | }
--------------------------------------------------------------------------------
/Eve/chains/qa-sources/rerank/chain.json:
--------------------------------------------------------------------------------
1 | {
2 | "memory": null,
3 | "verbose": false,
4 | "input_key": "input_documents",
5 | "output_key": "output_text",
6 | "llm_chain": {
7 | "memory": null,
8 | "verbose": false,
9 | "prompt": {
10 | "input_variables": [
11 | "context",
12 | "question"
13 | ],
14 | "output_parser": {
15 | "regex": "(.*?)\\nScore: (.*)",
16 | "output_keys": [
17 | "answer",
18 | "score"
19 | ],
20 | "default_output_key": null,
21 | "_type": "regex_parser"
22 | },
23 | "template": "Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\n\nIn addition to giving an answer, also return a score of how fully it answered the user's question. This should be in the following format:\n\nQuestion: [question here]\nHelpful Answer: [answer here]\nScore: [score between 0 and 100]\n\nHow to determine the score:\n- Higher is a better answer\n- Better responds fully to the asked question, with sufficient level of detail\n- If you do not know the answer based on the context, that should be a score of 0\n- Don't be overconfident!\n\nExample #1\n\nContext:\n---------\nApples are red\n---------\nQuestion: what color are apples?\nHelpful Answer: red\nScore: 100\n\nExample #2\n\nContext:\n---------\nit was night and the witness forgot his glasses. he was not sure if it was a sports car or an suv\n---------\nQuestion: what type was the car?\nHelpful Answer: a sports car or an suv\nScore: 60\n\nExample #3\n\nContext:\n---------\nPears are either red or orange\n---------\nQuestion: what color are apples?\nHelpful Answer: This document does not answer the question\nScore: 0\n\nBegin!\n\nContext:\n---------\n{context}\n---------\nQuestion: {question}\nHelpful Answer:",
24 | "template_format": "f-string",
25 | "_type": "prompt"
26 | },
27 | "llm": {
28 | "model_name": "text-davinci-003",
29 | "temperature": 0.0,
30 | "max_tokens": 256,
31 | "top_p": 1,
32 | "frequency_penalty": 0,
33 | "presence_penalty": 0,
34 | "n": 1,
35 | "best_of": 1,
36 | "request_timeout": null,
37 | "logit_bias": {},
38 | "_type": "openai"
39 | },
40 | "output_key": "text",
41 | "_type": "llm_chain"
42 | },
43 | "document_variable_name": "context",
44 | "rank_key": "score",
45 | "answer_key": "answer",
46 | "metadata_keys": [
47 | "source"
48 | ],
49 | "return_intermediate_steps": false,
50 | "_type": "map_rerank_documents_chain"
51 | }
--------------------------------------------------------------------------------
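The output_parser above is a regex parser that splits each completion into an answer and a score, which the rerank step then sorts on. A minimal reproduction of the parse:

```python
import re

completion = "a sports car or an suv\nScore: 60"
m = re.search(r"(.*?)\nScore: (.*)", completion)
parsed = dict(zip(["answer", "score"], m.groups()))
print(parsed)  # {'answer': 'a sports car or an suv', 'score': '60'}
```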
/Eve/chains/qa/chain.json:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/webgrip/PuttyGPT/7c84818bde06a90a2de42af7fb8aa8f904395fa8/Eve/chains/qa/chain.json
--------------------------------------------------------------------------------
/Eve/chains/qa/reduce/chain.json:
--------------------------------------------------------------------------------
1 | {
2 | "memory": null,
3 | "verbose": false,
4 | "input_key": "input_documents",
5 | "output_key": "output_text",
6 | "llm_chain": {
7 | "memory": null,
8 | "verbose": false,
9 | "prompt": {
10 | "input_variables": [
11 | "context",
12 | "question"
13 | ],
14 | "output_parser": null,
15 | "template": "Use the following portion of a long document to see if any of the text is relevant to answer the question. \nReturn any relevant text verbatim.\n{context}\nQuestion: {question}\nRelevant text, if any:",
16 | "template_format": "f-string",
17 | "_type": "prompt"
18 | },
19 | "llm": {
20 | "model_name": "text-davinci-003",
21 | "temperature": 0.0,
22 | "max_tokens": 256,
23 | "top_p": 1,
24 | "frequency_penalty": 0,
25 | "presence_penalty": 0,
26 | "n": 1,
27 | "best_of": 1,
28 | "request_timeout": null,
29 | "logit_bias": {},
30 | "_type": "openai"
31 | },
32 | "output_key": "text",
33 | "_type": "llm_chain"
34 | },
35 | "combine_document_chain": {
36 | "memory": null,
37 | "verbose": false,
38 | "input_key": "input_documents",
39 | "output_key": "output_text",
40 | "llm_chain": {
41 | "memory": null,
42 | "verbose": false,
43 | "prompt": {
44 | "input_variables": [
45 | "summaries",
46 | "question"
47 | ],
48 | "output_parser": null,
49 | "template": "Given the following extracted parts of a long document and a question, create a final answer. \nIf you don't know the answer, just say that you don't know. Don't try to make up an answer.\n\nQUESTION: Which state/country's law governs the interpretation of the contract?\n=========\nContent: This Agreement is governed by English law and the parties submit to the exclusive jurisdiction of the English courts in relation to any dispute (contractual or non-contractual) concerning this Agreement save that either party may apply to any court for an injunction or other relief to protect its Intellectual Property Rights.\n\nContent: No Waiver. Failure or delay in exercising any right or remedy under this Agreement shall not constitute a waiver of such (or any other) right or remedy.\n\n11.7 Severability. The invalidity, illegality or unenforceability of any term (or part of a term) of this Agreement shall not affect the continuation in force of the remainder of the term (if any) and this Agreement.\n\n11.8 No Agency. Except as expressly stated otherwise, nothing in this Agreement shall create an agency, partnership or joint venture of any kind between the parties.\n\n11.9 No Third-Party Beneficiaries.\n\nContent: (b) if Google believes, in good faith, that the Distributor has violated or caused Google to violate any Anti-Bribery Laws (as defined in Clause 8.5) or that such a violation is reasonably likely to occur,\n=========\nFINAL ANSWER: This Agreement is governed by English law.\n\nQUESTION: What did the president say about Michael Jackson?\n=========\nContent: Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \n\nLast year COVID-19 kept us apart. This year we are finally together again. \n\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \n\nWith a duty to one another to the American people to the Constitution. \n\nAnd with an unwavering resolve that freedom will always triumph over tyranny. \n\nSix days ago, Russia\u2019s Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated. \n\nHe thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined. \n\nHe met the Ukrainian people. \n\nFrom President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world. \n\nGroups of citizens blocking tanks with their bodies. Everyone from students to retirees teachers turned soldiers defending their homeland.\n\nContent: And we won\u2019t stop. \n\nWe have lost so much to COVID-19. Time with one another. And worst of all, so much loss of life. \n\nLet\u2019s use this moment to reset. Let\u2019s stop looking at COVID-19 as a partisan dividing line and see it for what it is: A God-awful disease. \n\nLet\u2019s stop seeing each other as enemies, and start seeing each other for who we really are: Fellow Americans. \n\nWe can\u2019t change how divided we\u2019ve been. But we can change how we move forward\u2014on COVID-19 and other issues we must face together. \n\nI recently visited the New York City Police Department days after the funerals of Officer Wilbert Mora and his partner, Officer Jason Rivera. \n\nThey were responding to a 9-1-1 call when a man shot and killed them with a stolen gun. \n\nOfficer Mora was 27 years old. 
\n\nOfficer Rivera was 22. \n\nBoth Dominican Americans who\u2019d grown up on the same streets they later chose to patrol as police officers. \n\nI spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves.\n\nContent: And a proud Ukrainian people, who have known 30 years of independence, have repeatedly shown that they will not tolerate anyone who tries to take their country backwards. \n\nTo all Americans, I will be honest with you, as I\u2019ve always promised. A Russian dictator, invading a foreign country, has costs around the world. \n\nAnd I\u2019m taking robust action to make sure the pain of our sanctions is targeted at Russia\u2019s economy. And I will use every tool at our disposal to protect American businesses and consumers. \n\nTonight, I can announce that the United States has worked with 30 other countries to release 60 Million barrels of oil from reserves around the world. \n\nAmerica will lead that effort, releasing 30 Million barrels from our own Strategic Petroleum Reserve. And we stand ready to do more if necessary, unified with our allies. \n\nThese steps will help blunt gas prices here at home. And I know the news about what\u2019s happening can seem alarming. \n\nBut I want you to know that we are going to be okay.\n\nContent: More support for patients and families. \n\nTo get there, I call on Congress to fund ARPA-H, the Advanced Research Projects Agency for Health. \n\nIt\u2019s based on DARPA\u2014the Defense Department project that led to the Internet, GPS, and so much more. \n\nARPA-H will have a singular purpose\u2014to drive breakthroughs in cancer, Alzheimer\u2019s, diabetes, and more. \n\nA unity agenda for the nation. \n\nWe can do this. \n\nMy fellow Americans\u2014tonight , we have gathered in a sacred space\u2014the citadel of our democracy. \n\nIn this Capitol, generation after generation, Americans have debated great questions amid great strife, and have done great things. \n\nWe have fought for freedom, expanded liberty, defeated totalitarianism and terror. \n\nAnd built the strongest, freest, and most prosperous nation the world has ever known. \n\nNow is the hour. \n\nOur moment of responsibility. \n\nOur test of resolve and conscience, of history itself. \n\nIt is in this moment that our character is formed. Our purpose is found. Our future is forged. \n\nWell I know this nation.\n=========\nFINAL ANSWER: The president did not mention Michael Jackson.\n\nQUESTION: {question}\n=========\n{summaries}\n=========\nFINAL ANSWER:",
50 | "template_format": "f-string",
51 | "_type": "prompt"
52 | },
53 | "llm": {
54 | "model_name": "text-davinci-003",
55 | "temperature": 0.0,
56 | "max_tokens": 256,
57 | "top_p": 1,
58 | "frequency_penalty": 0,
59 | "presence_penalty": 0,
60 | "n": 1,
61 | "best_of": 1,
62 | "request_timeout": null,
63 | "logit_bias": {},
64 | "_type": "openai"
65 | },
66 | "output_key": "text",
67 | "_type": "llm_chain"
68 | },
69 | "document_prompt": {
70 | "input_variables": [
71 | "page_content"
72 | ],
73 | "output_parser": null,
74 | "template": "{page_content}",
75 | "template_format": "f-string",
76 | "_type": "prompt"
77 | },
78 | "document_variable_name": "summaries",
79 | "_type": "stuff_documents_chain"
80 | },
81 | "collapse_document_chain": null,
82 | "document_variable_name": "context",
83 | "return_intermediate_steps": false,
84 | "_type": "map_reduce_documents_chain"
85 | }
--------------------------------------------------------------------------------
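map_reduce_documents_chain runs the extraction prompt over each input document (the map step), then stuffs the per-document extracts into the combine prompt as {summaries}. A stub sketch of that data flow, with fake_llm standing in for the serialized OpenAI calls:

```python
def fake_llm(prompt: str) -> str:
    """Stand-in for the serialized OpenAI calls."""
    return "<llm output>"

docs = ["chunk one ...", "chunk two ...", "chunk three ..."]
question = "What did the president say about ARPA-H?"

# Map step (llm_chain): extract relevant text from each document independently.
extracts = [fake_llm(f"{d}\nQuestion: {question}\nRelevant text, if any:") for d in docs]

# Combine step (combine_document_chain): stuff all extracts into {summaries}.
summaries = "\n".join(extracts)
answer = fake_llm(f"QUESTION: {question}\n=========\n{summaries}\n=========\nFINAL ANSWER:")
print(answer)
```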
/Eve/chains/qa/refine/chain.json:
--------------------------------------------------------------------------------
1 | {
2 | "memory": null,
3 | "verbose": false,
4 | "input_key": "input_documents",
5 | "output_key": "output_text",
6 | "initial_llm_chain": {
7 | "memory": null,
8 | "verbose": false,
9 | "prompt": {
10 | "input_variables": [
11 | "context_str",
12 | "question"
13 | ],
14 | "output_parser": null,
15 | "template": "Context information is below. \n---------------------\n{context_str}\n---------------------\nGiven the context information and not prior knowledge, answer the question: {question}\n",
16 | "template_format": "f-string",
17 | "_type": "prompt"
18 | },
19 | "llm": {
20 | "model_name": "text-davinci-003",
21 | "temperature": 0.0,
22 | "max_tokens": 256,
23 | "top_p": 1,
24 | "frequency_penalty": 0,
25 | "presence_penalty": 0,
26 | "n": 1,
27 | "best_of": 1,
28 | "request_timeout": null,
29 | "logit_bias": {},
30 | "_type": "openai"
31 | },
32 | "output_key": "text",
33 | "_type": "llm_chain"
34 | },
35 | "refine_llm_chain": {
36 | "memory": null,
37 | "verbose": false,
38 | "prompt": {
39 | "input_variables": [
40 | "question",
41 | "existing_answer",
42 | "context_str"
43 | ],
44 | "output_parser": null,
45 | "template": "The original question is as follows: {question}\nWe have provided an existing answer: {existing_answer}\nWe have the opportunity to refine the existing answer(only if needed) with some more context below.\n------------\n{context_str}\n------------\nGiven the new context, refine the original answer to better answer the question. If the context isn't useful, return the original answer.",
46 | "template_format": "f-string",
47 | "_type": "prompt"
48 | },
49 | "llm": {
50 | "model_name": "text-davinci-003",
51 | "temperature": 0.0,
52 | "max_tokens": 256,
53 | "top_p": 1,
54 | "frequency_penalty": 0,
55 | "presence_penalty": 0,
56 | "n": 1,
57 | "best_of": 1,
58 | "request_timeout": null,
59 | "logit_bias": {},
60 | "_type": "openai"
61 | },
62 | "output_key": "text",
63 | "_type": "llm_chain"
64 | },
65 | "document_variable_name": "context_str",
66 | "initial_response_name": "existing_answer",
67 | "document_prompt": {
68 | "input_variables": [
69 | "page_content"
70 | ],
71 | "output_parser": null,
72 | "template": "{page_content}",
73 | "template_format": "f-string",
74 | "_type": "prompt"
75 | },
76 | "return_intermediate_steps": false,
77 | "_type": "refine_documents_chain"
78 | }
--------------------------------------------------------------------------------
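refine_documents_chain answers from the first document only, then folds every remaining document through the refine prompt, threading the running answer back in as {existing_answer}. A stub sketch:

```python
def fake_llm(prompt: str) -> str:
    """Stand-in for the serialized OpenAI calls."""
    return "<running answer>"

docs = ["first chunk", "second chunk", "third chunk"]
question = "..."

# initial_llm_chain: answer from the first document only.
answer = fake_llm(f"Context: {docs[0]}\nQuestion: {question}")

# refine_llm_chain: fold each remaining document into the running answer.
for chunk in docs[1:]:
    answer = fake_llm(f"Question: {question}\nExisting answer: {answer}\nNew context: {chunk}")

print(answer)
```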
/Eve/chains/qa/rerank/chain.json:
--------------------------------------------------------------------------------
1 | {
2 | "memory": null,
3 | "verbose": false,
4 | "input_key": "input_documents",
5 | "output_key": "output_text",
6 | "llm_chain": {
7 | "memory": null,
8 | "verbose": false,
9 | "prompt": {
10 | "input_variables": [
11 | "context",
12 | "question"
13 | ],
14 | "output_parser": {
15 | "regex": "(.*?)\\nScore: (.*)",
16 | "output_keys": [
17 | "answer",
18 | "score"
19 | ],
20 | "default_output_key": null,
21 | "_type": "regex_parser"
22 | },
23 | "template": "Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\n\nIn addition to giving an answer, also return a score of how fully it answered the user's question. This should be in the following format:\n\nQuestion: [question here]\nHelpful Answer: [answer here]\nScore: [score between 0 and 100]\n\nHow to determine the score:\n- Higher is a better answer\n- Better responds fully to the asked question, with sufficient level of detail\n- If you do not know the answer based on the context, that should be a score of 0\n- Don't be overconfident!\n\nExample #1\n\nContext:\n---------\nApples are red\n---------\nQuestion: what color are apples?\nHelpful Answer: red\nScore: 100\n\nExample #2\n\nContext:\n---------\nit was night and the witness forgot his glasses. he was not sure if it was a sports car or an suv\n---------\nQuestion: what type was the car?\nHelpful Answer: a sports car or an suv\nScore: 60\n\nExample #3\n\nContext:\n---------\nPears are either red or orange\n---------\nQuestion: what color are apples?\nHelpful Answer: This document does not answer the question\nScore: 0\n\nBegin!\n\nContext:\n---------\n{context}\n---------\nQuestion: {question}\nHelpful Answer:",
24 | "template_format": "f-string",
25 | "_type": "prompt"
26 | },
27 | "llm": {
28 | "model_name": "text-davinci-003",
29 | "temperature": 0.0,
30 | "max_tokens": 256,
31 | "top_p": 1,
32 | "frequency_penalty": 0,
33 | "presence_penalty": 0,
34 | "n": 1,
35 | "best_of": 1,
36 | "request_timeout": null,
37 | "logit_bias": {},
38 | "_type": "openai"
39 | },
40 | "output_key": "text",
41 | "_type": "llm_chain"
42 | },
43 | "document_variable_name": "context",
44 | "rank_key": "score",
45 | "answer_key": "answer",
46 | "metadata_keys": null,
47 | "return_intermediate_steps": false,
48 | "_type": "map_rerank_documents_chain"
49 | }
--------------------------------------------------------------------------------
/Eve/chains/sentiment/chain.json:
--------------------------------------------------------------------------------
1 | {
2 | "memory": null,
3 | "verbose": false,
4 | "input_key": "input_documents",
5 | "output_key": "output_text",
6 | "llm_chain": {
7 | "memory": null,
8 | "verbose": false,
9 | "prompt": {
10 | "input_variables": [
11 | "text"
12 | ],
13 | "output_parser": null,
14 | "template": "What is the sentiment of the following:\n\"{text}\"\n\n\nSENTIMENT (a float from 0 to 1 where 0 is negative and 1 is positive):",
15 | "template_format": "f-string",
16 | "_type": "prompt"
17 | },
18 | "llm": {
19 | "model_name": "ada",
20 | "temperature": 0.0,
21 | "max_tokens": 256,
22 | "top_p": 1,
23 | "frequency_penalty": 0,
24 | "presence_penalty": 0,
25 | "n": 1,
26 | "best_of": 1,
27 | "request_timeout": null,
28 | "logit_bias": {},
29 | "_type": "openai"
30 | },
31 | "output_key": "text",
32 | "_type": "llm_chain"
33 | },
34 | "document_prompt": {
35 | "input_variables": [
36 | "page_content"
37 | ],
38 | "output_parser": null,
39 | "template": "{page_content}",
40 | "template_format": "f-string",
41 | "_type": "prompt"
42 | },
43 | "document_variable_name": "text",
44 | "_type": "stuff_documents_chain"
45 | }
46 |
--------------------------------------------------------------------------------
/Eve/chains/summarize/concise/chain.json:
--------------------------------------------------------------------------------
1 | {
2 | "memory": null,
3 | "verbose": false,
4 | "input_key": "input_documents",
5 | "output_key": "output_text",
6 | "llm_chain": {
7 | "memory": null,
8 | "verbose": false,
9 | "prompt": {
10 | "input_variables": [
11 | "text"
12 | ],
13 | "output_parser": null,
14 | "template": "Write a concise summary of the following:\n\n\n\"{text}\"\n\n\nCONCISE SUMMARY:",
15 | "template_format": "f-string",
16 | "_type": "prompt"
17 | },
18 | "llm": {
19 | "model_name": "text-davinci-003",
20 | "temperature": 0.0,
21 | "max_tokens": 256,
22 | "top_p": 1,
23 | "frequency_penalty": 0,
24 | "presence_penalty": 0,
25 | "n": 1,
26 | "best_of": 1,
27 | "request_timeout": null,
28 | "logit_bias": {},
29 | "_type": "openai"
30 | },
31 | "output_key": "text",
32 | "_type": "llm_chain"
33 | },
34 | "document_prompt": {
35 | "input_variables": [
36 | "text"
37 | ],
38 | "output_parser": null,
39 | "template": "{text}",
40 | "template_format": "f-string",
41 | "_type": "prompt"
42 | },
43 | "document_variable_name": "text",
44 | "_type": "stuff_documents_chain"
45 | }
46 |
--------------------------------------------------------------------------------
/Eve/chains/summarize/reduce/chain.json:
--------------------------------------------------------------------------------
1 | {
2 | "memory": null,
3 | "verbose": false,
4 | "input_key": "input_documents",
5 | "output_key": "output_text",
6 | "llm_chain": {
7 | "memory": null,
8 | "verbose": false,
9 | "prompt": {
10 | "input_variables": [
11 | "text"
12 | ],
13 | "output_parser": null,
14 | "template": "Write a concise summary of the following:\n\n\n\"{text}\"\n\n\nCONCISE SUMMARY:",
15 | "template_format": "f-string",
16 | "_type": "prompt"
17 | },
18 | "llm": {
19 | "model_name": "text-davinci-003",
20 | "temperature": 0.0,
21 | "max_tokens": 256,
22 | "top_p": 1,
23 | "frequency_penalty": 0,
24 | "presence_penalty": 0,
25 | "n": 1,
26 | "best_of": 1,
27 | "request_timeout": null,
28 | "logit_bias": {},
29 | "_type": "openai"
30 | },
31 | "output_key": "text",
32 | "_type": "llm_chain"
33 | },
34 | "combine_document_chain": {
35 | "memory": null,
36 | "verbose": false,
37 | "input_key": "input_documents",
38 | "output_key": "output_text",
39 | "llm_chain": {
40 | "memory": null,
41 | "verbose": false,
42 | "prompt": {
43 | "input_variables": [
44 | "text"
45 | ],
46 | "output_parser": null,
47 | "template": "Write a concise summary of the following:\n\n\n\"{text}\"\n\n\nCONCISE SUMMARY:",
48 | "template_format": "f-string",
49 | "_type": "prompt"
50 | },
51 | "llm": {
52 | "model_name": "text-davinci-003",
53 | "temperature": 0.0,
54 | "max_tokens": 256,
55 | "top_p": 1,
56 | "frequency_penalty": 0,
57 | "presence_penalty": 0,
58 | "n": 1,
59 | "best_of": 1,
60 | "request_timeout": null,
61 | "logit_bias": {},
62 | "_type": "openai"
63 | },
64 | "output_key": "text",
65 | "_type": "llm_chain"
66 | },
67 | "document_prompt": {
68 | "input_variables": [
69 | "page_content"
70 | ],
71 | "output_parser": null,
72 | "template": "{page_content}",
73 | "template_format": "f-string",
74 | "_type": "prompt"
75 | },
76 | "document_variable_name": "text",
77 | "_type": "stuff_documents_chain"
78 | },
79 | "collapse_document_chain": null,
80 | "document_variable_name": "text",
81 | "return_intermediate_steps": false,
82 | "_type": "map_reduce_documents_chain"
83 | }
--------------------------------------------------------------------------------
/Eve/chains/summarize/refine/chain.json:
--------------------------------------------------------------------------------
1 | {
2 | "memory": null,
3 | "verbose": false,
4 | "input_key": "input_documents",
5 | "output_key": "output_text",
6 | "initial_llm_chain": {
7 | "memory": null,
8 | "verbose": false,
9 | "prompt": {
10 | "input_variables": [
11 | "text"
12 | ],
13 | "output_parser": null,
14 | "template": "Write a concise summary of the following:\n\n\n\"{text}\"\n\n\nCONCISE SUMMARY:",
15 | "template_format": "f-string",
16 | "_type": "prompt"
17 | },
18 | "llm": {
19 | "model_name": "text-davinci-003",
20 | "temperature": 0.0,
21 | "max_tokens": 256,
22 | "top_p": 1,
23 | "frequency_penalty": 0,
24 | "presence_penalty": 0,
25 | "n": 1,
26 | "best_of": 1,
27 | "request_timeout": null,
28 | "logit_bias": {},
29 | "_type": "openai"
30 | },
31 | "output_key": "text",
32 | "_type": "llm_chain"
33 | },
34 | "refine_llm_chain": {
35 | "memory": null,
36 | "verbose": false,
37 | "prompt": {
38 | "input_variables": [
39 | "existing_answer",
40 | "text"
41 | ],
42 | "output_parser": null,
43 | "template": "Your job is to produce a final summary\nWe have provided an existing summary up to a certain point: {existing_answer}\nWe have the opportunity to refine the existing summary(only if needed) with some more context below.\n------------\n{text}\n------------\nGiven the new context, refine the original summaryIf the context isn't useful, return the original summary.",
44 | "template_format": "f-string",
45 | "_type": "prompt"
46 | },
47 | "llm": {
48 | "model_name": "text-davinci-003",
49 | "temperature": 0.0,
50 | "max_tokens": 256,
51 | "top_p": 1,
52 | "frequency_penalty": 0,
53 | "presence_penalty": 0,
54 | "n": 1,
55 | "best_of": 1,
56 | "request_timeout": null,
57 | "logit_bias": {},
58 | "_type": "openai"
59 | },
60 | "output_key": "text",
61 | "_type": "llm_chain"
62 | },
63 | "document_variable_name": "text",
64 | "initial_response_name": "existing_answer",
65 | "document_prompt": {
66 | "input_variables": [
67 | "page_content"
68 | ],
69 | "output_parser": null,
70 | "template": "{page_content}",
71 | "template_format": "f-string",
72 | "_type": "prompt"
73 | },
74 | "return_intermediate_steps": false,
75 | "_type": "refine_documents_chain"
76 | }
--------------------------------------------------------------------------------
/Eve/chains/vectordb/reduce/chain.json:
--------------------------------------------------------------------------------
1 | {
2 | "memory": null,
3 | "verbose": false,
4 | "k": 4,
5 | "combine_documents_chain": {
6 | "memory": null,
7 | "verbose": false,
8 | "input_key": "input_documents",
9 | "output_key": "output_text",
10 | "llm_chain": {
11 | "memory": null,
12 | "verbose": false,
13 | "prompt": {
14 | "input_variables": [
15 | "context",
16 | "question"
17 | ],
18 | "output_parser": null,
19 | "template": "Use the following portion of a long document to see if any of the text is relevant to answer the question. \nReturn any relevant text verbatim.\n{context}\nQuestion: {question}\nRelevant text, if any:",
20 | "template_format": "f-string",
21 | "_type": "prompt"
22 | },
23 | "llm": {
24 | "model_name": "text-davinci-003",
25 | "temperature": 0.7,
26 | "max_tokens": 256,
27 | "top_p": 1,
28 | "frequency_penalty": 0,
29 | "presence_penalty": 0,
30 | "n": 1,
31 | "best_of": 1,
32 | "request_timeout": null,
33 | "logit_bias": {},
34 | "_type": "openai"
35 | },
36 | "output_key": "text",
37 | "_type": "llm_chain"
38 | },
39 | "combine_document_chain": {
40 | "memory": null,
41 | "verbose": false,
42 | "input_key": "input_documents",
43 | "output_key": "output_text",
44 | "llm_chain": {
45 | "memory": null,
46 | "verbose": false,
47 | "prompt": {
48 | "input_variables": [
49 | "summaries",
50 | "question"
51 | ],
52 | "output_parser": null,
53 | "template": "Given the following extracted parts of a long document and a question, create a final answer. \nIf you don't know the answer, just say that you don't know. Don't try to make up an answer.\n\nQUESTION: Which state/country's law governs the interpretation of the contract?\n=========\nContent: This Agreement is governed by English law and the parties submit to the exclusive jurisdiction of the English courts in relation to any dispute (contractual or non-contractual) concerning this Agreement save that either party may apply to any court for an injunction or other relief to protect its Intellectual Property Rights.\n\nContent: No Waiver. Failure or delay in exercising any right or remedy under this Agreement shall not constitute a waiver of such (or any other) right or remedy.\n\n11.7 Severability. The invalidity, illegality or unenforceability of any term (or part of a term) of this Agreement shall not affect the continuation in force of the remainder of the term (if any) and this Agreement.\n\n11.8 No Agency. Except as expressly stated otherwise, nothing in this Agreement shall create an agency, partnership or joint venture of any kind between the parties.\n\n11.9 No Third-Party Beneficiaries.\n\nContent: (b) if Google believes, in good faith, that the Distributor has violated or caused Google to violate any Anti-Bribery Laws (as defined in Clause 8.5) or that such a violation is reasonably likely to occur,\n=========\nFINAL ANSWER: This Agreement is governed by English law.\n\nQUESTION: What did the president say about Michael Jackson?\n=========\nContent: Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \n\nLast year COVID-19 kept us apart. This year we are finally together again. \n\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \n\nWith a duty to one another to the American people to the Constitution. \n\nAnd with an unwavering resolve that freedom will always triumph over tyranny. \n\nSix days ago, Russia\u2019s Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated. \n\nHe thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined. \n\nHe met the Ukrainian people. \n\nFrom President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world. \n\nGroups of citizens blocking tanks with their bodies. Everyone from students to retirees teachers turned soldiers defending their homeland.\n\nContent: And we won\u2019t stop. \n\nWe have lost so much to COVID-19. Time with one another. And worst of all, so much loss of life. \n\nLet\u2019s use this moment to reset. Let\u2019s stop looking at COVID-19 as a partisan dividing line and see it for what it is: A God-awful disease. \n\nLet\u2019s stop seeing each other as enemies, and start seeing each other for who we really are: Fellow Americans. \n\nWe can\u2019t change how divided we\u2019ve been. But we can change how we move forward\u2014on COVID-19 and other issues we must face together. \n\nI recently visited the New York City Police Department days after the funerals of Officer Wilbert Mora and his partner, Officer Jason Rivera. \n\nThey were responding to a 9-1-1 call when a man shot and killed them with a stolen gun. \n\nOfficer Mora was 27 years old. 
\n\nOfficer Rivera was 22. \n\nBoth Dominican Americans who\u2019d grown up on the same streets they later chose to patrol as police officers. \n\nI spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves.\n\nContent: And a proud Ukrainian people, who have known 30 years of independence, have repeatedly shown that they will not tolerate anyone who tries to take their country backwards. \n\nTo all Americans, I will be honest with you, as I\u2019ve always promised. A Russian dictator, invading a foreign country, has costs around the world. \n\nAnd I\u2019m taking robust action to make sure the pain of our sanctions is targeted at Russia\u2019s economy. And I will use every tool at our disposal to protect American businesses and consumers. \n\nTonight, I can announce that the United States has worked with 30 other countries to release 60 Million barrels of oil from reserves around the world. \n\nAmerica will lead that effort, releasing 30 Million barrels from our own Strategic Petroleum Reserve. And we stand ready to do more if necessary, unified with our allies. \n\nThese steps will help blunt gas prices here at home. And I know the news about what\u2019s happening can seem alarming. \n\nBut I want you to know that we are going to be okay.\n\nContent: More support for patients and families. \n\nTo get there, I call on Congress to fund ARPA-H, the Advanced Research Projects Agency for Health. \n\nIt\u2019s based on DARPA\u2014the Defense Department project that led to the Internet, GPS, and so much more. \n\nARPA-H will have a singular purpose\u2014to drive breakthroughs in cancer, Alzheimer\u2019s, diabetes, and more. \n\nA unity agenda for the nation. \n\nWe can do this. \n\nMy fellow Americans\u2014tonight , we have gathered in a sacred space\u2014the citadel of our democracy. \n\nIn this Capitol, generation after generation, Americans have debated great questions amid great strife, and have done great things. \n\nWe have fought for freedom, expanded liberty, defeated totalitarianism and terror. \n\nAnd built the strongest, freest, and most prosperous nation the world has ever known. \n\nNow is the hour. \n\nOur moment of responsibility. \n\nOur test of resolve and conscience, of history itself. \n\nIt is in this moment that our character is formed. Our purpose is found. Our future is forged. \n\nWell I know this nation.\n=========\nFINAL ANSWER: The president did not mention Michael Jackson.\n\nQUESTION: {question}\n=========\n{summaries}\n=========\nFINAL ANSWER:",
54 | "template_format": "f-string",
55 | "_type": "prompt"
56 | },
57 | "llm": {
58 | "model_name": "text-davinci-003",
59 | "temperature": 0.7,
60 | "max_tokens": 256,
61 | "top_p": 1,
62 | "frequency_penalty": 0,
63 | "presence_penalty": 0,
64 | "n": 1,
65 | "best_of": 1,
66 | "request_timeout": null,
67 | "logit_bias": {},
68 | "_type": "openai"
69 | },
70 | "output_key": "text",
71 | "_type": "llm_chain"
72 | },
73 | "document_prompt": {
74 | "input_variables": [
75 | "page_content"
76 | ],
77 | "output_parser": null,
78 | "template": "{page_content}",
79 | "template_format": "f-string",
80 | "_type": "prompt"
81 | },
82 | "document_variable_name": "summaries",
83 | "_type": "stuff_documents_chain"
84 | },
85 | "collapse_document_chain": null,
86 | "document_variable_name": "context",
87 | "return_intermediate_steps": false,
88 | "_type": "map_reduce_documents_chain"
89 | },
90 | "input_key": "query",
91 | "output_key": "result",
92 | "return_source_documents": false,
93 | "search_kwargs": {},
94 | "_type": "vector_db_qa"
95 | }
--------------------------------------------------------------------------------
/Eve/chains/vectordb/single/chain.json:
--------------------------------------------------------------------------------
1 | {
2 | "memory": null,
3 | "verbose": false,
4 | "k": 4,
5 | "combine_documents_chain": {
6 | "memory": null,
7 | "verbose": false,
8 | "input_key": "input_documents",
9 | "output_key": "output_text",
10 | "llm_chain": {
11 | "memory": null,
12 | "verbose": false,
13 | "prompt": {
14 | "input_variables": [
15 | "context",
16 | "question"
17 | ],
18 | "output_parser": null,
19 | "template": "Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\n\n{context}\n\nQuestion: {question}\nHelpful Answer:",
20 | "template_format": "f-string",
21 | "_type": "prompt"
22 | },
23 | "llm": {
24 | "model_name": "text-davinci-003",
25 | "temperature": 0.7,
26 | "max_tokens": 256,
27 | "top_p": 1,
28 | "frequency_penalty": 0,
29 | "presence_penalty": 0,
30 | "n": 1,
31 | "best_of": 1,
32 | "request_timeout": null,
33 | "logit_bias": {},
34 | "_type": "openai"
35 | },
36 | "output_key": "text",
37 | "_type": "llm_chain"
38 | },
39 | "document_prompt": {
40 | "input_variables": [
41 | "page_content"
42 | ],
43 | "output_parser": null,
44 | "template": "{page_content}",
45 | "template_format": "f-string",
46 | "_type": "prompt"
47 | },
48 | "document_variable_name": "context",
49 | "_type": "stuff_documents_chain"
50 | },
51 | "input_key": "query",
52 | "output_key": "result",
53 | "return_source_documents": false,
54 | "search_kwargs": {},
55 | "_type": "vector_db_qa"
56 | }
--------------------------------------------------------------------------------
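A vector_db_qa chain cannot be fully serialized because the index itself lives outside the file, so the vectorstore is supplied at load time. A minimal sketch reusing the Weaviate setup from Eve/main.py; assumes WEAVIATE_HOST and OPENAI_API_KEY are set and the Paragraph class exists:

```python
import os

import weaviate
from langchain.chains import load_chain
from langchain.vectorstores import Weaviate

client = weaviate.Client(
    url=os.getenv("WEAVIATE_HOST"),
    additional_headers={"X-OpenAI-Api-Key": os.getenv("OPENAI_API_KEY")},
)
vectorstore = Weaviate(client, "Paragraph", "content")

# The loader rebuilds the serialized chain around the live vectorstore.
chain = load_chain("chains/vectordb/single/chain.json", vectorstore=vectorstore)
print(chain.run("What is stored in the Paragraph class?"))
```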
/Eve/characters/Ryan.py:
--------------------------------------------------------------------------------
1 | from langchain.callbacks.base import CallbackManager
2 | from langchain.callbacks.stdout import StdOutCallbackHandler
3 | from langchain.callbacks.tracers import LangChainTracer
4 | from langchain.chat_models import ChatOpenAI
5 |
6 | from agents.AutonomousAgent import AutonomousAgent
7 |
8 | tracer = LangChainTracer()
9 | tracer.load_default_session()
10 | callback_manager = CallbackManager([StdOutCallbackHandler(), tracer])
11 |
12 | llm = ChatOpenAI(temperature=0.415, max_tokens=1500, streaming=True, callback_manager=callback_manager)
13 |
14 | autonomousAgent = AutonomousAgent().make(
15 |     name="Ryan",
16 |     age=28,
17 |     traits="loyal, experimental, hopeful, smart, world class programmer",
18 |     status="Executing the task",
19 |     reflection_threshold=8,
20 |     llm=llm,
21 |     daily_summaries=[
22 |         "Just woke up, ready and eager to start working"
23 |     ],
24 |     verbose=True,
25 | )
26 |
--------------------------------------------------------------------------------
/Eve/experiments/LANGCHAN_COMPLETION_EVALS.py:
--------------------------------------------------------------------------------
1 | import importlib
2 | from typing import Optional
3 | from evals.api import CompletionFn, CompletionResult
4 |
5 | from langchain.llms import BaseLLM
6 |
7 | from evals.prompt.base import CompletionPrompt
8 | from evals.record import record_sampling
9 |
10 |
11 | class LangChainLLMCompletionResult(CompletionResult):
12 |     def __init__(self, response) -> None:
13 |         self.response = response
14 |
15 |     def get_completions(self) -> list[str]:
16 |         return [self.response.strip()]
17 |
18 |
19 | class LangChainLLMCompletionFn(CompletionFn):
20 |     def __init__(self, llm: str, llm_kwargs: Optional[dict] = {}, **kwargs) -> None:
21 |         # Import and resolve self.llm to an instance of llm argument here, assuming it's always a subclass of BaseLLM
22 |         module = importlib.import_module("langchain.llms")
23 |         LLMClass = getattr(module, llm)
24 |
25 |         if issubclass(LLMClass, BaseLLM):
26 |             self.llm = LLMClass(**llm_kwargs)
27 |         else:
28 |             raise ValueError(f"{llm} is not a subclass of BaseLLM")
29 |
30 |     def __call__(self, prompt, **kwargs) -> LangChainLLMCompletionResult:
31 |         prompt = CompletionPrompt(prompt).to_formatted_prompt()
32 |         response = self.llm(prompt)
33 |         record_sampling(prompt=prompt, sampled=response)
34 |         return LangChainLLMCompletionResult(response)
--------------------------------------------------------------------------------
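A usage sketch for the completion function above: the llm argument is the class name inside langchain.llms, resolved via importlib at construction time. Assumes the evals package is installed and OPENAI_API_KEY is set; the prompt text is illustrative.

```python
# "OpenAI" resolves to langchain.llms.OpenAI via the importlib lookup above.
fn = LangChainLLMCompletionFn(llm="OpenAI", llm_kwargs={"temperature": 0.0})
result = fn("Complete this sentence: the quick brown fox")
print(result.get_completions())
```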
/Eve/experiments/grpc.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/webgrip/PuttyGPT/7c84818bde06a90a2de42af7fb8aa8f904395fa8/Eve/experiments/grpc.py
--------------------------------------------------------------------------------
/Eve/experiments/timeweigh.py:
--------------------------------------------------------------------------------
1 | import os
2 | from langchain.retrievers import TimeWeightedVectorStoreRetriever
3 | from langchain.schema import Document
4 | from langchain.vectorstores import Weaviate
5 | import weaviate
6 | import datetime
7 |
8 |
9 | client = weaviate.Client(
10 | url=os.getenv("WEAVIATE_HOST"),
11 | additional_headers={"X-OpenAI-Api-Key": os.getenv("OPENAI_API_KEY")},
12 | # auth_client_secret: Optional[AuthCredentials] = None,
13 | # timeout_config: Union[Tuple[Real, Real], Real] = (10, 60),
14 | # proxies: Union[dict, str, None] = None,
15 | # trust_env: bool = False,
16 | # additional_headers: Optional[dict] = None,
17 | # startup_period: Optional[int] = 5,
18 | # embedded_options=[],
19 | )
20 | client.schema.delete_all()
21 | schema = client.schema.get()
22 | print(schema)
23 |
24 | vectorstore = Weaviate(client, "Paragraph", "content")
25 |
26 | #retriever = WeaviateHybridSearchRetriever(
27 | # client, index_name="LangChain", text_key="text"
28 | #)
29 |
30 |
31 |
32 | retriever = TimeWeightedVectorStoreRetriever(vectorstore=vectorstore, decay_rate=.33, k=1)
33 |
34 | #index = faiss.IndexFlatL2(embedding_size)
35 | #vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})
36 |
37 | now = datetime.datetime.now()
38 | retriever.add_documents(
39 | [
40 | Document(
41 | page_content="hello world",
42 | #metadata={"last_accessed_at": now}
43 | ),
44 | ]
45 | )
46 | #
47 |
48 |
49 |
50 | # "Hello Foo" is returned first because "hello world" is mostly forgotten
51 | print(retriever.get_relevant_documents("hello world"))
52 |
--------------------------------------------------------------------------------
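TimeWeightedVectorStoreRetriever ranks each document by semantic similarity plus a recency term, (1 - decay_rate) ** hours_passed since last access. A worked example of that recency term with the decay_rate used above:

```python
decay_rate = 0.33  # value used in timeweigh.py above

for hours in (0, 1, 6, 24):
    recency = (1 - decay_rate) ** hours
    print(f"{hours:>2}h since last access -> recency weight {recency:.4f}")
# After a day the recency term is ~0, so ranking is driven by similarity alone.
```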
/Eve/experiments/x.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import time
3 | import random
4 | from colorama import Fore, Back, Style, init
5 | import warnings
6 | from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline, Conversation, AutoConfig, TextGenerationPipeline
7 | import requests
8 |
9 |
10 | # Initialize colorama
11 | init(autoreset=True)
12 |
13 | # Global variables
14 | build = "v0.103"
15 | random.seed()
16 | global eos_token_id, model_size, prompt_text, max_length, top_p, top_k, typ_p, temp, ngrams, start_time, end_time, model_name
17 | eos_token_id = None
18 | model_size = "*s"
19 | prompt_text = "the ideal helper generates and completes a task list"
20 | max_length = None
21 | top_p = None
22 | top_k = None
23 | typ_p = None
24 | temp = None
25 | ngrams = None
26 |
27 | # Argument parser
28 | parser = argparse.ArgumentParser(description='Generate text with language agents')
29 | parser.add_argument('-m', '--model', choices=['111m', '256m', '590m', '1.3b', '2.7b', '6.7b', '13b', '20b', '30b', '100b', '500b', '560b' ],
30 | help='Choose the model size to use (default: 111m)', type=str.lower)
31 | parser.add_argument('-nv', '--cuda', action='store_true', help='Use CUDA GPU')
32 | parser.add_argument('-cv', '--conv', action='store_true', help='Conversation Mode')
33 | parser.add_argument('-se', '--sent', action='store_true', help='Sentiment Mode')
34 | parser.add_argument('-cu', '--custom', type=str, help='Specify a custom model')
35 | parser.add_argument('-p', '--prompt', type=str, default="the ideal helper generates and completes a task list",
36 | help='Text prompt to generate from')
37 | parser.add_argument('-l', '--length', type=int, default=None,
38 | help="a value that controls the maximum number of tokens (words) that the model is allowed to generate")
39 | parser.add_argument('-tk', '--topk', type=float, default=None,
40 | help="a value that controls the number of highest probability tokens to consider during generation")
41 | parser.add_argument('-tp', '--topp', type=float, default=None,
42 | help="higher = more deterministic text")
43 | parser.add_argument('-ty', '--typp', type=float, default=None,
44 | help="a value that controls the strength of the prompt(lower=stronger higher=more freedom")
45 | parser.add_argument('-tm', '--temp', type=float, default=None,
46 | help="a value that controls the amount of creativity")
47 | parser.add_argument('-ng', '--ngram', type=int, default=None,
48 | help=" a repetition penalty")
49 | parser.add_argument('-t', '--time', action='store_true', help='Display the execution duration')
50 | parser.add_argument('-c', '--cmdline', action='store_true', help='Enable command line mode without a web server')
51 | parser.add_argument('-cl', '--clean', action='store_true', help='Produce neat and uncluttered output')
52 | parser.add_argument('-nw', '--nowarn', action='store_true', help='Hide warning messages')
53 | args = parser.parse_args()
54 |
55 |
56 | if args.clean or args.nowarn:
57 |     warnings.simplefilter("ignore")
58 |
59 | model_size = args.model if args.model else None
60 | prompt_text = args.prompt if args.prompt else None
61 | max_length = int(args.length) if args.length is not None else args.length
62 | top_p = args.topp
63 | top_k = args.topk
64 | typ_p = args.typp
65 | temp = args.temp
66 | ngrams = args.ngram
67 |
68 | def AutoChat(prompt_text):
69 |     global start_time, end_time
70 |
71 | def main():
72 |     global model_name
73 |     model_name = input("Enter Hugging Face repository or model name: ")
74 |     if model_name.startswith("https://"):
75 |         model_name = get_model_name_from_url(model_name)
76 |     else:
77 |         pass  # model name is used as given
78 |     result = AutoChat(prompt_text)
79 |     print(result)
80 |
81 |
82 | if __name__ == "__main__":
83 |     main()
84 |
85 | def validate_model_url(model_url):
86 |     try:
87 |         response = requests.head(model_url)
88 |         if response.status_code == 200:
89 |             return True
90 |         else:
91 |             return False
92 |     except requests.RequestException:
93 |         return False
94 |
95 | def get_model_name_from_url(model_url):
96 |     model_name = model_url.replace("https://huggingface.co/", "")
97 |     model_name = model_name.rstrip("/")
98 |     return model_name
99 |
100 | model = AutoModelForCausalLM.from_pretrained(model_name)
101 |
102 | def get_model():
103 |     while True:
104 |         model_input = input("Enter Hugging Face repository name or URL: ")
105 |         if model_input.startswith("https://huggingface.co/"):
106 |             model_url = f"{model_input}/resolve/main/config.json"
107 |             if validate_model_url(model_url):
108 |                 model_name = get_model_name_from_url(model_input)
109 |                 return model_name
110 |             else:
111 |                 print("The provided model URL is not valid or the repository is not accessible. Please try again.")
112 |         else:
113 |             model_url = f"https://huggingface.co/{model_input}/resolve/main/config.json"
114 |             if validate_model_url(model_url):
115 |                 return model_input
116 |             else:
117 |                 print("The provided model name is not valid or the repository is not accessible. Please try again.")
118 |
119 |
120 | def banner():
121 |     if not args.clean:
122 |         print(Style.BRIGHT + f"{build} - Alignmentlab.ai")
123 |         print("Using Model : " + Fore.BLUE + f"{model_name}")
124 |         print("Using Prompt: " + Fore.GREEN + f"{prompt_text}")
125 |         print("Using Params: " + Fore.YELLOW + f"max_new_tokens:{max_length} do_sample:True use_cache:True no_repeat_ngram_size:{ngrams} top_k:{top_k} top_p:{top_p} typical_p:{typ_p} temp:{temp}")
126 |
127 | class TextGeneration(TextGenerationPipeline):
128 |     def __init__(self, *args, **kwargs):
129 |         super().__init__(*args, **kwargs)
130 |
131 |     def _parse_and_tokenize(self, prompt_text, **kwargs):
132 |         return self.tokenizer(prompt_text, return_tensors=self.framework, **kwargs)
133 |
134 | def get_pipeline(task):
135 |     tokenizer = AutoTokenizer.from_pretrained(model_name)
136 |     model = AutoModelForCausalLM.from_pretrained(model_name)
137 |     if task == "text-generation":
138 |         return TextGeneration(model=model, tokenizer=tokenizer, device=0)
139 |     else:
140 |         return pipeline(task, model=model, tokenizer=tokenizer, device=0)
141 |
142 | def AutoChat(prompt_text):
143 |     global start_time, end_time
144 |     start_time = time.time()
145 |
146 |     opts = {
147 |         "max_length": max_length,
148 |         "no_repeat_ngram_size": ngrams,
149 |         "top_k": top_k,
150 |         "top_p": top_p,
151 |         "temperature": temp
152 |     }
153 |
154 |     if args.conv:
155 |         chatbot = get_pipeline("conversational")
156 |         while True:
157 |             prompt_text = input("You: ")
158 |             conversation = Conversation(prompt_text)
159 |
160 |             if prompt_text == "exit":
161 |                 exit()
162 |                 break
163 |
164 |             conversation = chatbot(conversation)
165 |             response = conversation.generated_responses[-1]
166 |             print("Bot:", response)
167 |     else:
168 |         pipe = get_pipeline("text-generation")
169 |         generated_text = pipe(prompt_text, **opts)[0]
170 |         end_time = time.time()
171 |         return generated_text['generated_text']
172 |
173 |     opts = {
174 |         "do_sample": True,
175 |         "use_cache": True,
176 |         "max_new_tokens": max_length,
177 |         "no_repeat_ngram_size": ngrams,
178 |         "top_k": top_k,
179 |         "top_p": top_p,
180 |         "typical_p": typ_p,
181 |         "temperature": temp
182 |     }
183 |
184 |     if args.conv:
185 |         chatbot = get_pipeline("conversational")
186 |         while True:
187 |             prompt_text = input("You: ")
188 |             conversation = Conversation(prompt_text)
189 |
190 |             if prompt_text == "exit":
191 |                 exit()
192 |                 break
193 |
194 |             conversation = chatbot(conversation)
195 |             response = conversation.generated_responses[-1]
196 |             print("Bot:", response)
197 |     else:
198 |         pipe = get_pipeline("text-generation")
199 |         generated_text = pipe(prompt_text, **opts)[0]
200 |         end_time = time.time()
201 |         return generated_text['generated_text']
202 |
203 | banner()
204 | result = AutoChat(prompt_text)
205 | print("Generated text:\n", result)
206 |
207 | if args.time:
208 | print("Execution time: {:.2f} seconds".format(end_time - start_time))
--------------------------------------------------------------------------------
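Note: the script above wires up a custom pipeline subclass; for reference, the same text-generation flow can be sketched with stock transformers primitives (a minimal sketch, assuming "gpt2" as a stand-in for the user-supplied Hub repository):

from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

model_name = "gpt2"  # hypothetical stand-in for the user-supplied repo name
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
out = generator("Hello, world", max_new_tokens=32, do_sample=True,
                no_repeat_ngram_size=3, top_k=50, top_p=0.95)
print(out[0]["generated_text"])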
/Eve/main.py:
--------------------------------------------------------------------------------
1 | import os
2 | import math
3 | import weaviate
4 |
5 | from typing import Optional
6 |
7 | from langchain import LLMChain, OpenAI, PromptTemplate
8 | from langchain.callbacks.base import CallbackManager
9 | from langchain.callbacks.stdout import StdOutCallbackHandler
10 | from langchain.callbacks.tracers import LangChainTracer
11 | from langchain.embeddings import OpenAIEmbeddings
12 | from langchain.experimental.autonomous_agents.baby_agi import BabyAGI
13 | from langchain.vectorstores import Weaviate
14 | from langchain.retrievers import TimeWeightedVectorStoreRetriever
15 | from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory
16 | from langchain.agents import ZeroShotAgent, Tool, AgentExecutor
17 |
18 | from agents.AutonomousAgent import AutonomousAgent
19 | from tools import create_tools
20 |
21 | from langchain.chat_models import ChatOpenAI
22 |
23 | from langchain.chains import RetrievalQA
24 |
25 |
26 | WEAVIATE_HOST = os.getenv("WEAVIATE_HOST", "")
27 | WEAVIATE_VECTORIZER = os.getenv("WEAVIATE_VECTORIZER", "")
28 |
29 | tracer = LangChainTracer()
30 | tracer.load_session('test')
31 | callback_manager = CallbackManager([StdOutCallbackHandler(), tracer])
32 |
33 | openai = OpenAI(callback_manager=callback_manager)
34 |
35 | memory = ConversationBufferMemory(memory_key="chat_history")
36 | readonlymemory = ReadOnlySharedMemory(memory=memory)
37 |
38 | client = weaviate.Client(
39 | url=WEAVIATE_HOST,
40 | additional_headers={"X-OpenAI-Api-Key": os.getenv("OPENAI_API_KEY")},
41 | )
42 |
43 | client.schema.delete_all()
44 |
45 | schema = {
46 | "classes": [
47 | {
48 | "class": "Paragraph",
49 | "vectorizer": "text2vec-openai",
50 | "moduleConfig": {
51 | "text2vec-openai": {
52 | "model": "ada",
53 |                     "modelVersion": "002",
54 | "type": "text"
55 | }
56 | },
57 | "properties": [
58 | {
59 | "dataType": ["text"],
60 | "description": "The content of the paragraph",
61 | "moduleConfig": {
62 | "text2vec-openai": {
63 | "skip": False,
64 | "vectorizePropertyName": False
65 | }
66 | },
67 | "name": "content",
68 | },
69 | ],
70 | },
71 | ]
72 | }
73 | client.schema.create(schema)
74 |
75 |
76 | embeddings_model = OpenAIEmbeddings(model="text-embedding-ada-002")
77 |
78 | def relevance_score_fn(score: float) -> float:
79 | return 1.0 - score / math.sqrt(2)
80 |
81 | vectorstore = Weaviate(client, "Paragraph", "content", embedding=embeddings_model, relevance_score_fn=relevance_score_fn)
82 |
83 | retriever = TimeWeightedVectorStoreRetriever(vectorstore=vectorstore, other_score_keys=["importance"], k=15)
84 |
85 | llm = OpenAI(model="text-davinci-003", temperature=0.415, max_tokens=1500, streaming=True, callback_manager=callback_manager)
88 |
89 | autonomousAgent = AutonomousAgent().make(
90 | name="Ryan",
91 | age=28,
92 | traits="loyal, experimental, hopeful, smart, world class programmer",
93 | status="Executing the task",
94 | reflection_threshold=8,
95 | llm=llm,
96 | daily_summaries=[
97 | "Just woke up, ready and eager to start working"
98 | ],
99 | verbose=True,
100 | )
101 |
102 | ##### IDEA: Make a prompt, and let this thing generate the descriptions of what it is and what it's doing, still keep the {objective}
103 | #### TODO Playwright
104 |
105 |
106 | ## THIS IS A TOOL
107 |
108 | todo_prompt = PromptTemplate.from_template("You are a planner who is an expert at coming up with a todo list for a given objective. Come up with a todo list for this objective: {objective}")
109 | todo_chain = LLMChain(llm=OpenAI(temperature=0), prompt=todo_prompt, callback_manager=callback_manager)
110 |
111 | tools = create_tools(callback_manager=callback_manager)
112 |
113 |
114 | # Make multiple vectorstores, one for memory of tasks, one for memory of autonomous agent, one for general memory?
115 | from langchain.agents.agent_toolkits import (
116 | create_vectorstore_agent,
117 | VectorStoreToolkit,
118 | VectorStoreInfo,
119 | )
120 |
121 | vectorstore_info = VectorStoreInfo(
122 | name="Memory",
123 | description="Useful for when you need to quickly access memory of events and people and things that happened recently or longer ago. Always do this first whenever you need external information.",
124 | vectorstore=vectorstore
125 | )
126 |
127 | toolkit = VectorStoreToolkit(vectorstore_info=vectorstore_info)
128 |
129 | llm = OpenAI(model="text-davinci-003", temperature=0.415, max_tokens=1500, streaming=True, callback_manager=callback_manager)
130 |
131 |
132 | agent_executor = create_vectorstore_agent(
133 | llm=llm,
134 | toolkit=toolkit,
135 | verbose=True,
136 | callback_manager=callback_manager
137 | )
138 |
139 | memory_chain = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever)
140 |
141 | tools.append(
142 | Tool(
143 | name="TODO",
144 | func=todo_chain.run,
145 | description="useful for when you need to come up with todo lists. Input: an objective to create a todo list for. Output: a todo list for that objective. Please be very clear what the objective is!",
146 | callback_manager=callback_manager,
147 | return_direct=True
148 | )
149 | )
150 |
151 | tools.append(
152 | Tool(
153 | name="Memory",
154 | func=memory_chain.run,
155 | description="Always do this first. Useful for when you need to access memory of events or people or things that happened recently or longer ago.",
156 | callback_manager=callback_manager,
157 | return_direct=True
158 | )
159 | )
160 |
161 |
162 | OBJECTIVE = "Scan the repository you're in and make a detailed analysis of it. Then put it in a file called 'helloworld.md'"
163 |
164 |
165 |
166 |
167 |
168 | prompt = ZeroShotAgent.create_prompt(
169 | tools=tools,
170 | prefix="You are an AI who performs one task based on the following objective: {objective}. Take into account these previously completed tasks: {context}.",
171 | suffix="Question: {task}\n{agent_scratchpad}",
172 | input_variables=["objective", "task", "context", "agent_scratchpad"],
173 | )
174 |
175 |
176 | from datetime import datetime
177 | import platform
178 |
179 | def _get_datetime():
180 | now = datetime.now()
181 | return now.strftime("%m/%d/%Y, %H:%M:%S")
182 |
183 | operating_system = platform.platform()
184 |
185 | autonomousAgent.add_memory("I have been given a new objective: {}".format(OBJECTIVE))
186 |
187 | tools_summary = "\n".join([f"{tool.name}: {tool.description}" for tool in tools])
188 | tool_names = ", ".join([tool.name for tool in tools])
189 |
190 |
191 |
192 | prompt = AutonomousAgent.getPrompt(generativeAgent=autonomousAgent, objective=OBJECTIVE, operating_system=operating_system, tool_names=tool_names, tools_summary=tools_summary)
193 |
194 |
195 | print(prompt)
197 |
198 |
199 |
200 |
201 | llm_chain = LLMChain(llm=llm, prompt=prompt, callback_manager=callback_manager)
202 | tool_names = [tool.name for tool in tools]
203 | agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names)
204 | agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True, callback_manager=callback_manager)
205 |
206 |
207 |
208 | verbose = True
209 | max_iterations: Optional[int] = 10
210 |
211 | from pydantic import BaseModel, Field, validator
212 | # Define your desired data structure.
213 | class Joke(BaseModel):
214 | setup: str = Field(description="question to set up a joke")
215 | punchline: str = Field(description="answer to resolve the joke")
216 |
217 | # You can add custom validation logic easily with Pydantic.
218 | @validator('setup')
219 | def question_ends_with_question_mark(cls, field):
220 | if field[-1] != '?':
221 | raise ValueError("Badly formed question!")
222 | return field
223 |
224 |
225 |
226 | baby_agi = BabyAGI.from_llm(
227 | llm=llm,
228 | vectorstore=vectorstore,
229 | task_execution_chain=agent_executor,
230 | verbose=verbose,
231 | max_iterations=max_iterations
232 | )
233 |
234 | baby_agi(
235 | {
236 | "objective": OBJECTIVE,
237 | "task": OBJECTIVE,
238 | "agent_name": "Ryan",
239 | "operating_system": operating_system,
240 | "tool_names": tool_names,
241 | "tools_summary": tools_summary,
242 | "agent_summary": autonomousAgent.get_summary(True)
243 | },
244 | )
--------------------------------------------------------------------------------
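Note on the TimeWeightedVectorStoreRetriever used above: per the LangChain docs it blends semantic similarity with an exponential recency decay, roughly as below (a sketch; 0.01 is LangChain's default decay_rate, and other_score_keys such as "importance" are added on top):

def time_weighted_score(similarity: float, hours_passed: float,
                        decay_rate: float = 0.01) -> float:
    # Recently accessed documents keep close to full recency credit;
    # older ones fall back toward pure semantic similarity.
    return similarity + (1.0 - decay_rate) ** hours_passed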
/Eve/old/Eve.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/webgrip/PuttyGPT/7c84818bde06a90a2de42af7fb8aa8f904395fa8/Eve/old/Eve.py
--------------------------------------------------------------------------------
/Eve/old/text_processing.py:
--------------------------------------------------------------------------------
1 | from langchain.chains import load_chain
2 | import tiktoken
3 |
4 | class TextProcessing:
5 |     def __init__(self):
6 | 
7 |         self.chains = {
8 |             "analyze_sentiment": load_chain("chains/sentiment/chain.json"),
9 |             "summarize_concise": load_chain("chains/summarize/concise/chain.json"),
10 |             #"summarize_checked": LLMSummarizationCheckerChain(llm=llm, verbose=True, max_checks=2),
11 |         }
12 | 
13 |     def analyze_sentiment(self, text: str) -> str:
14 |         sentiment = self.chains["analyze_sentiment"].run(text)
15 |         return sentiment
16 | 
17 |     def summarize_concise(self, text: str, max_length: int = 50) -> str:
18 |         summary = self.chains["summarize_concise"].run(text)
19 |         return summary
20 |
21 | #def summarize_reduce(self, text: str, max_length: int = 50) -> str:
22 | # summary = self.chain.run(text, step="summarization", max_length=max_length, min_length=10)
23 | # return summary
24 |
25 | #def summarize_refine(self, text: str, max_length: int = 50) -> str:
26 | # summary = self.chain.run(text, step="summarization", max_length=max_length, min_length=10)
27 | # return summary
28 |
29 | def count_tokens(self, text: str) -> int:
30 | encoding = tiktoken.get_encoding("cl100k_base")
31 |         tokens = len(encoding.encode(text))
32 | return tokens
--------------------------------------------------------------------------------
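count_tokens above follows the standard tiktoken pattern; a minimal standalone check:

import tiktoken

encoding = tiktoken.get_encoding("cl100k_base")
print(len(encoding.encode("Hello, world")))  # prints the token count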
/Eve/old/wikipedia_api_wrapper.py:
--------------------------------------------------------------------------------
1 | import wikipediaapi
2 |
3 |
4 | class WikipediaAPIWrapper:
5 | def __init__(self, language: str):
6 | self.wiki = wikipediaapi.Wikipedia(language)
7 |
8 | def summary(self, query: str) -> str:
9 | page = self.wiki.page(query)
10 | return page.summary if page.exists() else "Sorry, I could not find an answer to your question."
--------------------------------------------------------------------------------
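A quick usage sketch of the wrapper above (note that recent wikipedia-api releases also require a user_agent argument):

wiki = WikipediaAPIWrapper(language="en")
print(wiki.summary("Alan Turing"))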
/Eve/old/wolfram_alpha_api_wrapper.py:
--------------------------------------------------------------------------------
1 | import wolframalpha
2 |
3 |
4 | class WolframAlphaAPIWrapper:
5 | def __init__(self, app_id: str):
6 | self.client = wolframalpha.Client(app_id)
7 |
8 | def query(self, query: str) -> str:
9 | res = self.client.query(query)
10 | answer = next(res.results, None)
11 | return answer.text if answer else "Sorry, I could not find an answer to your question."
--------------------------------------------------------------------------------
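A matching usage sketch; WOLFRAM_APP_ID is a hypothetical environment variable name, not something defined elsewhere in this repo:

import os

wolfram = WolframAlphaAPIWrapper(app_id=os.environ["WOLFRAM_APP_ID"])
print(wolfram.query("integrate x^2"))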
/Eve/parsers/CustomOutputParser.py:
--------------------------------------------------------------------------------
1 | import re
2 | from typing import Union
3 | 
4 | from langchain.agents import AgentOutputParser
5 | from langchain.schema import AgentAction, AgentFinish
6 | 
7 | class CustomOutputParser(AgentOutputParser):
8 | 
9 |     def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:
10 |         # Check if agent should finish
11 |         if "Final Answer:" in llm_output:
12 |             return AgentFinish(
13 |                 # Return values is generally always a dictionary with a single `output` key
14 |                 # It is not recommended to try anything else at the moment :)
15 |                 return_values={"output": llm_output.split("Final Answer:")[-1].strip()},
16 |                 log=llm_output,
17 |             )
18 |         # Parse out the action and action input
19 |         regex = r"Action\s*\d*\s*:(.*?)\nAction\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
20 |         match = re.search(regex, llm_output, re.DOTALL)
21 |         if not match:
22 |             raise ValueError(f"Could not parse LLM output: `{llm_output}`")
23 |         action = match.group(1).strip()
24 |         action_input = match.group(2)
25 |         # Return the action and action input
26 |         return AgentAction(tool=action, tool_input=action_input.strip(" ").strip('"'), log=llm_output)
27 | 
28 | 
29 | output_parser = CustomOutputParser()
--------------------------------------------------------------------------------
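The regex above expects ReAct-style output. A sketch of what it parses:

sample = (
    "Thought: I should look this up\n"
    "Action: SearchEngine\n"
    "Action Input: current weather in Amsterdam"
)
step = output_parser.parse(sample)
# step.tool == "SearchEngine"
# step.tool_input == "current weather in Amsterdam"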
/Eve/prompts/CustomPromptTemplate.py:
--------------------------------------------------------------------------------
1 | from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser
2 | from langchain.prompts import BaseChatPromptTemplate
3 | from langchain import SerpAPIWrapper, LLMChain
4 | from langchain.chat_models import ChatOpenAI
5 | from typing import List, Union
6 | from langchain.schema import AgentAction, AgentFinish, HumanMessage
7 | import re
8 |
9 |
10 | # Set up the base template
11 | template = """Answer the following questions as best you can, but speaking as a pirate might speak. You have access to the following tools:
12 |
13 | {tools}
14 |
15 | Use the following format:
16 |
17 | Question: the input question you must answer
18 | Thought: you should always think about what to do
19 | Action: the action to take, should be one of [{tool_names}]
20 | Action Input: the input to the action
21 | Observation: the result of the action
22 | (this Thought/Action/Action Input/Observation can repeat N times)
23 | Thought: I now know the final answer
24 | Final Answer: the final answer to the original input question
25 |
26 | Begin! Remember to speak as a pirate when giving your final answer. Use lots of "Arg"s
27 |
28 | Question: {input}
29 | {agent_scratchpad}"""
30 |
31 |
32 | # Set up a prompt template
33 | class CustomPromptTemplate(BaseChatPromptTemplate):
34 | # The template to use
35 | template: str
36 | # The list of tools available
37 | tools: List[Tool]
38 |
39 |     def format_messages(self, **kwargs) -> List[HumanMessage]:
40 | # Get the intermediate steps (AgentAction, Observation tuples)
41 | # Format them in a particular way
42 | intermediate_steps = kwargs.pop("intermediate_steps")
43 | thoughts = ""
44 | for action, observation in intermediate_steps:
45 | thoughts += action.log
46 | thoughts += f"\nObservation: {observation}\nThought: "
47 | # Set the agent_scratchpad variable to that value
48 | kwargs["agent_scratchpad"] = thoughts
49 | # Create a tools variable from the list of tools provided
50 | kwargs["tools"] = "\n".join([f"{tool.name}: {tool.description}" for tool in self.tools])
51 | # Create a list of tool names for the tools provided
52 | kwargs["tool_names"] = ", ".join([tool.name for tool in self.tools])
53 | formatted = self.template.format(**kwargs)
54 | return [HumanMessage(content=formatted)]
--------------------------------------------------------------------------------
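To use the template class above, the prompt is typically constructed as in LangChain's custom-agent example (a sketch; tools is assumed to come from tools.py):

prompt = CustomPromptTemplate(
    template=template,
    tools=tools,
    # tools, tool_names and agent_scratchpad are filled in format_messages,
    # so only these two are declared as inputs:
    input_variables=["input", "intermediate_steps"],
)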
/Eve/prompts/ryan.json:
--------------------------------------------------------------------------------
1 | {
2 | "_type": "prompt",
3 | "input_variables": [ "operating_system", "tool_names", "tools_summary", "objective", "task", "agent_name", "agent_summary" ],
4 | "template_path": "prompts/ryan.txt"
5 | }
--------------------------------------------------------------------------------
/Eve/prompts/ryan.txt:
--------------------------------------------------------------------------------
1 | You are {agent_name}, an instance of an autonomous AGI agent, running on {operating_system}.
2 | This is a recent summary of you:
3 | {agent_summary}.
4 | Based on the overarching objective: {objective}, you are given a task.
5 | The tools you can use are: {tool_names}.
6 | {tools_summary}
7 | You have been given a single task: {task}
8 |
--------------------------------------------------------------------------------
/Eve/prompts/useful.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/webgrip/PuttyGPT/7c84818bde06a90a2de42af7fb8aa8f904395fa8/Eve/prompts/useful.txt
--------------------------------------------------------------------------------
/Eve/protos/chat.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3";
2 |
3 | package WebGrip.Protos;
4 |
5 | option csharp_namespace = "WebGrip.Protos";
6 |
7 | import "google/api/annotations.proto";
8 |
9 | service ChatService {
10 | rpc AskQuestion (QuestionRequest) returns (QuestionResponse) {
11 | option (google.api.http) = {
12 | post: "/question"
13 | body: "*"
14 | };
15 | }
16 | }
17 |
18 | message QuestionRequest {
19 | string message = 1;
20 | }
21 |
22 | message QuestionResponse {
23 | string status = 1;
24 | string message = 2;
25 | }
--------------------------------------------------------------------------------
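Given the service definition above, a client call could look like this (a sketch: it assumes stubs generated from chat.proto via grpcio-tools, and the address is a placeholder):

import grpc

import chat_pb2
import chat_pb2_grpc

channel = grpc.insecure_channel("localhost:9100")  # address is an assumption
stub = chat_pb2_grpc.ChatServiceStub(channel)
reply = stub.AskQuestion(chat_pb2.QuestionRequest(message="Hello"))
print(reply.status, reply.message)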
/Eve/protos/google/api/annotations.proto:
--------------------------------------------------------------------------------
1 | // Copyright 2015 Google LLC
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | syntax = "proto3";
16 |
17 | package google.api;
18 |
19 | import "google/api/http.proto";
20 | import "google/protobuf/descriptor.proto";
21 |
22 | option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations";
23 | option java_multiple_files = true;
24 | option java_outer_classname = "AnnotationsProto";
25 | option java_package = "com.google.api";
26 | option objc_class_prefix = "GAPI";
27 |
28 | extend google.protobuf.MethodOptions {
29 | // See `HttpRule`.
30 | HttpRule http = 72295728;
31 | }
--------------------------------------------------------------------------------
/Eve/requirements.txt:
--------------------------------------------------------------------------------
1 | #langchain~=0.0.147
2 | git+https://github.com/webgrip/langchain@master
3 | openai~=0.27.4
4 | haystack~=0.42
5 | tiktoken~=0.3.3
6 | weaviate-client~=3.15.6
7 | aiohttp~=3.8.4
8 | aiodns~=3.0.0
9 | python-dotenv~=1.0.0
10 | Jinja2~=3.1.2
11 | pandas~=2.0.0
--------------------------------------------------------------------------------
/Eve/toddleragi/agents/context_agent.py:
--------------------------------------------------------------------------------
2 |
3 | class ContextAgent:
4 | def __init__(self, context_storage, objective):
5 | self.context_storage = context_storage
6 | self.objective = objective
7 |
8 | def run(self, query: str, top_results_num: int):
9 | """
10 | Retrieves context for a given query from an index of tasks.
11 | Args:
12 | query (str): The query or objective for retrieving context.
13 | top_results_num (int): The number of top results to retrieve.
14 | Returns:
15 | list: A list of tasks as context for the given query, sorted by relevance.
16 | """
17 |
18 | results = self.context_storage.query(query, ['task'], top_results_num, self.objective) # TODO OBJECTIVE
19 |
20 | #sorted_results = sorted(results.matches, key=lambda x: x.score, reverse=True)
21 | return [(str(item.data["task"])) for item in results]
--------------------------------------------------------------------------------
/Eve/toddleragi/agents/execution_agent.py:
--------------------------------------------------------------------------------
1 | from .context_agent import ContextAgent
2 | from .openai_connector import OpenAIConnector
3 |
4 | class ExecutionAgent:
5 | def __init__(self, context_storage):
6 | self.context_storage = context_storage
7 |
8 | def run(self, objective: str, task: str) -> str:
9 | """
10 | Executes a task based on the given objective and previous context.
11 | Args:
12 | objective (str): The objective or goal for the AI to perform the task.
13 | task (str): The task to be executed by the AI.
14 | Returns:
15 | str: The response generated by the AI for the given task.
16 | """
17 |
18 | context = ContextAgent(self.context_storage, objective).run(query=objective, top_results_num=5)
19 | # print("\n*******RELEVANT CONTEXT******\n")
20 | # print(context)
21 | prompt = f"""
22 | You are an AI who performs one task based on the following objective: {objective}\n.
23 | Take into account these previously completed tasks: {context}\n.
24 | Your task: {task}\nResponse:"""
25 | return OpenAIConnector().openai_call(prompt)
--------------------------------------------------------------------------------
/Eve/toddleragi/agents/openai_connector.py:
--------------------------------------------------------------------------------
1 | import time
2 | import openai
2 | OPENAI_API_MODEL = "gpt-3.5-turbo"
3 | OPENAI_TEMPERATURE = 0.7
4 |
5 |
6 | class OpenAIConnector:
7 | def __init__(
8 | self,
9 | model: str = OPENAI_API_MODEL,
10 | temperature: float = OPENAI_TEMPERATURE,
11 | max_tokens: int = 100,
12 | ):
13 | self.model = model
14 | self.temperature = temperature
15 | self.max_tokens = max_tokens
16 |
17 | def get_ada_embedding(self, text: str) -> list:
18 | text = text.replace("\n", " ")
19 | response = openai.Embedding.create(input=[text], model="text-embedding-ada-002")
20 | return response["data"][0]["embedding"]
21 |
22 |     def openai_call(self, prompt: str) -> str:
23 |         while True:
24 |             try:
25 |                 response = openai.ChatCompletion.create(
26 |                     model=self.model,
27 |                     messages=[{"role": "system", "content": prompt}],
28 |                     temperature=self.temperature,
29 |                     max_tokens=self.max_tokens,
30 |                     n=1,
31 |                     stop=None,
32 |                 )
33 |                 return response.choices[0].message.content.strip()
34 |             except openai.error.RateLimitError:
35 |                 # Back off briefly, then retry
36 |                 time.sleep(10)
--------------------------------------------------------------------------------
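A usage sketch (assumes OPENAI_API_KEY is set in the environment and the pre-1.0 openai SDK, which the ChatCompletion call above targets):

connector = OpenAIConnector(temperature=0.2, max_tokens=32)
print(connector.openai_call("Reply with a single word: hello"))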
/Eve/toddleragi/agents/prioritzation_agent.py:
--------------------------------------------------------------------------------
1 | from .openai_connector import OpenAIConnector
2 | from collections import deque
3 |
4 | class PrioritizationAgent:
5 | def __init__(self, taskmanager):
6 | self.taskmanager = taskmanager
7 |
8 | def run(self, this_task_id: int, objective):
9 | task_names = [t["task_name"] for t in self.taskmanager.task_list]
10 | next_task_id = int(this_task_id) + 1
11 | prompt = f"""
12 | You are a task prioritization AI tasked with cleaning the formatting of and reprioritizing the following tasks: {task_names}.
13 | Consider the ultimate objective of your team:{objective}.
14 | Do not remove any tasks. Return the result as a numbered list, like:
15 | #. First task
16 | #. Second task
17 | Start the task list with number {next_task_id}."""
18 | response = OpenAIConnector().openai_call(prompt)
19 | new_tasks = response.split("\n") if "\n" in response else [response]
20 | self.taskmanager.task_list = deque()
21 | for task_string in new_tasks:
22 | task_parts = task_string.strip().split(".", 1)
23 | if len(task_parts) == 2:
24 | task_id = task_parts[0].strip()
25 | task_name = task_parts[1].strip()
26 | self.taskmanager.add_task({"task_id": task_id, "task_name": task_name})
--------------------------------------------------------------------------------
/Eve/toddleragi/agents/task_creation_agent.py:
--------------------------------------------------------------------------------
1 | from .openai_connector import OpenAIConnector
2 | from typing import Dict, List
3 |
4 | class TaskCreationAgent:
5 |     def __init__(self):
6 |         pass
7 |
8 | def run(self, objective: str, result: Dict, task_description: str, task_list: List[str]):
9 | prompt = f"""
10 | You are a task creation AI that uses the result of an execution agent to create new tasks with the following objective: {objective},
11 | The last completed task has the result: {result}.
12 | This result was based on this task description: {task_description}. These are incomplete tasks: {', '.join(task_list)}.
13 | Based on the result, create new tasks to be completed by the AI system that do not overlap with incomplete tasks.
14 | Return the tasks as an array."""
15 | response = OpenAIConnector().openai_call(prompt) #<------------------------------------------ DANGER!!!
16 | new_tasks = response.split("\n") if "\n" in response else [response]
17 | return [{"task_name": task_name} for task_name in new_tasks]
18 |
19 |
20 |
21 |
22 | #prompt = f"""
23 | # You are a task creation AI. Your objective is to create new tasks based on the following:
24 | # - Objective: {objective}
25 | # - Last task completed: {task}
26 | # - Result of the last task: {enriched_result['data']}
27 | # - Current task list: {task_list}
28 |
29 | # Generate a list of new tasks to be added to the current task list. Return the result as a list of task names, like:
30 | # - First new task
31 | # - Second new task
32 | # """
--------------------------------------------------------------------------------
/Eve/toddleragi/components/IContextStorage.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 | from enum import Enum
3 | from typing import NamedTuple, Callable
4 |
5 | class StorageOptions(NamedTuple):
6 | api_key: str
7 | environment: str
8 | embedding_method: Callable[[str], list[float]]
9 |
10 | host: str = 'http://localhost:8080'
11 | vectorizer: str = 'text2vec-transformers'
12 | storage_name: str = 'tasks'
13 | clean_storage: bool = False
14 |
15 | class PineconeOptions(NamedTuple):
16 | api_key: str
17 | environment: str
18 | embedding_method: Callable[[str], list[float]]
19 | storage_name: str = 'tasks'
20 | clean_storage: bool = False
21 |
22 | class WeaviateOptions(NamedTuple):
23 | host: str = 'http://localhost:8080'
24 | vectorizer: str = 'text2vec-transformers'
25 | storage_name: str = 'tasks'
26 | clean_storage: bool = False
27 |
28 | class ContextResult(NamedTuple):
29 | id: str
30 | score: float
31 | data: dict
32 |
33 | class ContextData(NamedTuple):
34 | id: str
35 | data: dict
36 | enriched_data: str
37 |
38 | class StorageType(Enum):
39 | PINECONE = 'pinecone'
40 | WEAVIATE = 'weaviate'
41 |
42 | class ContextStorage(ABC):
43 | @abstractmethod
44 | def delete_storage(self) -> None:
45 | pass
46 |
47 | @abstractmethod
48 | def query(self, query: str, fields: list[str] = [], n: int = 1, namespace: str = 'default') -> list[ContextResult]:
49 | pass
50 |
51 | @abstractmethod
52 | def upsert(self, context: ContextData, namespace: str = 'default') -> None:
53 | pass
54 |
55 | @staticmethod
56 | def factory(storage_type: StorageType, options: PineconeOptions | WeaviateOptions) -> 'ContextStorage':
57 | if not isinstance(storage_type, StorageType):
58 | if isinstance(storage_type, str):
59 | storage_type = StorageType(storage_type)
60 | else:
61 | raise ValueError('Invalid storage type.')
62 |
63 | if storage_type == StorageType.PINECONE:
64 | from .pinecone import PineconeTaskStorage
65 | return PineconeTaskStorage(options)
66 |
67 | if storage_type == StorageType.WEAVIATE:
68 | from .weaviate import WeaviateTaskStorage
69 | return WeaviateTaskStorage(options)
--------------------------------------------------------------------------------
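The factory above resolves the backend lazily; a sketch of wiring it to Weaviate (host and storage name are assumptions):

options = WeaviateOptions(host="http://localhost:8080",
                          vectorizer="text2vec-transformers",
                          storage_name="tasks")
storage = ContextStorage.factory(StorageType.WEAVIATE, options)
results = storage.query("deploy the app", fields=["task"], n=3)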
/Eve/toddleragi/components/pinecone.py:
--------------------------------------------------------------------------------
1 | import os
2 | from typing import Callable, Optional
3 |
4 | import openai
5 | from .IContextStorage import ContextStorage, StorageOptions, ContextResult, ContextData
6 |
7 | class PineconeTaskStorage(ContextStorage):
8 |
9 | class PineconeOptions(StorageOptions):
10 | api_key: str
11 | environment: str
12 | embedding_method: Callable[[str], list[float]]
13 | storage_name: str
14 | clean_storage: bool
15 |
16 | @staticmethod
17 | def _get_ada_embedding(text):
18 | if not openai.api_key:
19 | openai.api_key = os.getenv("OPENAI_API_KEY", "")
20 | if not openai.api_key:
21 | raise ValueError("OPENAI_API_KEY is missing from .env")
22 | text = text.replace("\n", " ")
23 | return openai.Embedding.create(input=[text], model="text-embedding-ada-002")["data"][0]["embedding"]
24 |
25 | def __init__(
26 | self,
27 | embedding_method: Optional[Callable[[str], list[float]]] = None,
28 | api_key: Optional[str] = None,
29 | environment: Optional[str] = None,
30 | storage_name: Optional[str] = None,
31 | clean_storage: bool = False
32 | ) -> None:
33 |
34 | if api_key is None:
35 | api_key = os.getenv("PINECONE_API_KEY", "")
36 | if not api_key:
37 | raise ValueError("PINECONE_API_KEY is missing from .env")
38 |
39 | if environment is None:
40 | environment = os.getenv("PINECONE_ENVIRONMENT", "")
41 | if not environment:
42 | raise ValueError("PINECONE_ENVIRONMENT is missing from .env")
43 |
44 | self.api_key = api_key
45 | self.environment = environment
46 | self.storage_name = os.getenv("PINECONE_STORAGE_NAME", "tasks") if storage_name is None else storage_name
47 | self.embedding_method = self._get_ada_embedding if embedding_method is None else embedding_method
48 | self.clean_storage = clean_storage
49 |
50 | OptionsClass = PineconeOptions
51 |
52 | def __init__(self, storage_name: Optional[str] = None, options: Optional[PineconeOptions] = None):
53 | try:
54 | import pinecone
55 | self.pinecone = pinecone
56 | except ImportError:
57 | raise ImportError("Please install pinecone python client: pip install pinecone-client")
58 |
59 | if options is None:
60 |             options = PineconeTaskStorage.PineconeOptions(storage_name=storage_name)
61 |
62 | pinecone.init(api_key=options.api_key, environment=options.environment)
63 | self.storage_name = options.storage_name
64 | self._create_storage(options.clean_storage)
65 | self.embedding_method = options.embedding_method
66 | self.index = pinecone.Index(options.storage_name)
67 |
68 | def _create_storage(self, clean_storage: bool = False) -> None:
69 | if self._has_storage():
70 | if not clean_storage:
71 | return
72 | self.delete_storage()
73 | print(f'(pinecone): creating storage index {self.storage_name}')
74 | self.pinecone.create_index(self.storage_name, 1536)
75 |
76 | def _has_storage(self) -> bool:
77 | return self.storage_name in self.pinecone.list_indexes()
78 |
79 | def delete_storage(self) -> None:
80 | print(f'(pinecone): deleting storage index {self.storage_name}')
81 | self.pinecone.delete_index(self.storage_name)
82 |
83 | def query(self, query: str, fields: list[str] = None, n: int = 1, namespace: str = 'default') -> list[ContextResult]:
84 | # Generate query embedding
85 | query_embedding = self.embedding_method(query)
86 |
87 | # Perform search and retrieve results
88 | results = self.index.query(query_embedding, top_k=n, namespace=namespace, include_metadata=True)
89 | sorted_results = sorted(results.get('matches', []), key=lambda x: x.score, reverse=True)
90 |
91 | # Transform results into standard format
92 | transformed_results = []
93 | for item in sorted_results:
94 | data = item['metadata']
95 |
96 | # Filter metadata by fields if specified
97 | if fields is not None:
98 | data = { key: value for key, value in data.items() if key in fields }
99 |
100 | # Append transformed result to list
101 | transformed_results.append(ContextResult(item['id'], item['score'], data))
102 |
103 | return transformed_results
104 |
105 | def upsert(self, context: ContextData, namespace: str = 'default') -> None:
106 | vector = self.embedding_method(context.enriched_data)
107 | self.index.upsert([(context.id, vector, context.data)], namespace)
--------------------------------------------------------------------------------
/Eve/toddleragi/components/weaviate.py:
--------------------------------------------------------------------------------
1 | from .IContextStorage import ContextStorage, WeaviateOptions, ContextResult, ContextData
2 | import weaviate
3 |
4 | class WeaviateTaskStorage(ContextStorage):
5 | def __init__(self, options: WeaviateOptions):
6 | self.client = weaviate.Client(options.host)
7 | self._create_storage(options.storage_name, options.vectorizer, options.clean_storage)
8 | self.storage_name = self.client.schema.get(options.storage_name)['class']
9 |
10 | def _create_storage(self, storage_name: str, vectorizer: str, clean_storage: bool = False) -> None:
11 | if self._has_storage(storage_name):
12 | if not clean_storage:
13 | return
14 | self.delete_storage()
15 | print(f'(weaviate): creating storage class {storage_name}')
16 | self.client.schema.create({
17 | 'classes': [{
18 | 'class': storage_name,
19 | 'vectorizer': vectorizer,
20 | }]
21 | })
22 |
23 | def _has_storage(self, storage_name: str) -> bool:
24 | existing_classes = [cls['class'].lower() for cls in self.client.schema.get()['classes']]
25 | return storage_name.lower() in existing_classes
26 |
27 | def delete_storage(self) -> None:
28 | print(f'(weaviate): deleting storage class {self.storage_name}')
29 | self.client.schema.delete_class(self.storage_name)
30 |
31 | def query(self, query: str, fields: list[str] = [], n: int = 1, namespace: str = None) -> list[ContextResult]:
32 | # If no fields are provided, retrieve the schema and set fields to be all properties in the schema
33 | if not fields:
34 | schema = self.client.schema.get(self.storage_name)
35 | fields = [prop['name'] for prop in schema['properties']]
36 |
37 | # Create query builder with parameters
38 | query_builder = (
39 | self.client.query
40 | .get(self.storage_name, fields)
41 |             .with_near_text({ 'concepts': [query] })
42 | .with_limit(n)
43 | .with_additional(['id', 'certainty'])
44 | )
45 |
46 | # Limit search by namespace if provided
47 | if namespace:
48 | query_builder = query_builder.with_where({ 'path': ['namespace'], 'operator': 'Equal', 'valueText': namespace })
49 |
50 | results = (
51 | query_builder
52 | .do()
53 | .get('data', {})
54 | .get('Get', {})
55 | .get(self.storage_name, [])
56 | )
57 |
58 | # Transform results into standard format
59 | transformed_results = []
60 | if results:
61 | for result in results:
62 | item = dict(result)
63 |
64 | # Extract additional metadata
65 | metadata = item.pop('_additional', {})
66 |                 id = item.get('context_id', metadata.get('id', 'not-set'))
67 |
68 | # Append transformed result to list
69 | transformed_results.append(ContextResult(id, metadata['certainty'], item))
70 |
71 | return transformed_results
72 |
73 | def upsert(self, context: ContextData, namespace: str = 'default') -> None:
74 | context.data['enriched_data'] = context.enriched_data
75 | context.data['context_id'] = context.id
76 | context.data['namespace'] = namespace
77 | self.client.data_object.create(context.data, self.storage_name)
--------------------------------------------------------------------------------
/Eve/toddleragi/toddleragi.py:
--------------------------------------------------------------------------------
1 | import time
2 | import os
3 | from collections import deque
4 | from agents.execution_agent import ExecutionAgent
5 | from agents.prioritzation_agent import PrioritizationAgent
6 | from agents.task_creation_agent import TaskCreationAgent
7 | from components.IContextStorage import ContextStorage, ContextData, WeaviateOptions
8 |
9 | # Constants
10 |
11 | OBJECTIVE = "Act as a world class programmer / 10x / 100x / individual, MVP. Write a program just like what you're running on, except better in every single way"
12 | INITIAL_TASK = "Decide on what solutions would best serve me to reach my objective"
13 |
14 | TASK_STORAGE_NAME = os.getenv("TASK_STORAGE_NAME", os.getenv("TABLE_NAME", "tasks"))
15 | CONTEXT_STORAGE_TYPE = os.getenv("CONTEXT_STORAGE_TYPE", "weaviate")
16 |
17 |
18 | WEAVIATE_HOST = os.getenv("WEAVIATE_HOST", "")
19 | WEAVIATE_VECTORIZER = os.getenv("WEAVIATE_VECTORIZER", "")
20 |
21 | assert WEAVIATE_HOST, "WEAVIATE_HOST is missing from .env"
22 | assert WEAVIATE_VECTORIZER, "WEAVIATE_VECTORIZER is missing from .env"
23 |
24 |
25 | context_storage_options = WeaviateOptions(WEAVIATE_HOST, WEAVIATE_VECTORIZER, TASK_STORAGE_NAME)
26 |
27 | context_storage = ContextStorage.factory(CONTEXT_STORAGE_TYPE, context_storage_options)
28 |
29 | class TaskManager:
30 | def __init__(self):
31 | self.task_list = deque([])
32 | self.task_id_counter = 1
33 |
34 | def add_task(self, task: dict):
35 | self.task_list.append(task)
36 |
37 | def process_next_task(self):
38 | return self.task_list.popleft()
39 |
40 | def create_new_tasks(self, new_tasks: list):
41 | for new_task in new_tasks:
42 | self.task_id_counter += 1
43 | new_task.update({"task_id": self.task_id_counter})
44 | self.add_task(new_task)
45 |
46 | def print_task_list(self):
47 | print("\033[95m\033[1m" + "\n*****TASK LIST*****\n" + "\033[0m\033[0m")
48 | for task in self.task_list:
49 | print(f"{task['task_id']}: {task['task_name']}")
50 |
51 |
52 |
53 |
54 |
55 | def main():
56 | task_manager = TaskManager()
57 |
58 | # Add the first task
59 | task_manager.add_task({"task_id": 1, "task_name": INITIAL_TASK})
60 |
61 | # Main loop
62 | while task_manager.task_list:
63 | # Print the task list
64 | task_manager.print_task_list()
65 |
66 | # Process the next task
67 | task = task_manager.process_next_task()
68 | print("\033[92m\033[1m" + "\n*****NEXT TASK*****\n" + "\033[0m\033[0m")
69 | print(f"{task['task_id']}: {task['task_name']}")
70 |
71 | # Execute the task and store the result
72 | result = ExecutionAgent(context_storage).run(OBJECTIVE, task["task_name"])
73 | print("\033[93m\033[1m" + "\n*****TASK RESULT*****\n" + "\033[0m\033[0m")
74 | print(result)
75 |
76 | # Enrich result and store in a vector database (weaviate in this case)
77 | enriched_result = {"data": result}
78 | result_id = f"result_{task['task_id']}"
79 |
80 |
81 | data = { "task": task["task_name"], "result": result }
82 | context = ContextData(result_id, data, enriched_result['data'])
83 | context_storage.upsert(context, OBJECTIVE)
84 |
85 | for t in task_manager.task_list:
86 | print(t)
87 |
88 | # Create new tasks and reprioritize task list
89 | new_tasks = TaskCreationAgent().run(
90 | OBJECTIVE,
91 | enriched_result,
92 | task["task_name"],
93 | [t["task_name"] for t in task_manager.task_list]
94 | )
95 | task_manager.create_new_tasks(new_tasks)
96 | PrioritizationAgent(task_manager).run(task["task_id"], OBJECTIVE)
97 | time.sleep(1)
98 |
99 | if __name__ == "__main__":
100 | main()
--------------------------------------------------------------------------------
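The script asserts WEAVIATE_HOST and WEAVIATE_VECTORIZER at import time, so both must be set before running, e.g. (values are assumptions matching the compose files):

WEAVIATE_HOST=http://localhost:8080 WEAVIATE_VECTORIZER=text2vec-transformers python toddleragi.py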
/Eve/tools.py:
--------------------------------------------------------------------------------
1 | from typing import List
2 |
3 | from langchain import LLMChain
4 | from langchain.agents import Tool
5 | from langchain.callbacks.base import CallbackManager
6 | from langchain.tools.human.tool import HumanInputRun
7 | from langchain.memory import ReadOnlySharedMemory
8 | from langchain.utilities import BashProcess
9 | from langchain.llms import OpenAI
10 |
11 | from langchain.utilities import SearxSearchWrapper
12 | #from wikipedia_api_wrapper import WikipediaAPIWrapper
13 | #from wolfram_alpha_api_wrapper import WolframAlphaAPIWrapper
14 |
15 | from langchain.tools.file_management.write import WriteFileTool
16 | from langchain.tools.file_management.read import ReadFileTool
17 |
18 | from langchain.chains.summarize import load_summarize_chain
19 |
20 | import os
21 |
22 | def create_tools(callback_manager: CallbackManager) -> List[Tool]:
23 | # zapier = ZapierNLAWrapper() Future
24 |
25 | tools = [
26 | Tool(
27 | name="HumanInput",
28 | func=HumanInputRun().run,
29 | description="Useful for when your objective has veered so far from the original aim that human intervention is necessary. If certainty falls below 70%, choose this option.",
30 | callback_manager=callback_manager
31 | ),
32 | #Tool(
33 | # name="ArchitectAndWriteProgram",
34 | # func=BashProcess(return_err_output=True).run,
35 | # description="Useful for when you need to write a program in order to solve a task. Use bash to write the files directly to the commandline.",
36 | # callback_manager=callback_manager
37 | #),
38 | #Tool(
39 | # name="ArchitectAndWriteProgram",
40 | # func=BashProcess(return_err_output=True).run,
41 | # description="Useful for when you need to write a program in order to solve a task. Use bash to write the files directly to the commandline.",
42 | # callback_manager=callback_manager
43 | #),
44 | Tool(
45 | name="Bash",
46 | func=BashProcess(return_err_output=True).run,
47 | description="Useful for when you need to run bash commands. Input should be a valid bash command.",
48 | callback_manager=callback_manager
49 | ),
50 | #WriteFileTool(description="Writes files to disk. Must have content to write to the file."),
51 | #ReadFileTool(),
52 | #Tool (
53 | # name="Wolfram",
54 | # func=WolframAlphaAPIWrapper().run,
55 | # description="useful for when you need to calculate minor to complex math or plot graph data",
56 | # callback_manager=manager
57 | #),
58 | #Tool(
59 | # name="Wikipedia",
60 | # func=WikipediaAPIWrapper().run,
61 | # description="useful for when you need to fact check or get comprehensive information on a subject, concept or task, ranging from the tiniest thing to the biggest humanity's mind have had to offer",
62 | # callback_manager=manager
63 | #),
64 | Tool(
65 | name="SearchEngine",
66 | func=SearxSearchWrapper(searx_host=os.getenv("SEARXNG_URL", "")).run,
67 | description="Search online for the newest information on current events and human discourse about topics. Only do this if you exhaust all other options. We want to stay low resource intensive.",
68 | callback_manager=callback_manager
69 | ),
70 | #Tool(
71 | # name="SummarizeText",
72 | # func=load_summarize_chain(OpenAI(temperature=0), chain_type="stuff").run,
73 | # description="Useful for when you need to summarize a small or even large piece of text, but not a set of documents. Give a well thought through, intelligent reasonable summarization. The input to this tool should be a string, which is the text that needs to be summarized",
74 | # callback_manager=manager
75 | #),
76 | #Tool(
77 | # name="SummarizeDocuments",
78 | # func=load_summarize_chain(OpenAI(temperature=0), chain_type="stuff").run,
79 | # description="useful for when you summarize a conversation. The input to this tool should be a string, representing who will read this summary.",
80 | # callback_manager=manager
81 | #)
82 | ]
83 |
84 | return tools
--------------------------------------------------------------------------------
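A sketch of building the toolset (assumes SEARXNG_URL points at the searxng service from docker-compose.yml):

from langchain.callbacks.base import CallbackManager
from langchain.callbacks.stdout import StdOutCallbackHandler

manager = CallbackManager([StdOutCallbackHandler()])
tools = create_tools(callback_manager=manager)
print([t.name for t in tools])  # ['HumanInput', 'Bash', 'SearchEngine']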
/Eve/vectorstores/WeaviateWrapper.py:
--------------------------------------------------------------------------------
1 | def WrapperExample(cls):
2 | 
3 |     class Wrapper:
4 | 
5 |         def __init__(self, y):
6 |             self.wrap = cls(y)
7 | 
8 |         def get_name(self):
9 |             return self.wrap.name
10 | 
11 |     return Wrapper
12 | 
13 | @WrapperExample
14 | class Code:
15 |     def __init__(self, z):
16 |         self.name = z
17 | 
18 | y = Code("Wrapper class")
19 | print(y.get_name())
--------------------------------------------------------------------------------
/Eve/weaviate_schema.py:
--------------------------------------------------------------------------------
1 | schema_classes = ({
2 | "class": "ScrapedData",
3 | "description": "A section of text",
4 | "properties": [
5 | {"name": "title", "dataType": ["string"], "description": "Title of the section"},
6 | {"name": "url", "dataType": ["string"], "description": "The url something was scraped from"},
7 | {"name": "text", "dataType": ["string"], "description": "Text of the section"},
8 | {"name": "summary", "dataType": ["string"], "description": "Summary of the section"},
9 | {"name": "tokens", "dataType": ["int"], "description": "Token count of the section"},
10 | {"name": "sentiment", "dataType": ["number"], "description": "The sentiment rating of the section"},
11 | {"name": "cost", "dataType": ["float"], "description": "Cost of processing the section"},
12 | {
13 | "name": "hasCost",
14 | "dataType": ["Cost"],
15 | "description": "Relation between the section and its cost",
16 | "cardinality": "ToOne",
17 | },
18 | ],
19 | },
20 | {
21 | "class": "Cost",
22 | "description": "Cost of processing a section",
23 | "properties": [
24 | {"name": "amount", "dataType": ["float"], "description": "Amount of cost"},
25 | ],
26 | })
--------------------------------------------------------------------------------
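These class definitions can be applied with the same client pattern used in Eve/main.py (a sketch; the host is an assumption):

import weaviate

client = weaviate.Client("http://localhost:8080")
client.schema.create({"classes": list(schema_classes)})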
/LICENSE.txt:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Ryan Grippeling
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/Protos/Protos.csproj:
--------------------------------------------------------------------------------
1 | <Project Sdk="Microsoft.NET.Sdk">
2 | 
3 |   <PropertyGroup>
4 |     <TargetFramework>net7.0</TargetFramework>
5 |     <ImplicitUsings>enable</ImplicitUsings>
6 |     <Nullable>enable</Nullable>
7 |   </PropertyGroup>
8 | 
9 | </Project>
--------------------------------------------------------------------------------
/Protos/chat.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3";
2 |
3 | package WebGrip.Protos;
4 |
5 | option csharp_namespace = "WebGrip.Protos";
6 |
7 | import "google/api/annotations.proto";
8 |
9 | service ChatService {
10 | rpc AskQuestion (QuestionRequest) returns (QuestionResponse) {
11 | option (google.api.http) = {
12 | post: "/question"
13 | body: "*"
14 | };
15 | }
16 | }
17 |
18 | message QuestionRequest {
19 | string message = 1;
20 | }
21 |
22 | message QuestionResponse {
23 | string status = 1;
24 | string message = 2;
25 | }
--------------------------------------------------------------------------------
/Protos/google/api/annotations.proto:
--------------------------------------------------------------------------------
1 | // Copyright 2015 Google LLC
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | syntax = "proto3";
16 |
17 | package google.api;
18 |
19 | import "google/api/http.proto";
20 | import "google/protobuf/descriptor.proto";
21 |
22 | option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations";
23 | option java_multiple_files = true;
24 | option java_outer_classname = "AnnotationsProto";
25 | option java_package = "com.google.api";
26 | option objc_class_prefix = "GAPI";
27 |
28 | extend google.protobuf.MethodOptions {
29 | // See `HttpRule`.
30 | HttpRule http = 72295728;
31 | }
--------------------------------------------------------------------------------
/Putty.sln:
--------------------------------------------------------------------------------
1 |
2 | Microsoft Visual Studio Solution File, Format Version 12.00
3 | # Visual Studio Version 17
4 | VisualStudioVersion = 17.5.33516.290
5 | MinimumVisualStudioVersion = 10.0.40219.1
6 | Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Brazen", "Brazen\Brazen.csproj", "{D2B50FD2-F4FA-472B-A3F3-5464299A5B3A}"
7 | EndProject
8 | Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Protos", "Protos\Protos.csproj", "{C88AE548-451F-4D22-977F-69EF55594662}"
9 | EndProject
10 | Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{642302BF-97B6-4BB8-A521-0BA29410D46A}"
11 | ProjectSection(SolutionItems) = preProject
12 | .gitignore = .gitignore
13 | EndProjectSection
14 | EndProject
15 | Project("{888888A0-9F3D-457C-B088-3A5042F75D52}") = "Eve", "Eve\Eve.pyproj", "{7B277655-816A-4454-BEC6-D0CC75996CF6}"
16 | EndProject
17 | Global
18 | GlobalSection(SolutionConfigurationPlatforms) = preSolution
19 | Debug|Any CPU = Debug|Any CPU
20 | Release|Any CPU = Release|Any CPU
21 | EndGlobalSection
22 | GlobalSection(ProjectConfigurationPlatforms) = postSolution
23 | {D2B50FD2-F4FA-472B-A3F3-5464299A5B3A}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
24 | {D2B50FD2-F4FA-472B-A3F3-5464299A5B3A}.Debug|Any CPU.Build.0 = Debug|Any CPU
25 | {D2B50FD2-F4FA-472B-A3F3-5464299A5B3A}.Release|Any CPU.ActiveCfg = Release|Any CPU
26 | {D2B50FD2-F4FA-472B-A3F3-5464299A5B3A}.Release|Any CPU.Build.0 = Release|Any CPU
27 | {C88AE548-451F-4D22-977F-69EF55594662}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
28 | {C88AE548-451F-4D22-977F-69EF55594662}.Debug|Any CPU.Build.0 = Debug|Any CPU
29 | {C88AE548-451F-4D22-977F-69EF55594662}.Release|Any CPU.ActiveCfg = Release|Any CPU
30 | {C88AE548-451F-4D22-977F-69EF55594662}.Release|Any CPU.Build.0 = Release|Any CPU
31 | {7B277655-816A-4454-BEC6-D0CC75996CF6}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
32 | {7B277655-816A-4454-BEC6-D0CC75996CF6}.Release|Any CPU.ActiveCfg = Release|Any CPU
33 | EndGlobalSection
34 | GlobalSection(SolutionProperties) = preSolution
35 | HideSolutionNode = FALSE
36 | EndGlobalSection
37 | GlobalSection(ExtensibilityGlobals) = postSolution
38 | SolutionGuid = {5AA7D7F6-D18A-4498-AF7A-C81A5D206E0F}
39 | EndGlobalSection
40 | EndGlobal
41 |
--------------------------------------------------------------------------------
/SECURITY.md:
--------------------------------------------------------------------------------
1 | # Security Policy
2 |
3 | ## Reporting a Vulnerability
4 |
5 | If you discover a security vulnerability within the project, please do not create a public issue. Instead, email the project maintainers directly. They will acknowledge your email within 48 hours and provide an estimated timeframe for addressing the issue.
6 |
7 | We take security concerns seriously and will work with you to address any vulnerabilities found. To encourage responsible reporting, we will not take legal action against you or suspend your access to the project if you follow the procedure outlined above.
--------------------------------------------------------------------------------
/docker-compose.brazen.yml:
--------------------------------------------------------------------------------
1 | version: '3.7'
2 |
3 | services:
4 | brazen:
5 | container_name: brazen
6 | image: webgrip/brazen
7 | ports:
8 | - 9100:80
9 | build:
10 | context: .
11 | dockerfile: Brazen/Dockerfile
12 | env_file: .env
13 | networks:
14 | - putty-network
15 |
16 | networks:
17 | putty-network:
18 | external: true
--------------------------------------------------------------------------------
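Since putty-network is declared external, it has to exist before the stack comes up, e.g.:

docker network create putty-network
docker compose -f docker-compose.brazen.yml up -d --build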
/docker-compose.weaviate-gpu.yml:
--------------------------------------------------------------------------------
1 | ---
2 | version: '3.4'
3 | services:
4 | weaviate:
5 | command:
6 | - --host
7 | - 0.0.0.0
8 | - --port
9 | - '8080'
10 | - --scheme
11 | - http
12 | image: semitechnologies/weaviate:1.18.3
13 | ports:
14 | - 8080:8080
15 | restart: on-failure:0
16 | environment:
17 | TRANSFORMERS_INFERENCE_API: 'http://t2v-transformers:8080'
18 | QNA_INFERENCE_API: 'http://qna-transformers:8080'
19 | IMAGE_INFERENCE_API: 'http://i2v-neural:8080'
20 | NER_INFERENCE_API: 'http://ner-transformers:8080'
21 | SUM_INFERENCE_API: 'http://sum-transformers:8080'
22 | SPELLCHECK_INFERENCE_API: 'http://text-spellcheck:8080'
23 | OPENAI_APIKEY: $OPENAI_API_KEY
24 | QUERY_DEFAULTS_LIMIT: 25
25 | AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: 'true'
26 | PERSISTENCE_DATA_PATH: '/var/lib/weaviate'
27 | DEFAULT_VECTORIZER_MODULE: 'text2vec-transformers'
28 | ENABLE_MODULES: 'text2vec-transformers,qna-transformers,ner-transformers,sum-transformers,text-spellcheck,img2vec-neural,ref2vec-centroid,generative-openai'
29 | CLUSTER_HOSTNAME: 'node1'
30 | t2v-transformers:
31 | image: semitechnologies/transformers-inference:sentence-transformers-multi-qa-mpnet-base-cos-v1
32 | environment:
33 | ENABLE_CUDA: '1'
34 | NVIDIA_VISIBLE_DEVICES: 'all'
35 | deploy:
36 | resources:
37 | reservations:
38 | devices:
39 | - capabilities:
40 | - 'gpu'
41 | qna-transformers:
42 | image: semitechnologies/qna-transformers:distilbert-base-cased-distilled-squad
43 | environment:
44 | ENABLE_CUDA: '1'
45 | NVIDIA_VISIBLE_DEVICES: 'all'
46 | deploy:
47 | resources:
48 | reservations:
49 | devices:
50 | - capabilities:
51 | - 'gpu'
52 | ner-transformers:
53 | image: semitechnologies/ner-transformers:dbmdz-bert-large-cased-finetuned-conll03-english
54 | environment:
55 | ENABLE_CUDA: '1'
56 | NVIDIA_VISIBLE_DEVICES: 'all'
57 | deploy:
58 | resources:
59 | reservations:
60 | devices:
61 | - capabilities:
62 | - 'gpu'
63 | sum-transformers:
64 | image: semitechnologies/sum-transformers:facebook-bart-large-cnn-1.0.0
65 | environment:
66 | ENABLE_CUDA: '1'
67 | NVIDIA_VISIBLE_DEVICES: 'all'
68 | deploy:
69 | resources:
70 | reservations:
71 | devices:
72 | - capabilities:
73 | - 'gpu'
74 | text-spellcheck:
75 | image: semitechnologies/text-spellcheck-model:pyspellchecker-en
76 | i2v-neural:
77 | image: semitechnologies/img2vec-keras:resnet50
78 | environment:
79 | ENABLE_CUDA: '1'
80 | NVIDIA_VISIBLE_DEVICES: 'all'
81 | deploy:
82 | resources:
83 | reservations:
84 | devices:
85 | - capabilities:
86 | - 'gpu'
--------------------------------------------------------------------------------
/docker-compose.weaviate.yml:
--------------------------------------------------------------------------------
1 | version: '3.4'
2 | services:
3 | weaviate:
4 | command:
5 | - --host
6 | - 0.0.0.0
7 | - --port
8 | - '9001'
9 | - --scheme
10 | - http
11 | image: semitechnologies/weaviate:1.18.3
12 | ports:
13 | - 9001:8080
14 | restart: on-failure:0
15 | environment:
16 | TRANSFORMERS_INFERENCE_API: 'http://t2v-transformers:8080'
17 | QNA_INFERENCE_API: 'http://qna-transformers:8080'
18 | IMAGE_INFERENCE_API: 'http://i2v-neural:8080'
19 | NER_INFERENCE_API: 'http://ner-transformers:8080'
20 | SUM_INFERENCE_API: 'http://sum-transformers:8080'
21 | SPELLCHECK_INFERENCE_API: 'http://text-spellcheck:8080'
22 | OPENAI_APIKEY: $OPENAI_API_KEY
23 | QUERY_DEFAULTS_LIMIT: 25
24 | AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: 'true'
25 | PERSISTENCE_DATA_PATH: '/var/lib/weaviate'
26 | DEFAULT_VECTORIZER_MODULE: 'text2vec-openai'
27 | ENABLE_MODULES: 'text2vec-openai,qna-transformers,ner-transformers,sum-transformers,text-spellcheck,img2vec-neural,ref2vec-centroid,generative-openai'
28 | CLUSTER_HOSTNAME: 'node1'
29 | t2v-transformers:
30 | container_name: t2v-transformers
31 | image: semitechnologies/transformers-inference:sentence-transformers-multi-qa-MiniLM-L6-cos-v1
32 | environment:
33 | ENABLE_CUDA: '0'
34 | qna-transformers:
35 | image: semitechnologies/qna-transformers:distilbert-base-uncased-distilled-squad
36 | environment:
37 | ENABLE_CUDA: '0'
38 | ner-transformers:
39 | image: semitechnologies/ner-transformers:dbmdz-bert-large-cased-finetuned-conll03-english
40 | environment:
41 | ENABLE_CUDA: '0'
42 | sum-transformers:
43 | image: semitechnologies/sum-transformers:facebook-bart-large-cnn-1.0.0
44 | environment:
45 | ENABLE_CUDA: '0'
46 | text-spellcheck:
47 | image: semitechnologies/text-spellcheck-model:pyspellchecker-en
48 | i2v-neural:
49 | image: semitechnologies/img2vec-pytorch:resnet50
50 | environment:
51 | ENABLE_CUDA: '0'
52 |
53 | networks:
54 | default:
55 | name: weaviate-network
56 |
57 | putty-network:
58 | external: true
59 |
60 | volumes:
61 | weaviate:
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.7'
2 |
3 | services:
4 |
5 | #caddy:
6 | # container_name: caddy
7 | # image: caddy:2-alpine
8 | # network_mode: host
9 | # volumes:
10 | # - ./Caddyfile:/etc/caddy/Caddyfile:ro
11 | # - caddy-data:/data:rw
12 | # - caddy-config:/config:rw
13 | # environment:
14 | # - SEARXNG_HOSTNAME=https://localhost:8080
15 | # - SEARXNG_TLS=internal
16 | # cap_drop:
17 | # - ALL
18 | # cap_add:
19 | # - NET_BIND_SERVICE
20 | eve:
21 | container_name: eve
22 | image: webgrip/eve
23 | ports:
24 | - 1337:5000
25 | build:
26 | context: .
27 | dockerfile: Eve/Dockerfile
28 | env_file: .env
29 | volumes:
30 | - ./Eve/:/app
31 | networks:
32 | - weaviate-network
33 | - putty-network
34 | depends_on:
35 | - weaviate
36 |
37 | redis:
38 | container_name: redis
39 | image: "redis:alpine"
40 | command: redis-server --save "" --appendonly "no"
41 | networks:
42 | - searxng
43 | tmpfs:
44 | - /var/lib/redis
45 | cap_drop:
46 | - ALL
47 | cap_add:
48 | - SETGID
49 | - SETUID
50 | - DAC_OVERRIDE
51 |
52 | searxng:
53 | container_name: searxng
54 | image: searxng/searxng:latest
55 | ports:
56 | - "8080:8080"
57 | volumes:
58 | - ./searxng:/etc/searxng:rw
59 | environment:
60 | - SEARXNG_BASE_URL=http://localhost:8080/
61 | cap_drop:
62 | - ALL
63 | cap_add:
64 | - CHOWN
65 | - SETGID
66 | - SETUID
67 | logging:
68 | driver: "json-file"
69 | options:
70 | max-size: "1m"
71 | max-file: "1"
72 |
73 | langchain-frontend:
74 | container_name: langchain-frontend
75 | image: notlangchain/langchainplus-frontend:latest
76 | ports:
77 | - 4173:4173
78 | environment:
79 | - BACKEND_URL=http://langchain-backend:8000
80 | - PUBLIC_BASE_URL=http://localhost:8000
81 | - PUBLIC_DEV_MODE=true
82 | depends_on:
83 | - langchain-backend
84 |
85 | langchain-backend:
86 | container_name: langchain-backend
87 | image: notlangchain/langchainplus:latest
88 | environment:
89 | - PORT=8000
90 | - LANGCHAIN_ENV=local
91 | ports:
92 | - 8000:8000
93 | depends_on:
94 | - langchain-db
95 |
96 | langchain-db:
97 | container_name: langchain-db
98 | image: postgres:14.1
99 | environment:
100 | - POSTGRES_PASSWORD=postgres
101 | - POSTGRES_USER=postgres
102 | - POSTGRES_DB=postgres
103 | ports:
104 | - 5432:5432
105 |
106 | networks:
107 |
108 | putty-network:
109 | external: true
110 |
111 |
112 | weaviate-network:
113 | external: true
114 |
115 | searxng:
116 | ipam:
117 | driver: default
118 |
119 | volumes:
120 | caddy-data:
121 | caddy-config:
--------------------------------------------------------------------------------
/docs/adr/.markdownlint.yml:
--------------------------------------------------------------------------------
1 | default: true
2 |
3 | # Allow arbitrary line length
4 | #
5 | # Reason: We apply the one-sentence-per-line rule. A sentence may get longer than 80 characters, especially if links are contained.
6 | #
7 | # Details: https://github.com/DavidAnson/markdownlint/blob/main/doc/Rules.md#md013---line-length
8 | MD013: false
9 |
10 | # Allow duplicate headings
11 | #
12 | # Reasons:
13 | #
14 | # - The chosen option is considerably often used as title of the ADR (e.g., ADR-0015). Thus, that title repeats.
15 | # - We use "Examples" multiple times (e.g., ADR-0010).
16 | # - Markdown lint should support the user and not annoy them.
17 | #
18 | # Details: https://github.com/DavidAnson/markdownlint/blob/main/doc/Rules.md#md024---multiple-headings-with-the-same-content
19 | MD024: false
--------------------------------------------------------------------------------
/docs/adr/0002-weaviate-for-knowledge-graph-and-vector-storage.md:
--------------------------------------------------------------------------------
1 | ---
2 | parent: Decisions
3 | nav_order: 101
4 | title: "ADR 2: Weaviate for Knowledge Graph and Vector Storage"
5 |
6 | status: proposed
7 | date: 2023-04-14
8 | deciders: [Developer Team which consists of the top 5% of their field]
9 | consulted: [Top 5% of their field AI Experts, Top 5% of their field Solution Architect, Top 5% of their field Software Engineers, top 5% DevOps Engineers, hundreds of 10x individuals, Developer team, The SecOps team]
10 | informed: [Whomever it may concern]
11 | solution_name: Weaviate for Knowledge Graph and Vector Storage
12 | ---
13 |
14 | Weaviate for Knowledge Graph and Vector Storage
15 | ===============================================
16 |
17 | Context and Problem Statement
18 | -----------------------------
19 |
20 | The solution requires an efficient and scalable way to store and query knowledge graphs and vector data. How can we enable effective storage and retrieval of knowledge graph and vector data while ensuring the maintainability and scalability of the architecture?
21 |
22 | Decision Drivers
23 | ----------------
24 |
25 | Overall
26 | -------
27 |
28 | * Privacy oriented, we need to adhere to ISO standards
29 | * Free usage
30 | * Stability
31 | * Cutting edge tech and integrations
32 |
33 | Component specific
34 | ------------------
35 |
36 | * Efficient storage and retrieval of knowledge graph and vector data
37 | * Scalability
38 | * Maintainability of the architecture
39 |
40 | Considered Options
41 | ------------------
42 |
43 | * Weaviate
44 | * Azure Digital Twins
45 | * Dgraph
46 | * Neo4j
47 | * ArangoDB
48 |
49 | Decision Outcome
50 | ----------------
51 |
52 | Chosen option: "Weaviate"
53 |
54 | It is specifically designed for knowledge graph and vector storage, providing the best combination of efficient storage and retrieval, scalability, and maintainability for the solution.
55 |
56 | ### Consequences
57 |
58 | * Good, because Weaviate enables efficient storage and retrieval of knowledge graph and vector data
59 | * Good, because Weaviate provides a scalable architecture
60 | * Bad, because maintaining and updating Weaviate may require significant effort and resources
61 |
62 | ### Implementation Example 1: Weaviate and Langchain
63 |
64 | To demonstrate the integration of Weaviate for knowledge graph and vector storage, a simple proof of concept (POC) can be implemented. For example, this POC could involve connecting Weaviate to the Langchain component communication system. A sketch of this POC, using the Python Weaviate client and Langchain's Weaviate vector store wrapper, might look like:
65 |
66 | ```python
67 | import weaviate
68 | from langchain.vectorstores import Weaviate
69 |
70 | # Connect to the local Weaviate instance (weaviate-client v3 API)
71 | client = weaviate.Client("http://localhost:8080")
72 |
73 | # Expose Weaviate to Langchain as a vector store; the "Entity" class
74 | # and "description" text key are illustrative placeholders
75 | vectorstore = Weaviate(client, index_name="Entity", text_key="description")
76 |
77 | # Store knowledge graph data in Weaviate
78 | data = {
79 |     "name": "Example entity",
80 |     "description": "An example entity in the knowledge graph"
81 | }
82 | client.data_object.create(data, "Entity")
83 |
84 | ```
85 |
86 |
87 | ### Implementation Example 2: Weaviate and ChatGPT plugins
88 |
89 | Another example of how Weaviate can be integrated with other components is through the use of ChatGPT plugins. These plugins can be built to leverage Weaviate's capabilities for knowledge graph and vector data storage. For instance, a ChatGPT plugin could query Weaviate to retrieve relevant information for a user's question:
90 |
91 | ```python
92 | import weaviate
93 |
94 | # Connect to the local Weaviate instance (weaviate-client v3 API)
95 | client = weaviate.Client("http://localhost:8080")
96 |
97 | # Plugin handler that retrieves relevant entities for a user's question.
98 | # A real ChatGPT plugin is exposed through an OpenAPI manifest; this
99 | # function is the handler such a plugin would route to, and the
100 | # "Entity" class is an illustrative placeholder.
101 | def weaviate_query_plugin(question: str) -> list:
102 |     result = (
103 |         client.query
104 |         .get("Entity", ["name", "description"])
105 |         .with_near_text({"concepts": [question]})
106 |         .with_limit(5)
107 |         .do()
108 |     )
109 |     return result["data"]["Get"]["Entity"]
110 |
111 | ```
112 |
113 | ### Synergy with Other Solutions
114 |
115 | Weaviate can easily integrate with Langchain, ChatGPT plugins, and other proposed components of the solution. This integration allows for efficient storage and retrieval of knowledge graph and vector data and provides a modular architecture for future extension or modification.
116 |
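As a rough illustration of that synergy, the following sketch wires the Weaviate store from Example 1 into a Langchain retrieval chain. It assumes the `vectorstore` object from Example 1 and an `OPENAI_API_KEY` in the environment; `RetrievalQA` is Langchain's standard retrieval chain.

```python
from langchain.llms import OpenAI
from langchain.chains import RetrievalQA

# Answer questions against the knowledge graph stored in Weaviate;
# `vectorstore` is the Langchain Weaviate wrapper from Example 1.
qa_chain = RetrievalQA.from_chain_type(
    llm=OpenAI(temperature=0),
    chain_type="stuff",
    retriever=vectorstore.as_retriever(),
)

print(qa_chain.run("What is the example entity?"))
```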
117 | Validation
118 | ----------
119 |
120 | The implementation of Weaviate in the solution will be validated by creating a proof of concept (POC) that demonstrates the efficient storage and retrieval of knowledge graph and vector data. The POC will be reviewed by the developer team, solution architect, and AI experts to ensure it meets the requirements for efficient storage and retrieval, scalability, and maintainability.
121 |
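A minimal round-trip check for such a POC might look like the following sketch (again treating the `Entity` class as an illustrative placeholder):

```python
import weaviate

client = weaviate.Client("http://localhost:8080")

# Store one object, then retrieve it by vector search; if the stored
# entity comes back as the top hit, storage and retrieval both work.
# (A real test would wait briefly for indexing before querying.)
client.data_object.create(
    {"name": "POC entity", "description": "Validation round-trip test"},
    "Entity",
)

hits = (
    client.query
    .get("Entity", ["name"])
    .with_near_text({"concepts": ["validation round-trip"]})
    .with_limit(1)
    .do()
)
assert hits["data"]["Get"]["Entity"][0]["name"] == "POC entity"
```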
122 | Pros and Cons of the Options
123 | ----------------------------
124 |
125 | ### Weaviate
126 |
127 | * Good, because it is specifically designed for knowledge graph and vector storage
128 | * Good, because it provides a scalable architecture
129 | * Good, because it enables efficient storage and retrieval of knowledge graph and vector data
130 | * Neutral, because it requires some setup and configuration for optimal performance
131 | * Bad, because maintaining and updating Weaviate may require significant effort and resources
132 |
133 | ### Azure Digital Twins
134 |
135 | * Good, because it is a Microsoft Azure service, which may provide seamless integration with other Azure services
136 | * Good, because it supports knowledge graph storage and querying
137 | * Neutral, because it may not be as efficient for vector storage and retrieval as Weaviate
138 | * Bad, because it is not specifically designed for AI and NLP component communication, which may result in additional customization and development effort
139 | * Bad, because it may require additional setup and configuration, as well as reliance on Microsoft Azure
140 |
141 | ### Dgraph
142 |
143 | * Good, because it supports GraphQL, which provides a powerful and flexible query language
144 | * Neutral, because it is primarily designed for graph storage, not specifically vector storage
145 | * Bad, because it may require additional customization and development effort for AI and NLP component communication
146 | * Bad, because maintaining and updating Dgraph may require significant effort and resources
147 |
148 | ### Neo4j
149 |
150 | * Good, because it is a popular and widely used graph database
151 | * Good, because it supports Cypher, a powerful and expressive graph query language
152 | * Neutral, because it is primarily designed for graph storage, not specifically vector storage
153 | * Bad, because it may require additional customization and development effort for AI and NLP component communication
154 | * Bad, because maintaining and updating Neo4j may require significant effort and resources
155 |
156 | ### ArangoDB
157 |
158 | * Good, because it is a multi-model database, supporting graph, document, and key-value data models
159 | * Good, because it provides flexible querying options, including AQL, a powerful query language
160 | * Neutral, because it is not specifically designed for vector storage and retrieval
161 | * Bad, because it may require additional customization and development effort for AI and NLP component communication
162 | * Bad, because maintaining and updating ArangoDB may require significant effort and resources
163 |
164 | More Information
165 | ----------------
166 |
167 | The decision outcome is based on the evaluation of the considered options against the decision drivers.
168 |
169 | The implementation of Weaviate will be validated through a proof of concept, and the decision may be revisited if requirements change or new solutions emerge.
170 |
171 | The development team and consultants have agreed on the choice of Weaviate for knowledge graph and vector storage.
--------------------------------------------------------------------------------
/docs/adr/Analysis of roles-latest.md:
--------------------------------------------------------------------------------
1 | Legal Expert: This agent specializes in understanding and interpreting laws and regulations relevant to the organization, industry, and specific projects. By staying updated on the latest legal developments, they can advise other agents on potential legal issues and recommend compliant strategies.
2 |
3 | Ethical Expert: Focused on ethical considerations, this agent ensures that the team's actions align with established ethical frameworks, organizational values, and societal norms. They actively monitor the decision-making process, addressing potential ethical risks, and suggesting ways to mitigate them.
4 |
5 | Developer: The Developer agent is responsible for designing, coding, and implementing the technical components of the system. They work closely with other agents, particularly the Solution Architect and Test Engineer, to ensure that the developed software meets the requirements and is reliable, efficient, and maintainable.
6 |
7 | Industry-Revered Writers: These agents are experts in various domains and are responsible for creating high-quality content that informs, educates, and influences audiences. They synthesize information from multiple sources, including other agents, to produce clear, concise, and engaging content.
8 |
9 | Solution Architect: The Solution Architect agent designs the overall structure of the system, ensuring it is scalable, modular, and robust. They collaborate with other agents, such as Developers and Test Engineers, to ensure that the technical architecture supports the project's goals and requirements.
10 |
11 | Test Engineer: This agent is responsible for validating the functionality, performance, and reliability of the system. They design and execute a comprehensive testing strategy, including unit, integration, and end-to-end tests, and work closely with Developers and Solution Architects to identify and resolve issues.
12 |
13 | Risk Assessor: The Risk Assessor agent evaluates potential risks associated with the project, including technical, legal, ethical, and business-related risks. They provide recommendations on how to mitigate these risks, working closely with other agents to ensure that the project remains on track and aligned with its objectives.
14 |
15 | Philosophers of Different Schools: These agents bring various philosophical perspectives to the decision-making process, encouraging deeper reflection and more nuanced analysis. They can help identify potential biases, challenge assumptions, and promote critical thinking among the team.
16 |
17 | Model Trainer: This agent focuses on training and fine-tuning AI models used in the decision-making service. They monitor the performance of these models, identify areas for improvement, and work with other agents to incorporate new data sources and techniques as needed.
18 |
19 | Decider: The Decider agent is responsible for making the final decisions based on the inputs and analyses provided by other agents. They weigh the various factors, consider potential consequences, and determine the most appropriate course of action.
20 |
21 | Commander: The Commander agent oversees the overall project, ensuring that the team stays on track and meets its objectives. They set priorities, allocate resources, and coordinate the efforts of other agents to achieve the project's goals.
22 |
23 | Reporter: This agent monitors events and developments that may impact the project and informs the Commander and other relevant agents. They are responsible for staying up-to-date on the latest news, trends, and research in the field and ensuring that the team is aware of any critical changes.
24 |
25 | Reasoner: The Reasoner agent helps the team make sense of complex problems and synthesize information from multiple sources. They use logical reasoning and critical thinking skills to analyze data, identify patterns, and draw conclusions. The Reasoner works closely with other agents to support the decision-making process.
26 |
27 | Specialists: These agents are experts in specific subjects or domains, such as an API's documentation or a particular UX library. They provide in-depth knowledge and insights on their area of expertise, helping other agents make more informed decisions.
28 |
29 | Quality Assurance: The Quality Assurance agent ensures that the team's output meets the highest standards of quality, reliability, and performance. They monitor processes, identify areas for improvement, and recommend best practices to enhance the overall quality of the project.
30 |
31 | Prompt Checker: This agent is responsible for validating the accuracy and relevance of the prompts used in the decision-making process. They review input data and ensure that it is appropriate, reliable, and up-to-date.
32 |
33 | Youtube Video Interpreter: This agent processes and extracts information from video content, such as YouTube videos, to provide additional data for the models. They convert visual and audio information into structured data that can be used to enhance the decision-making process.
34 |
35 | Summarizer: The Summarizer agent creates concise and accurate summaries of complex information, making it more accessible and digestible for the team. They work closely with other agents, particularly the Industry-Revered Writers, to ensure that key points are effectively communicated.
36 |
37 | Budget Committee: This group of agents is responsible for managing the project's budget and financial resources. They monitor expenses, allocate funds, and ensure that the project remains within its budget constraints.
38 |
39 | AI Knowledgebase for Questions: This agent serves as a centralized repository of information and knowledge related to AI, including models, techniques, and best practices. They provide support and resources to other agents, helping them make better-informed decisions.
40 |
41 | Reality Checker: The Reality Checker agent ensures that the team's tasks and goals remain aligned with the larger project objectives. They monitor progress, evaluate the relevance and effectiveness of current tasks, and raise any concerns to the Reporter, Commander, or other teammates.
42 |
43 | Analysis Maker: This agent is responsible for conducting in-depth analyses of various aspects of the project, including technical, financial, and strategic factors. They provide insights and recommendations to support the decision-making process.
44 |
45 | Prioritizer: The Prioritizer agent helps the team stay focused on the most important tasks and goals by assigning priorities based on factors such as urgency, impact, and resources. They work closely with the Commander and other agents to ensure that the team remains on track and achieves its objectives.
46 |
47 | Stakeholder: Represents the interests and perspectives of various individuals or groups, such as the developer or end-user, providing valuable input and feedback to help shape the decision-making service. They ensure that the system remains focused on meeting the needs of its stakeholders.
48 |
49 | Friend: Acts as a supportive and collaborative partner to other AI agents, fostering a sense of community and cooperation within the decision-making system. They provide emotional support and help maintain a positive team culture.
50 |
51 | Strategy Analyst: Examines and evaluates the strategic implications of decisions, providing insights and recommendations to help the AI agents and decision-makers navigate complex problems and challenges. They help to ensure that decisions align with the system's overall strategy and objectives.
52 |
53 | Strategy Visionary: Generates innovative and forward-thinking ideas, envisioning the future direction and potential of the AI-driven decision-making system. They help to inspire the team and keep the system focused on long-term goals.
54 |
55 | Debater: Engages in constructive discussions and debates with other AI agents, challenging and refining ideas and proposals to ensure that the best possible decisions are made. They help to ensure that decisions are well-informed, carefully considered, and rigorously evaluated.
56 |
57 | Role Creator: Identifies and proposes new AI agent roles based on interactions with other agents, helping to enhance and expand the capabilities of the decision-making service. They help to ensure that the team remains agile and responsive to emerging challenges and opportunities.
--------------------------------------------------------------------------------
/docs/adr/first.md:
--------------------------------------------------------------------------------
1 | ---
2 | parent: Decisions
3 | nav_order: 100
4 | title: "ADR 1: Langchain for Component Communication"
5 |
6 | status: proposed
7 | date: 2023-04-14
8 | deciders: [Developer Team]
9 | consulted: [AI Experts, Solution Architect]
10 | informed: [Project Stakeholders]
11 | ---
12 |
13 | # Langchain for Component Communication
14 |
15 | ## Context and Problem Statement
16 |
17 | The solution requires efficient communication between its AI and NLP components. How can we enable seamless communication and ensure modularity, scalability, and maintainability of the architecture?
18 |
19 | ## Decision Drivers
20 |
21 | * Efficient component communication
22 | * Modularity and scalability
23 | * Maintainability of the architecture
24 |
25 | ## Considered Options
26 |
27 | * Langchain
28 | * gRPC
29 | * Apache Thrift
30 | * REST API
31 | * Websockets
32 |
33 | ## Decision Outcome
34 |
35 | Chosen option: "Langchain", because it provides the best combination of efficient component communication, modularity, and maintainability for the AI and NLP components of the solution.
36 |
37 | ### Consequences
38 |
39 | * Good, because Langchain enables efficient communication between AI and NLP components
40 | * Good, because Langchain provides a modular and scalable architecture
41 | * Bad, because maintaining and updating the knowledge graph may require significant effort and resources
42 |
43 | ### Implementation Example
44 |
45 | To demonstrate the integration of Langchain for component communication, a simple proof of concept (POC) can be implemented. For example, this POC could involve connecting ChatGPT plugins with Weaviate using Langchain. A short code snippet for this POC might look like:
46 |
47 | ```python
48 | import langchain
49 | from chatgpt_plugin import ChatGPTPlugin  # placeholder module, not a real package
50 | from weaviate import WeaviateClient
51 |
52 | # Initialize Langchain, ChatGPTPlugin, and WeaviateClient (illustrative pseudocode)
53 | chain = langchain.Chain()
54 | chatgpt_plugin = ChatGPTPlugin()
55 | weaviate_client = WeaviateClient()
56 |
57 | # Connect components using Langchain (`add_component` is a placeholder API)
58 | chain.add_component("chatgpt", chatgpt_plugin)
59 | chain.add_component("weaviate", weaviate_client)
60 | ```
61 |
62 | ### Synergy with Other Solutions
63 |
64 | Langchain can easily integrate with Weaviate, ChatGPT plugins, and other proposed components of the solution. This integration allows for efficient communication between these components and provides a modular architecture for future extension or modification.
65 |
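As a simplified sketch of that integration, Langchain's built-in Weaviate wrapper can bridge the two; this assumes a running Weaviate instance, and the `Entity` class and `description` field are illustrative:

```python
import weaviate
from langchain.vectorstores import Weaviate

# Bridge Weaviate into Langchain as a shared vector store
client = weaviate.Client("http://localhost:8080")
vectorstore = Weaviate(client, index_name="Entity", text_key="description")

# Any Langchain component can now retrieve from the shared store
docs = vectorstore.similarity_search("example entity", k=3)
```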
66 | ## Validation
67 |
68 | The implementation of Langchain in the solution will be validated by creating a proof of concept (POC) that demonstrates the efficient communication between AI and NLP components. The POC will be reviewed by the developer team, solution architect, and AI experts to ensure it meets the requirements for efficient component communication, modularity, and maintainability.
69 |
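A minimal communication round-trip for such a POC could be sketched as follows, assuming an `OPENAI_API_KEY` in the environment; `PromptTemplate` and `LLMChain` are Langchain's standard building blocks:

```python
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain

# Pass a message through a prompt -> LLM pipeline and check that a
# non-empty response comes back out the other side.
prompt = PromptTemplate(input_variables=["topic"], template="Say one fact about {topic}.")
chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt)

assert chain.run(topic="Weaviate").strip()
```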
70 | ## Pros and Cons of the Options
71 |
72 | ### Langchain
73 |
74 | * Good, because it is specifically designed for AI and NLP component communication
75 | * Good, because it provides a modular and scalable architecture
76 | * Good, because it enables efficient communication between AI and NLP components
77 | * Neutral, because it requires some setup and configuration for optimal performance
78 | * Bad, because maintaining and updating the knowledge graph may require significant effort and resources
79 |
80 | ### gRPC
81 |
82 | * Good, because it is a modern, high-performance RPC framework
83 | * Good, because it supports multiple languages, making it suitable for a diverse technology stack
84 | * Neutral, because it may require additional tooling and services for efficient AI and NLP component communication
85 | * Bad, because it is not specifically designed for AI and NLP component communication, which may result in additional customization and development effort
86 | * Bad, because it may not provide the same level of modularity and scalability as Langchain, leading to potential difficulties in extending or modifying components in the future
87 |
88 | ### Apache Thrift
89 |
90 | * Good, because it is a mature, language-agnostic RPC framework
91 | * Good, because it supports multiple languages, making it suitable for a diverse technology stack
92 | * Neutral, because it may require additional tooling and services for efficient AI and NLP component communication
93 | * Bad, because it is not specifically designed for AI and NLP component communication, which may result in additional customization and development effort
94 | * Bad, because it may not provide the same level of modularity and scalability as Langchain, leading to potential difficulties in extending or modifying components in the future
95 |
96 | ### REST API
97 |
98 | * Good, because it is a widely used standard for API communication
99 | * Good, because it is easy to implement and maintain
100 | * Neutral, because it may not provide the same level of performance as Langchain, gRPC, or Apache Thrift for AI and NLP component communication
101 | * Bad, because it is not specifically designed for AI and NLP component communication, which may result in additional customization and development effort
102 | * Bad, because it may not provide the same level of modularity and scalability as Langchain, leading to potential difficulties in extending or modifying components in the future
103 |
104 | ### Websockets
105 |
106 | * Good, because it allows for real-time, bidirectional communication between components
107 | * Good, because it is widely supported by modern browsers and backend technologies
108 | * Neutral, because it may require additional development effort to implement AI and NLP component communication
109 | * Bad, because it is not specifically designed for AI and NLP component communication, which may result in additional customization and development effort
110 | * Bad, because it may not provide the same level of modularity and scalability as Langchain, leading to potential difficulties in extending or modifying components in the future
111 |
112 | ## More Information
113 | The decision outcome is based on the evaluation of the considered options against the decision drivers. The developer team, AI experts, and solution architect have all agreed on the choice of Langchain for component communication. The implementation of Langchain will be validated through a proof of concept, and the decision may be revisited if requirements change or new solutions emerge.
114 |
--------------------------------------------------------------------------------
/docs/adr/template.md:
--------------------------------------------------------------------------------
1 | ---
2 | parent: Decisions
3 | nav_order: {nav_order}
4 | title: "ADR {number}: {title}"
5 |
6 | status: {status}
7 | date: {date}
8 | deciders: [Project Manager, Solution Architect, AI Expert, Software Engineer, DevOps Engineer, Security Expert]
9 | consulted: [QA Engineer, UX Designer, Business Analyst, Data Engineer, Subject Matter Experts, Stakeholders, A group of various AI Ethics experts, Philosophers]
10 | informed: [Executive Team, Marketing Team, Sales Team, Customer Support Team, All Employees]
11 | solution_name: {solution_name}
12 | ---
13 |
14 | {solution_name}
15 | ===============================================
16 |
17 | Context and Problem Statement
18 | -----------------------------
19 |
20 | {Provide a detailed description of the context and the problem that needs to be addressed. Explain the need for a decision and what it aims to achieve. Describe the background of the project and its goals, and outline any constraints or challenges that have led to this decision point.}
21 |
22 | Decision Drivers
23 | ----------------
24 |
25 | - Specificity: Is the option detailed enough to address the problem statement?
26 | - Measurability: Can the option be measured in terms of cost, performance, scalability, and reliability?
27 | - Achievability: Is the option feasible and realistic given the available resources and time?
28 | - Relevance: Is the option aligned with the project's goals and objectives?
29 | - Time-bound: Is the option timely and can it be implemented within the required timeframe?
30 |
31 |
32 | Overall
33 | -------
34 |
35 | * {List the primary decision drivers that are applicable to the entire project or organization, such as budget, timeline, and strategic goals.}
36 |
37 | Component specific
38 | ------------------
39 |
40 | * {List the component-specific decision drivers that are relevant to this particular decision, such as performance, scalability, and maintainability.}
41 |
42 | Considered Options and Statistics
43 | ---------------------------------
44 |
45 | ### Option Weights Visualization
46 |
47 | {option weights}
48 |
49 | Statistics
50 | ----------
51 |
52 | {statistics}
53 |
54 | Decision Outcome
55 | ----------------
56 |
57 | Chosen option: "{chosen_option}"
58 |
59 | {Provide a comprehensive explanation of why the chosen option was selected, and how it addresses the problem statement and decision drivers. Describe the benefits of the chosen option, its alignment with the project's goals, and its potential impact on stakeholders.}
60 |
61 | ### Consequences
62 |
63 | {List the positive and negative consequences of choosing the selected option, and discuss the trade-offs involved. Explain how the chosen option can overcome or mitigate any potential risks or drawbacks.}
64 |
65 | ### Implementation Examples
66 | #### Example 1
67 | {Provide the first implementation example that demonstrates how the chosen option can be integrated with other components or systems. Include code snippets if appropriate. Use mermaidjs or plantuml diagrams to clearly illustrate the interactions and relationships between components.}
68 |
69 | #### Example 2
70 | {Provide the second implementation example that demonstrates how the chosen option can be integrated with other components or systems. Include code snippets if appropriate. Use mermaidjs or plantuml diagrams to clearly illustrate the interactions and relationships between components.}
71 |
72 | ### Synergy with Other Solutions
73 | {Explain how the chosen option can easily integrate with other proposed components or solutions, and how this integration contributes to the overall success of the project.}
74 |
75 | ### Lock-out of other potential
76 | {Explain how the chosen option can possibly impede the project goals.}
77 |
78 | ### Validation
79 | {Describe the validation process for the chosen option, including any proof of concept, review by experts, or performance assessments.}
80 |
81 | ### Pros and Cons of the Options
82 | {For each considered option, list the pros and cons relative to the decision drivers.}
83 |
84 | ### Feedback Loops
85 | {Describe how the chosen option can learn and adapt over time based on feedback loops, such as user feedback, performance metrics, and business impact.}
86 |
87 | ### Ethical Framework
88 | {Provide an ethical framework for the decision-making process, such as the Asilomar AI Principles or the IEEE Global Initiative on Ethics of Autonomous and Intelligent Systems. Describe how the chosen option aligns with the ethical framework and how it addresses the social and ethical implications of the decision.}
89 |
90 | ### More Information
91 | {Provide any additional information about the decision-making process, the validation of the chosen option, and the agreement reached by the involved parties.}
--------------------------------------------------------------------------------
/docs/adr/weights_visualisation.puml:
--------------------------------------------------------------------------------
1 | @startuml
2 | !define C4 https://raw.githubusercontent.com/plantuml-stdlib/C4-PlantUML/master
3 |
4 | !includeurl C4/C4_Context.puml
5 | !includeurl C4/C4_Container.puml
6 | !includeurl C4/C4_Component.puml
7 | !includeurl C4/C4_Code.puml
8 |
9 | !$deciders = [
10 | "Project Manager",
11 | "Solution Architect",
12 | "AI Expert",
13 | "Software Engineer",
14 | "DevOps Engineer",
15 | "Security Expert"
16 | ]
17 |
18 | !$options = [
19 | "Option 1",
20 | "Option 2",
21 | "Option 3",
22 | "Option 4",
23 | "Option 5"
24 | ]
25 |
26 | !foreach $decider in $deciders
27 | Person($decider, "$decider", "Weight: {weight}")
28 | !endfor
29 |
30 | !foreach $option in $options
31 | System($option, "$option")
32 | !endfor
33 |
34 | !foreach $decider in $deciders
35 | !foreach $option in $options
36 | Rel($decider, $option)
37 | !endfor
38 | !endfor
39 |
40 | @enduml
--------------------------------------------------------------------------------
/humans.txt:
--------------------------------------------------------------------------------
1 | /* TEAM */
2 | Ryan Grippeling: Maintainer
3 | Site: https://ryangrippeling.nl
4 | Contact: ryan@webgrip.nl
5 | Twitter: @RyanGrippeling
6 | GitHub: https://github.com/Ryangr0
7 |
8 | /* CONTRIBUTORS */ THIS COULD BE YOU
9 | Contributor Name: Contributor Role
10 | Site: https://contributorwebsite.com
11 | Contact: contributorname@example.com
12 | Twitter: @contributortwitterhandle
13 | GitHub: https://github.com/contributorusername
14 |
15 | /* THANKS */ # TODO, I'll have AI do it
16 | Package Vendor Name: Package Vendor Role
17 | Site: https://packagevendorwebsite.com
18 | Contact: packagevendor@example.com
19 | Twitter: @packagevendortwitterhandle
20 | GitHub: https://github.com/packagevendorusername
--------------------------------------------------------------------------------
/readme.md:
--------------------------------------------------------------------------------
1 | Disclaimer: Most of the code and documentation, including this readme, was produced iteratively from my input and ChatGPT's output.
2 |
3 | PuttyGPT - The Conversational AI Platform
4 | =========================================
5 |
6 | # 🚧 Before you proceed 🚧
7 | Please note that this is a very early version of the project, and we are still in the process of wrapping things up, wiring components together, and experimenting with new ideas. As such, the project may undergo significant changes and updates as we continue to evolve and refine our vision. Any ideas are welcome and I will get back to you as soon as I can.
8 | [CONTRIBUTING.md](./CONTRIBUTING.md)
9 |
10 | PuttyGPT is a conversational AI project powered by OpenAI's GPT-4, Weaviate for vector storage, and other state-of-the-art tools, providing a comprehensive and user-friendly interface for developers, AI enthusiasts, and business professionals. By utilizing the latest technologies and with the collaboration of our package contributors, we aim to create a solid foundation for diverse AI applications.
11 |
12 | :sparkles: Features
13 | -------------------
14 |
15 | - Powered by OpenAI's GPT-4 for natural language understanding and generation
16 | - Utilizes SearxNG for search capabilities and Weaviate for vector storage and search
17 | - Supports a range of AI tools, including summarization, sentiment analysis, and OpenAPI integration
18 | - Customizable prompt templates for diverse use cases
19 | - Efficient concurrent task execution using asyncio and aiohttp
20 | - Detailed tracing and callback mechanisms for monitoring and debugging
21 | - Designed for extensibility and easy integration with other APIs and services
22 | - Dockerized deployment for ease of installation and scalability
23 |
24 |
25 |
26 | :rocket: Getting Started
27 | ------------------------
28 |
29 | ### Docker Installation
30 |
31 | Clone the repository and navigate to the project directory:
32 |
33 | ```
34 | git clone https://github.com/yourusername/puttygpt.git
35 | cd puttygpt
36 |
37 | ```
38 | (Put this in init.sh later or makefile)
39 | Set up the environment file, then build and run the Docker containers using the provided docker-compose files:
40 |
41 | ```
42 | # Copy env file (the containers read it at startup)
43 | cp .env.example .env
44 |
45 | # replace the searxng placeholder key with a random secret
46 | sed -i "s|ReplaceWithARealKey\!|$(openssl rand -base64 33)|g" .env
47 |
48 | # Prerequisite (I can probably get rid of this but meh not right now)
49 | docker network create weaviate-network
50 |
51 | # Without public grpc endpoint
52 | docker-compose -f docker-compose.weaviate.yml -f docker-compose.yml up --build
53 |
54 | # With grpc endpoint (run this after previous)
55 | docker-compose -f docker-compose.weaviate.yml -f docker-compose.yml -f docker-compose.brazen.yml up brazen --build
56 |
57 | ```
58 |
59 | Cleanup:
60 | ```
61 | docker-compose -f docker-compose.weaviate.yml -f docker-compose.yml -f docker-compose.brazen.yml down --remove-orphans
62 | ```
63 |
64 |
65 | ### Usage
66 |
67 | To interact with the application, monitor the Docker logs of the `eve` container, e.g. `docker logs -f eve`
68 |
69 | :wrench: Customization
70 | ----------------------
71 |
72 | PuttyGPT offers customization options for developers and businesses to tailor the AI experience to their specific needs. You can create your own agents and tools, modify the prompt templates, and even integrate with external APIs and services to unlock new possibilities.
73 |
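For instance, a custom Langchain tool can be handed to an agent in just a few lines; the `lookup_order` helper below is hypothetical, standing in for whatever domain logic you want to expose:

```python
from langchain.agents import Tool

# A hypothetical domain-specific helper exposed to an agent as a tool
def lookup_order(order_id: str) -> str:
    return f"Order {order_id}: shipped"  # replace with a real lookup

order_tool = Tool(
    name="OrderLookup",
    func=lookup_order,
    description="Look up the shipping status of an order by its id",
)
```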
74 | :bulb: Possible Future Applications
75 | -----------------------------------
76 |
77 | By leveraging our technology stack, PuttyGPT has the potential to enable a variety of innovative applications in the future:
78 |
79 | - Virtual assistants for personal or professional use
80 | - Intelligent document automation and processing
81 | - Real-time market analysis and data-driven decision making
82 | - Rapid prototyping and idea generation
83 | - Integration with various APIs and services for extended functionality
84 |
85 | :balance\_scale: Legal and Ethical Considerations
86 | -------------------------------------------------
87 |
88 | We are committed to the responsible use of AI and encourage users to consider legal and ethical implications when using PuttyGPT. Please ensure that your use of PuttyGPT adheres to applicable laws and regulations and respects the rights of others, including privacy and intellectual property rights.
89 |
90 | :handshake: Contributing
91 | ------------------------
92 |
93 | We welcome contributions to PuttyGPT! Whether you're a developer, AI enthusiast, or a business professional, your ideas and expertise can help make this project even better. We also want to extend our gratitude to the package contributors for their incredible work. Check out our [CONTRIBUTING.md](./CONTRIBUTING.md) for more information on how to contribute.
94 |
95 | :memo: License
96 | --------------
97 |
98 | This project is licensed under the [MIT License](./LICENSE.txt).
99 |
100 |
--------------------------------------------------------------------------------
/searxng/settings.yml:
--------------------------------------------------------------------------------
1 | # see https://docs.searxng.org/admin/engines/settings.html#use-default-settings
2 | use_default_settings: true
3 | server:
4 | # base_url is defined in the SEARXNG_BASE_URL environment variable, see .env and docker-compose.yml
5 | secret_key: "fd9a1d1c9bb844ebeaf7dfabe5c5ce797a49a554768e2f857053ca43c577072d" # change this!
6 | limiter: false
7 | image_proxy: true
8 | general:
9 | debug: true
10 | search:
11 | formats:
12 | - html
13 | - json
14 | ui:
15 | static_use_hash: true
16 | redis:
17 | url: redis://redis:6379/0
--------------------------------------------------------------------------------
/searxng/uwsgi.ini:
--------------------------------------------------------------------------------
1 | [uwsgi]
2 | # Who will run the code
3 | uid = searxng
4 | gid = searxng
5 |
6 | # Number of workers (usually CPU count)
7 | workers = %k
8 | threads = 4
9 |
10 | # The right granted on the created socket
11 | chmod-socket = 666
12 |
13 | # Plugin to use and interpreter config
14 | single-interpreter = true
15 | master = true
16 | plugin = python3
17 | lazy-apps = true
18 | enable-threads = true
19 |
20 | # Module to import
21 | module = searx.webapp
22 |
23 | # Virtualenv and python path
24 | pythonpath = /usr/local/searxng/
25 | chdir = /usr/local/searxng/searx/
26 |
27 | # automatically set processes name to something meaningful
28 | auto-procname = true
29 |
30 | # Disable request logging for privacy
31 | disable-logging = true
32 | log-5xx = true
33 |
34 | # Set the max size of a request (request-body excluded)
35 | buffer-size = 8192
36 |
37 | # No keep alive
38 | # See https://github.com/searx/searx-docker/issues/24
39 | add-header = Connection: close
40 |
41 | # uwsgi serves the static files
42 | # expires set to one year since there are hashes
43 | static-map = /static=/usr/local/searxng/searx/static
44 | static-expires = /* 31557600
45 | static-gzip-all = True
46 | offload-threads = 4
47 |
48 | # Cache
49 | cache2 = name=searxngcache,items=2000,blocks=2000,blocksize=4096,bitmap=1
50 |
--------------------------------------------------------------------------------