├── .github
│   └── workflows
│       ├── docs.yml
│       └── pytest.yml
├── .gitignore
├── .vscode
│   ├── launch.json
│   └── settings.json
├── LICENSE
├── README.md
├── build.py
├── docs
│   ├── Makefile
│   ├── make.bat
│   ├── principles.md
│   ├── ramblings
│   │   ├── # what happemns duiring losure.py
│   │   ├── 0.1.0
│   │   │   ├── a new provider_api.md
│   │   │   ├── autostreamprevention.py
│   │   │   ├── cem.py
│   │   │   ├── chat.md
│   │   │   ├── context_versioning.py
│   │   │   ├── cpbo.py
│   │   │   ├── metaprompting.md
│   │   │   ├── metapromptingtorch.py
│   │   │   ├── mypytest.py
│   │   │   └── test.py
│   │   ├── announcement
│   │   ├── client.md
│   │   ├── docstodos.txt
│   │   ├── dspy.md
│   │   ├── eval_db_schema.md
│   │   ├── evalspec.md
│   │   ├── gvar.ipynb
│   │   ├── humanfeedback.py
│   │   ├── misc.md
│   │   ├── notes_on_adapters.py
│   │   ├── notes_on_dspy2.py
│   │   ├── notes_on_tracing.md
│   │   ├── parsing_example.py
│   │   ├── resposne_formats.md
│   │   ├── serialization.md
│   │   ├── spec_notes.md
│   │   ├── thoughtsonevals.md
│   │   └── yield_ell.py
│   ├── requirements.txt
│   └── src
│       ├── _static
│       │   ├── auto_commit.png
│       │   ├── compare.png
│       │   ├── compositionality.webp
│       │   ├── ell-wide-dark.png
│       │   ├── ell-wide-light.png
│       │   ├── ell-wide.png
│       │   ├── ell_studio.webp
│       │   ├── ell_studio_better.webp
│       │   ├── favicon.ico
│       │   ├── gif1.webp
│       │   ├── invocations.webp
│       │   ├── logo.png
│       │   ├── multimodal_compressed.webp
│       │   ├── og.png
│       │   ├── og2.png
│       │   ├── useitanywhere.webp
│       │   ├── useitanywhere_compressed.webp
│       │   ├── versions.webp
│       │   └── versions_small.webp
│       ├── _templates
│       │   ├── header.html
│       │   └── layout.html
│       ├── audio_transcript.txt
│       ├── conf.py
│       ├── core_concepts
│       │   ├── configuration.rst
│       │   ├── decorators.rst.old
│       │   ├── ell_complex.rst
│       │   ├── ell_simple.rst
│       │   ├── ell_studio.rst
│       │   ├── eval_usage.rst.partial
│       │   ├── evaluations.rst
│       │   ├── evaluations.rst.sample
│       │   ├── language_model_programs.rst.old
│       │   ├── message_api.rst
│       │   ├── messages_and_content_blocks.rst.old
│       │   ├── models_and_api_clients.rst
│       │   ├── multimodality.rst
│       │   ├── structured_outputs.rst
│       │   ├── tool_usage.rst
│       │   └── versioning_and_storage.rst
│       ├── getting_started.rst
│       ├── index.rst
│       ├── installation.rst
│       ├── logo.png
│       ├── og.png
│       ├── reference
│       │   └── index.rst
│       └── user_guide
│           └── designing_effective_lmps.rst.unused
├── ell-studio
│   ├── .gitignore
│   ├── README.md
│   ├── jsconfig.json
│   ├── package-lock.json
│   ├── package.json
│   ├── public
│   │   ├── favicon.ico
│   │   ├── gif.gif
│   │   ├── index.html
│   │   ├── logo192.png
│   │   ├── logo512.png
│   │   ├── manifest.json
│   │   └── robots.txt
│   ├── src
│   │   ├── App.js
│   │   ├── App.test.js
│   │   ├── components
│   │   │   ├── Badge.js
│   │   │   ├── DependencyGraphPane.js
│   │   │   ├── Header.js
│   │   │   ├── HierarchicalTable.js
│   │   │   ├── HierarchicalTableContext.js
│   │   │   ├── IORenderer.js
│   │   │   ├── LMPDetailsSidePanel.js
│   │   │   ├── LMPHistoryChart.js
│   │   │   ├── OldCard.js
│   │   │   ├── ResizableSidebar.js
│   │   │   ├── Sidebar.js
│   │   │   ├── VersionBadge.js
│   │   │   ├── VersionHistoryPane.js
│   │   │   ├── common
│   │   │   │   ├── Badge.js
│   │   │   │   ├── Button.js
│   │   │   │   ├── Card.js
│   │   │   │   ├── Checkbox.js
│   │   │   │   ├── Resizable.js
│   │   │   │   ├── ScrollArea.js
│   │   │   │   ├── SidePanel.js
│   │   │   │   ├── Spinner.js
│   │   │   │   ├── StatItem.js
│   │   │   │   ├── ToggleSwitch.js
│   │   │   │   └── Tooltips.js
│   │   │   ├── depgraph
│   │   │   │   ├── DependencyGraph.css
│   │   │   │   ├── DependencyGraph.js
│   │   │   │   ├── LMPCardTitle.js
│   │   │   │   ├── collide.js
│   │   │   │   ├── graphUtils.js
│   │   │   │   └── layoutUtils.js
│   │   │   ├── evaluations
│   │   │   │   ├── EvaluationCard.js
│   │   │   │   ├── EvaluationCardTitle.js
│   │   │   │   ├── EvaluationDataset.js
│   │   │   │   ├── EvaluationDetailsSidebar.js
│   │   │   │   ├── EvaluationOverview.js
│   │   │   │   ├── EvaluationsAnalyticsSidePanel.js
│   │   │   │   ├── EvaluationsIcon.js
│   │   │   │   ├── LabelDisplay.js
│   │   │   │   ├── MetricDisplay.js
│   │   │   │   ├── MetricGraphGrid.js
│   │   │   │   ├── MetricTable.js
│   │   │   │   ├── RunSummary.js
│   │   │   │   ├── reference.json
│   │   │   │   └── runs
│   │   │   │       ├── EvaluationRunDetailsSidebar.js
│   │   │   │       ├── EvaluationRunMetrics.js
│   │   │   │       ├── EvaluationRunOverview.js
│   │   │   │       ├── EvaluationRunResultsTable.js
│   │   │   │       ├── EvaluationRunsTable.js
│   │   │   │       └── SearchAndFiltersBar.js
│   │   │   ├── graphing
│   │   │   │   ├── ErrorBarPlugin.js
│   │   │   │   ├── Graph.js
│   │   │   │   ├── GraphSystem.js
│   │   │   │   ├── SharedVerticalIndicator.js
│   │   │   │   └── TrendLine.js
│   │   │   ├── invocations
│   │   │   │   ├── ContentsRenderer.js
│   │   │   │   ├── InvocationInfoPane.js
│   │   │   │   ├── InvocationsAnalyticsSidePanel.js
│   │   │   │   ├── InvocationsLayout.js
│   │   │   │   ├── InvocationsTable.js
│   │   │   │   └── details
│   │   │   │       ├── InvocationDataPane.js
│   │   │   │       ├── InvocationDetailsPopover.js
│   │   │   │       └── TraceDetailsPane.js
│   │   │   ├── layouts
│   │   │   │   └── GenericPageLayout.js
│   │   │   ├── oldgraph
│   │   │   │   └── OldMetricChart.js
│   │   │   └── source
│   │   │       ├── CodeHighlighter.js
│   │   │       ├── CodeSection.js
│   │   │       ├── DiffRenderer.js
│   │   │       ├── LMPSourceView.js
│   │   │       ├── StandardRenderer.js
│   │   │       └── codeCleanerUtils.js
│   │   ├── contexts
│   │   │   └── ThemeContext.js
│   │   ├── hooks
│   │   │   └── useBackend.js
│   │   ├── index.css
│   │   ├── index.js
│   │   ├── library
│   │   │   └── utils.js
│   │   ├── logo.svg
│   │   ├── pages
│   │   │   ├── Evaluation.js
│   │   │   ├── EvaluationRun.js
│   │   │   ├── Evaluations.js
│   │   │   ├── Home.js
│   │   │   ├── Invocations.js
│   │   │   └── LMP.js
│   │   ├── reportWebVitals.js
│   │   ├── setupTests.js
│   │   ├── styles
│   │   │   ├── SourceCodeView.css
│   │   │   ├── globals.css
│   │   │   └── sourceCode.css
│   │   └── utils
│   │       ├── lmpUtils.js
│   │       └── lstrCleanStringify.js
│   └── tailwind.config.js
├── examples
│   ├── .gitignore
│   ├── bv.py
│   ├── chord_progression_writer.py
│   ├── claude.py
│   ├── client_example.py
│   ├── diamond_depencies.py
│   ├── evals
│   │   ├── classification.py
│   │   ├── poems.py
│   │   ├── psolve.py
│   │   ├── summaries.py
│   │   └── vibes.py
│   ├── exa
│   │   ├── README.md
│   │   └── exa.py
│   ├── future
│   │   ├── catmeme.jpg
│   │   ├── json_mode.py
│   │   ├── limbo.py
│   │   ├── meme_maker.py
│   │   ├── multimodal_tool_use.py
│   │   ├── o1_graph.py
│   │   ├── openaigym.py
│   │   ├── parallel_tool_calls.py
│   │   ├── realtimewebcam.py
│   │   ├── structured.py
│   │   ├── tool_using_chatbot.py
│   │   ├── use_tool_once.py
│   │   ├── weather_example.py
│   │   └── webcam_activity_describer.py
│   ├── git_issue.py
│   ├── hello_postgres.py
│   ├── hello_world.py
│   ├── joke.py
│   ├── llm_lottery.py
│   ├── multilmp.py
│   ├── notebook.ipynb
│   ├── o1.py
│   ├── openai_audio.py
│   ├── openai_prompt_caching.py
│   ├── providers
│   │   ├── anthropic_ex.py
│   │   ├── azure_ex.py
│   │   ├── gemini_ex.py
│   │   ├── groq_ex.py
│   │   ├── instructor_ex.py
│   │   ├── ollama_ex.py
│   │   ├── openai_ex.py
│   │   ├── openrouter_ex.py
│   │   ├── vllm_ex.py
│   │   └── xai_ex.py
│   ├── quick_chat.py
│   ├── rag
│   │   ├── rag.py
│   │   └── wikipedia_mini_rag.py
│   └── server_example.py
├── logo.png
├── poetry.lock
├── pyproject.toml
├── src
│   └── ell
│       ├── __init__.py
│       ├── __version__.py
│       ├── configurator.py
│       ├── contrib
│       │   └── __init__.py
│       ├── evaluation
│       │   ├── __init__.py
│       │   ├── evaluation.py
│       │   ├── results.py
│       │   ├── serialization.py
│       │   └── util.py
│       ├── lmp
│       │   ├── __init__.py
│       │   ├── _track.py
│       │   ├── complex.py
│       │   ├── function.py
│       │   ├── simple.py
│       │   └── tool.py
│       ├── models
│       │   ├── __init__.py
│       │   ├── anthropic.py
│       │   ├── bedrock.py
│       │   ├── google.py
│       │   ├── groq.py
│       │   ├── ollama.py
│       │   ├── openai.py
│       │   └── xai.py
│       ├── provider.py
│       ├── providers
│       │   ├── __init__.py
│       │   ├── anthropic.py
│       │   ├── bedrock.py
│       │   ├── google.py
│       │   ├── groq.py
│       │   └── openai.py
│       ├── py.typed
│       ├── stores
│       │   ├── __init__.py
│       │   ├── migrations
│       │   │   ├── README.md
│       │   │   ├── __init__.py
│       │   │   ├── env.py
│       │   │   ├── make.py
│       │   │   ├── script.py.mako
│       │   │   └── versions
│       │   │       ├── 4524fb60d23e_initial_ell_sql_schema.py
│       │   │       └── f6528d04bbbd_evaluations.py
│       │   ├── models
│       │   │   ├── __init__.py
│       │   │   ├── core.py
│       │   │   └── evaluations.py
│       │   ├── sql.py
│       │   └── store.py
│       ├── studio
│       │   ├── __init__.py
│       │   ├── __main__.py
│       │   ├── config.py
│       │   ├── connection_manager.py
│       │   ├── datamodels.py
│       │   └── server.py
│       ├── types
│       │   ├── __init__.py
│       │   ├── _lstr.py
│       │   ├── lmp.py
│       │   └── message.py
│       └── util
│           ├── WARNING
│           ├── __init__.py
│           ├── _warnings.py
│           ├── char_bitmaps.npy
│           ├── closure.py
│           ├── closure_util.py
│           ├── differ.py
│           ├── plot_ascii.py
│           ├── serialization.py
│           ├── should_import.py
│           ├── tqdm.py
│           └── verbosity.py
├── tailwind.config.js
├── tests
│   ├── .exampleignore
│   ├── __init__.py
│   ├── conftest.py
│   ├── run_all_examples.py
│   ├── test_autocommit_model.py
│   ├── test_closure.py
│   ├── test_evaluation.py
│   ├── test_lmp_to_prompt.py
│   ├── test_lstr.py
│   ├── test_message_type.py
│   ├── test_migrations.py
│   ├── test_openai_provider.py
│   ├── test_results.py
│   ├── test_should_import.py
│   ├── test_sql_store.py
│   └── test_tools.py
└── x
    ├── README.md
    └── openai_realtime
        ├── .gitignore
        ├── LICENSE
        ├── README.md
        ├── examples
        │   ├── audio_example.py
        │   ├── chat_assistant_clone.py
        │   ├── discord_gpt4o.py
        │   ├── run_bot.py
        │   └── run_bot.sh
        ├── poetry.lock
        ├── pyproject.toml
        ├── src
        │   └── openai_realtime
        │       ├── __init__.py
        │       ├── api.py
        │       ├── client.py
        │       ├── conversation.py
        │       ├── event_handler.py
        │       └── utils.py
        └── tests
            ├── samples
            │   └── toronto.mp3
            ├── test_audio.py
            └── test_mock.py
/.github/workflows/docs.yml:
--------------------------------------------------------------------------------
1 | name: Build and Deploy Sphinx Docs
2 |
3 | on:
4 | push:
5 | branches:
6 | - main # Trigger the workflow only on pushes to the main branch
7 |
8 | # Add this permissions block
9 | permissions:
10 | contents: write
11 |
12 | jobs:
13 | build:
14 | runs-on: ubuntu-latest
15 |
16 | steps:
17 | - name: Checkout the repository
18 | uses: actions/checkout@v3
19 |
20 | - name: Set up Python
21 | uses: actions/setup-python@v4
22 | with:
23 | python-version: '3.12.4' # Specify the Python version
24 |
25 | - name: Install Poetry
26 | uses: snok/install-poetry@v1
27 | with:
28 | version: 1.5.1
29 |
30 | - name: Install project dependencies
31 | run: poetry install -E all
32 |
33 | - name: Install doc-specific dependencies
34 | run: |
35 | cd docs
36 | poetry run pip install -r requirements.txt
37 |
38 | - name: Build the documentation
39 | run: |
40 | cd docs
41 | rm -rf _build # Remove existing build directory
42 | poetry run make html # Use poetry run to ensure the correct environment is used
43 | poetry run python -c "import ell; version = ell.__version__; print(version); open('_build/html/_static/ell_version.txt', 'w').write(version)"
44 |
45 | # env:
46 | # SPHINXOPTS: "-W" # Treat warnings as errors
47 |
48 | - name: Deploy to GitHub Pages
49 | uses: peaceiris/actions-gh-pages@v4
50 | with:
51 | github_token: ${{ secrets.GITHUB_TOKEN }}
52 | publish_dir: ./docs/_build/html # Path to the built documentation
53 | cname: docs.ell.so
--------------------------------------------------------------------------------
/.github/workflows/pytest.yml:
--------------------------------------------------------------------------------
1 | name: Run pytest
2 |
3 | on:
4 | push:
5 | branches: [ main ]
6 | pull_request:
7 | branches: [ main ]
8 |
9 | jobs:
10 | test:
11 | runs-on: ubuntu-latest
12 | strategy:
13 | matrix:
14 | python-version: [3.9, "3.10", "3.11"]
15 |
16 | steps:
17 | - uses: actions/checkout@v3
18 |
19 | - name: Set up Python ${{ matrix.python-version }}
20 | uses: actions/setup-python@v4
21 | with:
22 | python-version: ${{ matrix.python-version }}
23 |
24 | - name: Install Poetry
25 | uses: snok/install-poetry@v1
26 | with:
27 | version: 1.5.1
28 |
29 | - name: Install dependencies
30 | run: |
31 | poetry install -E all
32 |
33 | - name: Run pytest
34 | run: |
35 | poetry run pytest tests/
--------------------------------------------------------------------------------
/.vscode/launch.json:
--------------------------------------------------------------------------------
1 | {
2 | "configurations": [
3 | {
4 | "name": "studio:server:dev",
5 | "type": "python",
6 | "request": "launch",
7 | "module": "ell.studio",
8 | "args": [
9 | "--storage-dir",
10 | "${workspaceFolder}/./logdir",
11 | "--dev"
12 | ],
13 | "justMyCode": false
14 | },
15 | {
16 | "type": "node",
17 | "request": "launch",
18 | "name": "studio:client:dev",
19 | "runtimeExecutable": "npm",
20 | "runtimeArgs": [
21 | "run",
22 | "start:dev"
23 | ],
24 | "cwd": "${workspaceFolder}/ell-studio",
25 | "console": "integratedTerminal"
26 | }
27 | ]
28 | }
--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "eslint.workingDirectories": [
3 | "./ell-studio"
4 | ],
5 | "typescript.tsdk": "ell-studio/node_modules/typescript/lib",
6 | "javascript.preferences.importModuleSpecifier": "relative",
7 | "typescript.preferences.importModuleSpecifier": "relative",
8 | "search.exclude": {
9 | "**/node_modules": true,
10 | "**/dist": true,
11 | "**/build": true
12 | },
13 | "files.exclude": {
14 | "**/.git": true,
15 | "**/.DS_Store": true,
16 | "**/node_modules": true,
17 | "**/dist": true,
18 | "**/build": true
19 | },
20 | "typescript.validate.enable": true,
21 | "javascript.validate.enable": true,
22 | "python.testing.pytestArgs": [
23 | "tests"
24 | ],
25 | "python.testing.unittestEnabled": false,
26 | "python.testing.pytestEnabled": true
27 | }
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 William Guss
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/build.py:
--------------------------------------------------------------------------------
1 | import os
2 | import subprocess
3 | import shutil
4 | import toml
5 | import sys
6 |
7 | def run_command(command, cwd=None):
8 | result = subprocess.run(command, shell=True, cwd=cwd, check=True)
9 | return result
10 |
11 | def npm_install():
12 | print("Running npm install")
13 | run_command("npm install", cwd="ell-studio")
14 |
15 |
16 | def npm_build():
17 | print("Running npm build")
18 | run_command("npm run build", cwd="ell-studio")
19 | print("Copying static files")
20 | source_dir = os.path.join("ell-studio", "build")
21 | target_dir = os.path.join("src", "ell", "studio", "static")
22 | shutil.rmtree(target_dir, ignore_errors=True)
23 | shutil.copytree(source_dir, target_dir)
24 | print(f"Copied static files from {source_dir} to {target_dir}")
25 |
26 |
27 | def get_ell_version():
28 | pyproject_path = "pyproject.toml"
29 | pyproject_data = toml.load(pyproject_path)
30 | return pyproject_data["tool"]["poetry"]["version"]
31 |
32 |
33 | def run_pytest():
34 | print("Running pytest")
35 | try:
36 | run_command("pytest", cwd="tests")
37 | except subprocess.CalledProcessError:
38 | print("Pytest failed. Aborting build.")
39 | sys.exit(1)
40 |
41 |
42 | def run_all_examples():
43 | print("Running all examples")
44 | try:
45 | run_command("python run_all_examples.py -w 16", cwd="tests")
46 | except subprocess.CalledProcessError:
47 | print("Some examples failed. Please review the output above.")
48 | user_input = input("Do you want to continue with the build? (y/n): ").lower()
49 | if user_input != 'y':
50 | print("Aborting build.")
51 | sys.exit(1)
52 |
53 |
54 | def main():
55 | ell_version = get_ell_version()
56 | os.environ['REACT_APP_ELL_VERSION'] = ell_version
57 | npm_install()
58 | npm_build()
59 | run_pytest()
60 | run_all_examples()
61 | print("Build completed successfully.")
62 |
63 |
64 | if __name__ == "__main__":
65 | main()
66 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS ?=
7 | SPHINXBUILD ?= sphinx-build
8 | SOURCEDIR = src
9 | BUILDDIR = _build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=src
11 | set BUILDDIR=_build
12 |
13 | %SPHINXBUILD% >NUL 2>NUL
14 | if errorlevel 9009 (
15 | echo.
16 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
17 | echo.installed, then set the SPHINXBUILD environment variable to point
18 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
19 | echo.may add the Sphinx directory to PATH.
20 | echo.
21 | echo.If you don't have Sphinx installed, grab it from
22 | echo.https://www.sphinx-doc.org/
23 | exit /b 1
24 | )
25 |
26 | if "%1" == "" goto help
27 |
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
29 | goto end
30 |
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
33 |
34 | :end
35 | popd
36 |
--------------------------------------------------------------------------------
/docs/principles.md:
--------------------------------------------------------------------------------
1 | # Principles for developing ell
2 |
3 | Some principles for developing ell that we pick up along the way.
4 |
5 | 1. went missing
6 | 2. went missing..
7 | 3. the user shouldn't wait to find out they're missing something:
8 | Consider caching
9 | ```
10 | import ell
11 |
12 | @ell.simple
13 | def fn(): return "prompt"
14 |
15 | with ell.cache(fn):
16 |     fn()
17 | ```
18 | If I don't have a store installed, this shit will break the moment I hit the ell.cache call.
19 |
20 | We prefer to have the store enable caching; that is, the cache context manager is only enabled if we have a store:
21 |
22 | ```
23 | import ell
24 |
25 | store = ell.stores.SQLiteStore("mystore")
26 | ell.use_store(store)
27 |
28 | @ell.simple
29 | def fn(): return "prompt"
30 |
31 | with ell.store.cache(fn):
32 |     fn()
33 | ```
34 |
35 | 4. no unreadable side-effects.
36 | ```
37 | store = ell.stores.SQLiteStore("mystore")
38 | ell.use_store(store)
39 | ```
40 | is preferred to:
41 | ```
42 | store = ell.stores.SQLiteStore("mystore")
43 | store.install()
44 | ```
45 | This is a side-effect.
46 |
47 |
48 | 5. api providers are the single source of truth for model information
49 | - we will never implement Model("gpt-4", Capabilities(vision=True))
50 | - always rely on the api to tell you if you're using something a model can't do
51 | - in that sense ell.simple should be the thinnest possible wrapper around the api
52 |
53 | 6. ell is a library not a framework
54 | - we are building pytorch not keras. nice agent frameworks etc can exist on top of ell, but are not a part of ell itself. ell is meant to give you all of the building blocks to build systems.
55 | - in the meta programming space, we will support standardized building blocks (optimizers, established prompt compilers, etc) but not too frameworky.
56 | (this actually is a sticky point and drawing the line will always be hard, but initially this is good.)
57 |
58 | 7. less abstraction is better
59 | - more single files, fewer multi-file abstractions
60 | - you should just be able to read the source & understand.
61 |
62 | 8. ell studio is not ell
63 | - ell studio is an exception in that we can bloat it as much as we need to make the dx beautiful.
--------------------------------------------------------------------------------
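
A minimal sketch of the store-gated cache described in principle 3 of principles.md above. The `use_store`/`cache` names follow the rambling, and the `__ell_cache__` attribute is purely illustrative, not ell's shipped API:

```python
from contextlib import contextmanager

_store = None  # set once by use_store(); caching stays unavailable without it


def use_store(store):
    global _store
    _store = store


@contextmanager
def cache(lmp):
    if _store is None:
        # Principle 3: fail loudly at the `with` statement, not mid-call.
        raise RuntimeError("caching requires a store; call use_store(...) first")
    lmp.__ell_cache__ = _store  # hypothetical flag the LMP would check on call
    try:
        yield
    finally:
        lmp.__ell_cache__ = None
```
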
/docs/ramblings/# what happemns duiring losure.py:
--------------------------------------------------------------------------------
1 | # what happens during closure
2 | CODE_INSTURCTIONS = """
3 |
4 | Other Instructions:
5 | - You only respond in code with no commentary (except in the comments and docstrings.)
6 | - Do not respond in markdown, just write code.
7 | - It is extremely important that you don't start your code with ```python. """
8 |
9 | class Test:
10 |     def __init__(self, a, b):
11 |         self.a = a
12 |         self.b = b
13 |
14 |     def to_str(self):
15 |         return f"a: {self.a}, b: {self.b}"
16 |
17 |
18 | test = Test(1, 2)  # [Test object]
19 |
20 | import ell
21 |
22 | @ell.simple("gpt-4-turbo", temperature=0.1, max_tokens=5)
23 | def write_a_complete_python_class(user_spec : str):
24 |     return [ell.system(f"""You are an expert python programmer capable of interpreting a user's spec and writing a python class to accommodate their request. You should document all your code and use best practices.
25 |     {CODE_INSTURCTIONS} {test.to_str()}
26 |     """), ell.user(user_spec)]
27 |
28 |
29 | # what happens during invocation:
30 | #
31 | # write_a_complete_python_class("lol") ->
32 | # Invocation(
33 | #     args=("lol",),
34 | #     kwargs={},
35 | #     globals={
36 | #         # attempt to serialize this
37 | #         "test": str(test),
38 | #     },
39 | #     freevars={},
40 | # )
--------------------------------------------------------------------------------
/docs/ramblings/0.1.0/autostreamprevention.py:
--------------------------------------------------------------------------------
1 | import openai
2 |
3 | # Define the function to stream the response
4 | def stream_openai_response(prompt):
5 |     try:
6 |         # Make the API call
7 |         response = openai.chat.completions.create(
8 |             model="o1-mini",  # Specify the model
9 |             messages=[{"role": "user", "content": prompt}],
10 |             stream=True  # Enable streaming
11 |         )
12 |
13 |         # Stream the response (in openai>=1.0 the delta is an object, not a dict)
14 |         for chunk in response:
15 |             if chunk.choices and chunk.choices[0].delta.content:
16 |                 print(chunk.choices[0].delta.content, end="", flush=True)
17 |
18 |         print()  # Print a newline at the end
19 |
20 |     except Exception as e:
21 |         print(f"An error occurred: {e}")
22 |
23 | # Example usage
24 | prompt = "Tell me a short joke."
25 | stream_openai_response(prompt)
26 |
27 | # This shows that openai won't fake streaming, it will just fail on the request
--------------------------------------------------------------------------------
/docs/ramblings/0.1.0/context_versioning.py:
--------------------------------------------------------------------------------
1 |
2 | import inspect
3 | import ast
4 | from contextlib import contextmanager
5 |
6 | @contextmanager
7 | def context():
8 | # Get the current frame
9 | frame = inspect.currentframe()
10 | try:
11 | # Get the caller's frame
12 | caller_frame = frame.f_back.f_back
13 | # Get the filename and line number where the context manager is called
14 | filename = caller_frame.f_code.co_filename
15 | lineno = caller_frame.f_lineno
16 |
17 | # Read the source code from the file
18 | with open(filename, 'r') as f:
19 | source = f.read()
20 |
21 | # Parse the source code into an AST
22 | parsed = ast.parse(source, filename)
23 | # print(source)
24 | # Find the 'with' statement at the given line number
25 | class WithVisitor(ast.NodeVisitor):
26 | def __init__(self, target_lineno):
27 | self.target_lineno = target_lineno
28 | self.with_node = None
29 |
30 | def visit_With(self, node):
31 | if node.lineno <= self.target_lineno <= node.end_lineno:
32 | self.with_node = node
33 | self.generic_visit(node)
34 |
35 | visitor = WithVisitor(lineno)
36 | visitor.visit(parsed)
37 |
38 | # print(parsed, source)
39 | if visitor.with_node:
40 | # Extract the source code of the block inside 'with'
41 | start = visitor.with_node.body[0].lineno
42 | end = visitor.with_node.body[-1].end_lineno
43 | block_source = '\n'.join(source.splitlines()[start-1:end])
44 | print("Source code inside 'with' block:")
45 | print(block_source)
46 | else:
47 | print("Could not find the 'with' block.")
48 |
49 | # Yield control to the block inside 'with'
50 | yield
51 | finally:
52 | # Any cleanup can be done here
53 | pass
54 |
55 |
56 | # Example usage
57 | if __name__ == "__main__":
58 | with context():
59 | x = 10
60 | y = x * 2
61 | print(y)
62 |
63 |
--------------------------------------------------------------------------------
/docs/ramblings/0.1.0/metapromptingtorch.py:
--------------------------------------------------------------------------------
1 |
2 |
3 | import torch as th
4 |
5 |
6 | weights = th.nn.Parameter(th.randn(10))
7 |
8 |
9 | def forward(x):
10 | return x * weights
11 |
12 |
13 | x = th.randn(10)
14 |
15 | print(forward(x))
16 | print(weights)
17 |
18 | # OOOH WHAT IF WE DID MANY TYPES OF LEARNABLES in
--------------------------------------------------------------------------------
/docs/ramblings/0.1.0/mypytest.py:
--------------------------------------------------------------------------------
1 | from typing import TypedDict
2 |
3 |
4 | class Test(TypedDict):
5 | name: str
6 | age: int
7 |
8 |
9 | def test(**t: Test):
10 | print(t)
11 |
12 | # no type hinting like TS, that's unfortunate.
13 | test()
14 |
--------------------------------------------------------------------------------
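
For what it's worth, TS-style typing for `**kwargs` did land in Python: PEP 692 (Python 3.12) allows `Unpack[TypedDict]`, and `typing_extensions` backports it. A quick sketch of what mypytest.py above was reaching for:

```python
from typing import TypedDict

from typing_extensions import Unpack  # typing.Unpack on Python >= 3.12


class Test(TypedDict):
    name: str
    age: int


def test(**t: Unpack[Test]) -> None:
    print(t)


test(name="bob", age=3)  # accepted by type checkers
test(name="bob")         # flagged (missing "age"), though it still runs
```
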
/docs/ramblings/0.1.0/test.py:
--------------------------------------------------------------------------------
1 |
2 | from typing import Callable
3 |
4 | # The following works...
5 |
6 |
7 |
8 | def decorator(fn : Callable):
9 | def wrapper(*args, **kwargs):
10 | print("before")
11 | result = fn(*args, **kwargs)
12 | print("after")
13 | return result
14 | return wrapper
15 |
16 |
17 | class TestCallable:
18 | def __init__(self, fn : Callable):
19 | self.fn = fn
20 |
21 | def __call__(self, *args, **kwargs):
22 | return self.fn(*args, **kwargs)
23 |
24 | def convert_to_test_callable(fn : Callable):
25 | return TestCallable(fn)
26 |
27 | x = TestCallable(lambda : 1)
28 |
29 | @decorator
30 | @convert_to_test_callable
31 | def test():
32 | print("test")
33 |
34 | @decorator
35 | class MyCallable:
36 | def __init__(self, fn : Callable):
37 | self.fn = fn
38 |
39 | def __call__(self, *args, **kwargs):
40 | return self.fn(*args, **kwargs)
41 |
42 | # Oh so now ell.simples can actually be used as decorators on classes
43 |
--------------------------------------------------------------------------------
/docs/ramblings/announcement:
--------------------------------------------------------------------------------
1 | 🚀 I'm excited to announce the future of prompt engineering: 𝚎𝚕𝚕.
2 |
3 | developed from ideas during my time at OpenAI, 𝚎𝚕𝚕 is a light, functional LM programming library:
4 |
5 | - automatic versioning & tracing
6 | - rich local oss visualization tools
7 | - multimodality native
8 |
9 | Read on ⬇️
10 |
11 | 𝚎𝚕𝚕 was built out of frustration with frameworks like @LangChainAI, on three principles:
12 |
13 | - prompts are programs not strings
14 | - prompts are parameters of machine learning models
15 | - every call to a language model is worth its weight in credits
16 |
17 | prompting should be readable, scientific, and optimizable
18 |
19 | prompt engineering is an optimization process
20 |
21 | because you write your prompts as normal python functions, 𝚎𝚕𝚕 automatically versions and serializes them via dynamic analysis of "lexical closures" - no custom IDE or editor required
22 |
23 | 𝚎𝚕𝚕.𝚒𝚗𝚒𝚝(𝚜𝚝𝚘𝚛𝚎='./𝚕𝚘𝚐𝚍𝚒𝚛')
24 |
25 | local tools for monitoring & visualization
26 |
27 | prompt engineering goes from a dark art to a science with the right tools. Ell Studio is a local, open source tool for prompt version control, monitoring, and visualization.
28 |
29 | 𝚎𝚕𝚕-𝚜𝚝𝚞𝚍𝚒𝚘 --𝚜𝚝𝚘𝚛𝚊𝚐𝚎 ./𝚕𝚘𝚐𝚍𝚒𝚛
30 |
31 |
32 | Multimodality should be first class
33 |
34 | in anticipation of the upcoming gpt-4o + 🍓 api, 𝚎𝚕𝚕 is built with multimodality first.
35 |
36 | with a rich numpy-style message api and multimodal type coercion, using images, video, and audio is intuitive
37 |
38 |
39 | 🎉 𝚎𝚕𝚕 is available on PyPI today w/
40 |
41 | 𝚙𝚒𝚙 𝚒𝚗𝚜𝚝𝚊𝚕𝚕 𝚎𝚕𝚕-𝚊𝚒
42 |
43 | check out the source https://github.com/MadcowD/ell
44 | and read the docs https://docs.ell.so/
45 |
46 | ⏰ new features soon, including SGD & RL on prompts and so much more!
47 |
48 |
49 | 🙏 huge shout out to everyone who's helped with this project
50 | @jakeottiger @a_dixon @shelwin_ zraig, frank hu, & my discord
51 | so many other good convos w @goodside @aidan_mclau and others
--------------------------------------------------------------------------------
/docs/ramblings/client.md:
--------------------------------------------------------------------------------
1 | # Model Client Resolution
2 |
3 | ## 1. If it's not registered, warn the user that we're going to default to OpenAI.
4 | ## 2. If I attempt to define a registered model on a client, and the specified client or the default client doesn't have an API key, warn the user.
5 | ## 3. If I call the LMP and whatever client I resolve to does not have an API key, fail hard.
6 |
--------------------------------------------------------------------------------
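
A rough sketch of the three rules in client.md above (hypothetical names; ell's real resolution logic lives in `src/ell/configurator.py`):

```python
import warnings


def resolve_client(model, registry, default_client):
    client = registry.get(model)
    if client is None:
        # Rule 1: unregistered model -> warn and fall back to the default OpenAI client.
        warnings.warn(f"No client registered for {model!r}; defaulting to OpenAI.")
        client = default_client
    if getattr(client, "api_key", None) is None:
        # Rule 2: the specified/default client has no API key -> warn at definition time.
        warnings.warn(f"Client resolved for {model!r} has no API key set.")
    return client


def call_lmp(model, registry, default_client, **api_params):
    client = resolve_client(model, registry, default_client)
    if getattr(client, "api_key", None) is None:
        # Rule 3: still no API key at call time -> fail hard.
        raise RuntimeError(f"Cannot call {model!r}: resolved client has no API key.")
    # ... issue the request with `client` ...
```
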
/docs/ramblings/docstodos.txt:
--------------------------------------------------------------------------------
1 |
2 | docs todo:
3 |
4 | github icon
5 |
6 | core concepts:
7 |
8 |
9 | @ell.simple.rst
10 |
11 | - autodoc doc string
12 | - usage (can be doc string or )
13 |
14 | @ell.complex.rst
15 |
16 | - autodoc doc string
17 | - usage (can be doc string or )
18 |
19 |
20 | Tool Usage.rst
21 |
22 | Models & API Clients .rst
23 | Configuration.rst
24 |
25 |
26 | @ell.complex .rst
27 |
28 | Tool Usage .rst
29 |
30 |
31 |
32 |
33 | API Clients & Models .rst
34 |
35 | Configuration .rst
36 |
37 | Designing Effective Language Model Programs .rst
38 |
39 | Related Work .rst
40 |
41 | * why this is better than langchain
42 | * why this is better than langsmith
43 | * why this is better than weave
44 | * instructor
45 | * why not use the openai api directly?
46 | - u can definitely use numpy to do deep learning, but it's not advisable for large apps
47 |
48 |
49 |
50 |
51 | Whole tutorials section:
52 |
53 | RAG
54 | Vector DBs
55 | Agents
56 | Chatbots
57 |
--------------------------------------------------------------------------------
/docs/ramblings/dspy.md:
--------------------------------------------------------------------------------
1 | # How do we make a simpler dspy?
2 |
3 |
4 |
--------------------------------------------------------------------------------
/docs/ramblings/misc.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | We want this to log to the console when someone sets a logging flag.
4 |
5 | # Optimizer
6 | Prompts can be optimized using a variety of techniques (in particular we can optimize them against various benchmarks using soft prompting or hard prompting.)
7 | ```python
8 | opt = ell.Optimizer(hyper_params)
9 | # This only accounts for simple one shot optimizations. What about minibatches and control about what the optimizer sees?
10 | # I suppose it really isn't that deep and we can abstract that away from the model context.
11 | optimized_hello = opt.optimize(hello_world, eval_fn, dataset)
12 |
13 | # Why should this be a state?
14 | serializer = ell.Serializer()
15 | ell.save(optimized_hello, "lol.ell")
16 | # Need to define a type of callable fn that results from a model optimizer so that people can easily implement their own optimizers. This will come later of course.
17 | ```
18 | ->
19 | Raw python code plus any model serialization that's on top of it, with the original function hash etc. Can be re-serialized in another context.
20 |
21 | # Serialization
22 | ```python
23 | """
24 | An example of how to utilize the serializer to save and load invocations from the model.
25 | """
26 |
27 | import ell
28 |
29 |
30 | @ell.simple(model="gpt-4-turbo", provider=None, temperature=0.1, max_tokens=5)
31 | def some_lmp(*args, **kwargs):
32 | """Just a normal doc stirng"""
33 | return [
34 | ell.system("Test system prompt from message fmt"),
35 | ell.user("Test user prompt 3"),
36 | ]
37 |
38 |
39 | # much cleaner.
40 | if __name__ == "__main__":
41 | serializer = ell.Serializer("location")
42 | serializer.install() # Any invocation hereafter will be saved.
43 |
44 | # Some open questions can we
45 |
46 | ```
47 |
48 | The above is an example of why we'd want to have instances of serializers. We think of a serializer as storing all invocations and models as a program is run or evolves. The problem is you need to install the serializer every time, and that doesn't feel so good?
49 |
50 | For example, in version control we just save all outputs on a commit, but you have to remember the serialization location etc. instead of there being some global store. Which is optimal?
51 |
52 | Alternatively, serialization happens by default in some global serializer directory? No, I hate this.
53 |
54 | What's the h n. We detect a .ell directory near the file? No, that's unintuitive. This should behave like tensorboard.
55 | - [ ] Look at tensorboard, pytorch, & the wandb equivalent; no need to reinvent.
56 |
57 | What if we install two different serializers????
58 | People don't like specifying file locations; it is cumbersome.
59 |
60 |
--------------------------------------------------------------------------------
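
Worth noting against the open questions in misc.md above: the design ell ultimately shipped (see the announcement in this folder) answers them with a single global init rather than per-serializer installs:

```python
import ell

# One global store, registered once; every invocation after this is
# versioned and saved, with no per-LMP serializer bookkeeping required.
ell.init(store='./logdir')
```
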
/docs/ramblings/notes_on_tracing.md:
--------------------------------------------------------------------------------
1 |
2 | lmp() <- result_lstrs[] ...compute... another_lmp(result_lstrs[]) <- new_result[]
3 |
4 | invocation -> lstrs <- another_invoke
5 | |
6 | |
7 | third invocation (trace(3rd invocation) = [invocation, another_invocation]
8 |
9 |
10 | originators in ell will always be invocation ids.
11 |
12 | when I get a new lstr from calling an LMP, I will get the id of the invocation that produced it as the sole originator
13 |
14 |
15 | some_lmp() -> y:= lstr("content", originator=invocation_id of that call.)
16 |
17 | y += x
18 |
19 | some_lmp() -> y:= lstr("content", originator=(invocation_id), instantaneous_meta_data={
20 | logits,
21 | completion id
22 | model id
23 | invocation_id,
24 | lmp_id.
25 | })
26 |
27 |
28 |
29 | y.invocation_id
30 |
31 |
32 | y += " 123"
33 | y = some_lmp(meta_data = True)
34 |
35 | # Should the user ever know about invocations or traces or originators?
--------------------------------------------------------------------------------
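
A minimal sketch of the `lstr` these notes gesture at: a `str` subclass whose originator set survives concatenation. Illustrative only; the real implementation lives in `src/ell/types/_lstr.py`:

```python
class lstr(str):
    """A string that remembers which LMP invocations produced it."""

    def __new__(cls, content, *, originators=frozenset()):
        s = super().__new__(cls, content)
        s._originators = frozenset(originators)
        return s

    @property
    def originators(self):
        return self._originators

    def __add__(self, other):
        # Trace flows through concatenation: the result unions originators.
        other_orig = getattr(other, "_originators", frozenset())
        return lstr(str(self) + str(other), originators=self._originators | other_orig)


y = lstr("content", originators={"invocation_id_of_that_call"})
y = y + " 123"
assert y.originators == {"invocation_id_of_that_call"}
```
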
/docs/requirements.txt:
--------------------------------------------------------------------------------
1 | sphinx==7.2.6
2 | pydata-sphinx-theme==0.15.2
3 | sphinx-autodoc-typehints==1.25.2
4 | myst-parser==2.0.0
5 | sphinx-copybutton==0.5.2
6 | sphinx-design==0.5.0
7 | sphinx-inline-tabs==2023.4.21
8 | sphinx-togglebutton==0.3.2
9 | sphinxcontrib-mermaid==0.9.2
10 | sphinxawesome_theme==5.2.0
11 | autodoc_pydantic==2.2.0
--------------------------------------------------------------------------------
/docs/src/_static/auto_commit.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MadcowD/ell/298dff343429f24118cb224c7cc0322d5b02bef7/docs/src/_static/auto_commit.png
--------------------------------------------------------------------------------
/docs/src/_static/compare.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MadcowD/ell/298dff343429f24118cb224c7cc0322d5b02bef7/docs/src/_static/compare.png
--------------------------------------------------------------------------------
/docs/src/_static/compositionality.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MadcowD/ell/298dff343429f24118cb224c7cc0322d5b02bef7/docs/src/_static/compositionality.webp
--------------------------------------------------------------------------------
/docs/src/_static/ell-wide-dark.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MadcowD/ell/298dff343429f24118cb224c7cc0322d5b02bef7/docs/src/_static/ell-wide-dark.png
--------------------------------------------------------------------------------
/docs/src/_static/ell-wide-light.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MadcowD/ell/298dff343429f24118cb224c7cc0322d5b02bef7/docs/src/_static/ell-wide-light.png
--------------------------------------------------------------------------------
/docs/src/_static/ell-wide.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MadcowD/ell/298dff343429f24118cb224c7cc0322d5b02bef7/docs/src/_static/ell-wide.png
--------------------------------------------------------------------------------
/docs/src/_static/ell_studio.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MadcowD/ell/298dff343429f24118cb224c7cc0322d5b02bef7/docs/src/_static/ell_studio.webp
--------------------------------------------------------------------------------
/docs/src/_static/ell_studio_better.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MadcowD/ell/298dff343429f24118cb224c7cc0322d5b02bef7/docs/src/_static/ell_studio_better.webp
--------------------------------------------------------------------------------
/docs/src/_static/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MadcowD/ell/298dff343429f24118cb224c7cc0322d5b02bef7/docs/src/_static/favicon.ico
--------------------------------------------------------------------------------
/docs/src/_static/gif1.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MadcowD/ell/298dff343429f24118cb224c7cc0322d5b02bef7/docs/src/_static/gif1.webp
--------------------------------------------------------------------------------
/docs/src/_static/invocations.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MadcowD/ell/298dff343429f24118cb224c7cc0322d5b02bef7/docs/src/_static/invocations.webp
--------------------------------------------------------------------------------
/docs/src/_static/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MadcowD/ell/298dff343429f24118cb224c7cc0322d5b02bef7/docs/src/_static/logo.png
--------------------------------------------------------------------------------
/docs/src/_static/multimodal_compressed.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MadcowD/ell/298dff343429f24118cb224c7cc0322d5b02bef7/docs/src/_static/multimodal_compressed.webp
--------------------------------------------------------------------------------
/docs/src/_static/og.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MadcowD/ell/298dff343429f24118cb224c7cc0322d5b02bef7/docs/src/_static/og.png
--------------------------------------------------------------------------------
/docs/src/_static/og2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MadcowD/ell/298dff343429f24118cb224c7cc0322d5b02bef7/docs/src/_static/og2.png
--------------------------------------------------------------------------------
/docs/src/_static/useitanywhere.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MadcowD/ell/298dff343429f24118cb224c7cc0322d5b02bef7/docs/src/_static/useitanywhere.webp
--------------------------------------------------------------------------------
/docs/src/_static/useitanywhere_compressed.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MadcowD/ell/298dff343429f24118cb224c7cc0322d5b02bef7/docs/src/_static/useitanywhere_compressed.webp
--------------------------------------------------------------------------------
/docs/src/_static/versions.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MadcowD/ell/298dff343429f24118cb224c7cc0322d5b02bef7/docs/src/_static/versions.webp
--------------------------------------------------------------------------------
/docs/src/_static/versions_small.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MadcowD/ell/298dff343429f24118cb224c7cc0322d5b02bef7/docs/src/_static/versions_small.webp
--------------------------------------------------------------------------------
/docs/src/core_concepts/configuration.rst:
--------------------------------------------------------------------------------
1 | =============
2 | Configuration
3 | =============
4 |
5 | ell provides various configuration options to customize its behavior.
6 |
7 | .. autofunction:: ell.init
8 | :no-index:
9 |
10 | This ``init`` function is a convenience function that sets up the configuration for ell. It is a thin wrapper around the ``Config`` class, which is a Pydantic model.
11 |
12 | You can modify the global configuration using the ``ell.config`` object which is an instance of ``Config``:
13 |
14 | .. autopydantic_model:: ell.Config
15 | :members:
16 | :exclude-members: default_client, registry, store, providers
17 | :model-show-json: false
18 | :model-show-validator-members: false
19 | :model-show-config-summary: false
20 | :model-show-field-summary: true
21 | :model-show-validator-summary: false
22 | :no-index:
--------------------------------------------------------------------------------
/docs/src/core_concepts/models_and_api_clients.rst:
--------------------------------------------------------------------------------
1 | ========================
2 | Models & API Clients
3 | ========================
4 |
5 | In language model programming, the relationship between models and API clients is crucial. ell provides a robust framework for managing this relationship, offering various ways to specify clients for models, register custom models, and leverage default configurations.
6 |
7 | Model Registration and Default Clients
8 | --------------------------------------
9 |
10 | ell automatically registers numerous models from providers like OpenAI, Anthropic, Cohere, and Groq upon initialization. This allows you to use models without explicitly specifying a client.
11 |
12 | If no client is found for a model, ell falls back to a default OpenAI client. This enables the use of newly released models without waiting for ell to register them. If the fallback fails because the model is not available in the OpenAI API, you can register your own client for the model using the `ell.config.register_model` method, or specify a client when calling the language model program, as shown below.
13 |
14 |
15 | Specifying Clients for Models
16 | -----------------------------
17 |
18 | ell offers multiple methods to specify clients for models:
19 |
20 | 1. Decorator-level Client Specification:
21 |
22 | .. code-block:: python
23 |
24 | import ell
25 | import openai
26 |
27 | client = openai.Client(api_key="your-api-key")
28 |
29 | @ell.simple(model="gpt-next", client=client)
30 | def my_lmp(prompt: str):
31 | return f"Respond to: {prompt}"
32 |
33 | 2. Function Call-level Client Specification:
34 |
35 | .. code-block:: python
36 |
37 | result = my_lmp("Hello, world!", client=another_client)
38 |
39 | 3. Global Client Registration:
40 |
41 | .. code-block:: python
42 |
43 | ell.config.register_model("gpt-next", my_custom_client)
44 |
45 | Custom Model Registration
46 | -------------------------
47 |
48 | For custom or newer models, ell provides a straightforward registration method:
49 |
50 | .. code-block:: python
51 |
52 | import ell
53 | import my_custom_client
54 |
55 | ell.config.register_model("my-custom-model", my_custom_client)
56 |
57 |
--------------------------------------------------------------------------------
/docs/src/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MadcowD/ell/298dff343429f24118cb224c7cc0322d5b02bef7/docs/src/logo.png
--------------------------------------------------------------------------------
/docs/src/og.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MadcowD/ell/298dff343429f24118cb224c7cc0322d5b02bef7/docs/src/og.png
--------------------------------------------------------------------------------
/docs/src/reference/index.rst:
--------------------------------------------------------------------------------
1 | =============
2 | ell package
3 | =============
4 |
5 | .. note::
6 | This is coming soon, be sure to read the source code.
7 |
8 | Automatically generated API reference for ``ell``.
9 |
10 |
11 | .. automodule:: ell
12 | :members:
13 | :undoc-members:
14 | :show-inheritance:
15 |
16 |
17 | .. toctree::
18 | :maxdepth: 2
19 | :caption: API Reference
20 |
21 | self
--------------------------------------------------------------------------------
/ell-studio/.gitignore:
--------------------------------------------------------------------------------
1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
2 |
3 | # dependencies
4 | /node_modules
5 | /.pnp
6 | .pnp.js
7 |
8 | # testing
9 | /coverage
10 |
11 | # production
12 | /build
13 |
14 | # misc
15 | .DS_Store
16 | .env.local
17 | .env.development.local
18 | .env.test.local
19 | .env.production.local
20 |
21 | npm-debug.log*
22 | yarn-debug.log*
23 | yarn-error.log*
24 |
--------------------------------------------------------------------------------
/ell-studio/jsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "jsx": "react",
4 | "baseUrl": "src",
5 | "paths": {
6 | "*": ["*"]
7 | }
8 | },
9 | "include": ["src"]
10 | }
--------------------------------------------------------------------------------
/ell-studio/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "ell-studio",
3 | "version": "0.0.1",
4 | "private": true,
5 | "dependencies": {
6 | "@heroicons/react": "^2.1.5",
7 | "@radix-ui/react-checkbox": "^1.1.1",
8 | "@radix-ui/react-icons": "^1.3.0",
9 | "@radix-ui/react-scroll-area": "^1.1.0",
10 | "@radix-ui/react-select": "^2.1.1",
11 | "@radix-ui/react-slot": "^1.1.0",
12 | "@radix-ui/react-tooltip": "^1.1.3",
13 | "@tanstack/react-query": "^5.51.21",
14 | "@testing-library/jest-dom": "^5.17.0",
15 | "@testing-library/react": "^13.4.0",
16 | "@testing-library/user-event": "^13.5.0",
17 | "axios": "^1.6.0",
18 | "base64-js": "^1.5.1",
19 | "chart.js": "^4.4.4",
20 | "chartjs-chart-error-bars": "^4.4.2",
21 | "class-variance-authority": "^0.7.0",
22 | "clsx": "^2.1.1",
23 | "d3-force": "^3.0.0",
24 | "d3-quadtree": "^3.0.1",
25 | "dagre": "^0.8.5",
26 | "date-fns": "^3.6.0",
27 | "dotenv": "^16.4.5",
28 | "framer-motion": "^11.3.24",
29 | "install": "^0.13.0",
30 | "lucide-react": "^0.424.0",
31 | "npm": "^10.8.2",
32 | "prismjs": "^1.29.0",
33 | "react": "^18.3.1",
34 | "react-chartjs-2": "^5.2.0",
35 | "react-dom": "^18.3.1",
36 | "react-hot-toast": "^2.4.1",
37 | "react-icons": "^5.2.1",
38 | "react-markdown": "^9.0.1",
39 | "react-resizable-panels": "^2.0.22",
40 | "react-responsive": "^10.0.0",
41 | "react-router-dom": "^6.18.0",
42 | "react-scripts": "^5.0.1",
43 | "react-sparklines": "^1.7.0",
44 | "react-syntax-highlighter": "^15.5.0",
45 | "reactflow": "^11.11.4",
46 | "recharts": "^2.12.7",
47 | "tailwind-merge": "^2.4.0",
48 | "tailwindcss-animate": "^1.0.7",
49 | "unidiff": "^1.0.4",
50 | "web-vitals": "^2.1.4"
51 | },
52 | "scripts": {
53 | "start": "react-scripts start",
54 | "build": "react-scripts build",
55 | "test": "react-scripts test",
56 | "eject": "react-scripts eject",
57 | "start:dev": "react-scripts start"
58 | },
59 | "eslintConfig": {
60 | "extends": [
61 | "react-app",
62 | "react-app/jest"
63 | ]
64 | },
65 | "browserslist": {
66 | "production": [
67 | ">0.2%",
68 | "not dead",
69 | "not op_mini all"
70 | ],
71 | "development": [
72 | "last 1 chrome version",
73 | "last 1 firefox version",
74 | "last 1 safari version"
75 | ]
76 | },
77 | "devDependencies": {
78 | "autoprefixer": "^10.4.16",
79 | "postcss": "^8.4.31",
80 | "tailwindcss": "^3.3.5"
81 | }
82 | }
83 |
--------------------------------------------------------------------------------
/ell-studio/public/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MadcowD/ell/298dff343429f24118cb224c7cc0322d5b02bef7/ell-studio/public/favicon.ico
--------------------------------------------------------------------------------
/ell-studio/public/gif.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MadcowD/ell/298dff343429f24118cb224c7cc0322d5b02bef7/ell-studio/public/gif.gif
--------------------------------------------------------------------------------
/ell-studio/public/index.html:
--------------------------------------------------------------------------------
[HTML markup stripped in this dump. Recoverable shape: a standard Create React App index.html shell, with a head containing charset/viewport meta tags, favicon and manifest links, and a <title> of "Ell Studio", and a body containing the root div the React app mounts into.]
--------------------------------------------------------------------------------
/ell-studio/public/logo192.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MadcowD/ell/298dff343429f24118cb224c7cc0322d5b02bef7/ell-studio/public/logo192.png
--------------------------------------------------------------------------------
/ell-studio/public/logo512.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MadcowD/ell/298dff343429f24118cb224c7cc0322d5b02bef7/ell-studio/public/logo512.png
--------------------------------------------------------------------------------
/ell-studio/public/manifest.json:
--------------------------------------------------------------------------------
1 | {
2 | "short_name": "React App",
3 | "name": "Create React App Sample",
4 | "icons": [
5 | {
6 | "src": "favicon.ico",
7 | "sizes": "64x64 32x32 24x24 16x16",
8 | "type": "image/x-icon"
9 | },
10 | {
11 | "src": "logo192.png",
12 | "type": "image/png",
13 | "sizes": "192x192"
14 | },
15 | {
16 | "src": "logo512.png",
17 | "type": "image/png",
18 | "sizes": "512x512"
19 | }
20 | ],
21 | "start_url": ".",
22 | "display": "standalone",
23 | "theme_color": "#000000",
24 | "background_color": "#ffffff"
25 | }
26 |
--------------------------------------------------------------------------------
/ell-studio/public/robots.txt:
--------------------------------------------------------------------------------
1 | # https://www.robotstxt.org/robotstxt.html
2 | User-agent: *
3 | Disallow:
4 |
--------------------------------------------------------------------------------
/ell-studio/src/App.test.js:
--------------------------------------------------------------------------------
1 | import { render, screen } from '@testing-library/react';
2 | import App from './App';
3 |
4 | test('renders learn react link', () => {
5 |   render(<App />);
6 | const linkElement = screen.getByText(/learn react/i);
7 | expect(linkElement).toBeInTheDocument();
8 | });
9 |
--------------------------------------------------------------------------------
/ell-studio/src/components/Badge.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 |
3 | const colorClasses = {
4 | blue: 'bg-blue-100 text-blue-800',
5 | green: 'bg-green-100 text-green-800',
6 | yellow: 'bg-yellow-100 text-yellow-800',
7 | purple: 'bg-purple-100 text-purple-800',
8 | orange: 'bg-orange-100 text-orange-800',
9 | gray: 'bg-gray-100 text-gray-800',
10 | };
11 |
12 | export const Badge = ({ color = 'gray', children }) => {
13 |   return (
14 |     // span markup stripped in this dump; reconstructed minimally from colorClasses
15 |     <span className={colorClasses[color]}>
16 |       {children}
17 |     </span>
18 |   );
19 | };
--------------------------------------------------------------------------------
/ell-studio/src/components/DependencyGraphPane.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { FiChevronRight } from 'react-icons/fi';
3 | import { Link } from 'react-router-dom';
4 | import {DependencyGraph} from './depgraph/DependencyGraph';
5 |
6 | // When changing pages we need to rerender this component (or create a new graph)
7 | const DependencyGraphPane = ({ lmp, uses }) => {
8 | const lmps = [lmp, ...uses];
9 | console.log(uses)
10 | return (
11 |     <DependencyGraph lmps={lmps} /> {/* markup stripped in this dump; reconstructed from the import above */}
12 | );
13 | };
14 |
15 | export default DependencyGraphPane;
--------------------------------------------------------------------------------
/ell-studio/src/components/Header.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { Link } from 'react-router-dom';
3 | import { useTheme } from '../contexts/ThemeContext';
4 |
5 | const Header = () => {
6 | const { darkMode } = useTheme();
7 |
8 |   return (
9 |     // header markup stripped in this dump; recoverable shape: a header bar
10 |     // styled off darkMode, containing a home Link labeled "Ell Studio"
11 |     <header>
12 |       <Link to="/">Ell Studio</Link>
13 |     </header>
14 |   );
15 | };
16 |
17 | export default Header;
--------------------------------------------------------------------------------
/ell-studio/src/components/OldCard.js:
--------------------------------------------------------------------------------
1 | import React from "react";
2 |
3 | export function OldCard({ children, title, noMinW, ...rest }) {
4 |   return (
5 |     // card markup stripped in this dump; recoverable shape: a styled div
6 |     // (title / noMinW drive the class list) spreading {...rest} over {children}
7 |     <div {...rest}>
8 |       {children}
9 |     </div>
10 |   );
11 | }
13 |
--------------------------------------------------------------------------------
/ell-studio/src/components/ResizableSidebar.js:
--------------------------------------------------------------------------------
1 | import React, { useState, useEffect, useRef } from 'react';
2 |
3 | const ResizableSidebar = ({ children, onResize, initialWidth = window.innerWidth / 2, minWidth = 400 }) => {
4 | const [sidebarWidth, setSidebarWidth] = useState(initialWidth);
5 | const resizeRef = useRef(null);
6 |
7 | useEffect(() => {
8 | const handleMouseMove = (e) => {
9 | if (resizeRef.current) {
10 | const newWidth = document.body.clientWidth - e.clientX;
11 | const newSidebarWidth = Math.max(minWidth, newWidth);
12 | setSidebarWidth(newSidebarWidth);
13 | onResize(newSidebarWidth);
14 | }
15 | };
16 |
17 | const handleMouseUp = () => {
18 | resizeRef.current = null;
19 | document.removeEventListener("mousemove", handleMouseMove);
20 | document.removeEventListener("mouseup", handleMouseUp);
21 | };
22 |
23 | const handleMouseDown = (e) => {
24 | e.preventDefault();
25 | resizeRef.current = e.target;
26 | document.addEventListener("mousemove", handleMouseMove);
27 | document.addEventListener("mouseup", handleMouseUp);
28 | };
29 |
30 | const resizer = document.getElementById("sidebar-resizer");
31 | resizer.addEventListener("mousedown", handleMouseDown);
32 |
33 | const handleWindowResize = () => {
34 | const newWidth = Math.max(minWidth, sidebarWidth);
35 | setSidebarWidth(newWidth);
36 | onResize(newWidth);
37 | };
38 |
39 | window.addEventListener("resize", handleWindowResize);
40 |
41 | return () => {
42 | resizer.removeEventListener("mousedown", handleMouseDown);
43 | window.removeEventListener("resize", handleWindowResize);
44 | };
45 | }, [onResize, minWidth, sidebarWidth]);
46 |
47 |   return (
48 |     <div style={{ width: sidebarWidth }}>
49 |       {/* container markup stripped in this dump; it must render the
50 |           "sidebar-resizer" drag handle the effect above looks up */}
51 |       <div id="sidebar-resizer" />
52 |
53 |       {children}
54 |     </div>
55 |   );
56 | };
57 |
58 | export default ResizableSidebar;
--------------------------------------------------------------------------------
/ell-studio/src/components/VersionBadge.js:
--------------------------------------------------------------------------------
1 | import React, { useRef, useEffect, useState } from 'react';
2 |
3 | const getColorFromVersion = (version) => {
4 | const hue = (version * 137.508) % 360;
5 | return `hsl(${hue}, 40%, 70%)`;
6 | };
7 |
8 | const VersionBadge = ({ version, hash, className = '', shortVersion = false, truncationLength = 9 }) => {
9 | const [isOverflowing, setIsOverflowing] = useState(false);
10 | const badgeRef = useRef(null);
11 | const baseColor = getColorFromVersion(version);
12 | const lighterColor = `hsl(${baseColor.match(/\d+/)[0]}, 40%, 75%)`;
13 | const textColor = 'text-gray-900';
14 |
15 | useEffect(() => {
16 | const checkOverflow = () => {
17 | if (badgeRef.current) {
18 | setIsOverflowing(badgeRef.current.scrollWidth > badgeRef.current.clientWidth);
19 | }
20 | };
21 |
22 | checkOverflow();
23 | window.addEventListener('resize', checkOverflow);
24 | return () => window.removeEventListener('resize', checkOverflow);
25 | }, [version, hash]);
26 |
27 | const useShortVersion = shortVersion || isOverflowing;
28 |
29 | return (
30 |     <span
31 |       ref={badgeRef}
32 |       className={`inline-flex items-center overflow-hidden whitespace-nowrap rounded px-2 py-0.5 text-xs font-medium ${textColor} ${className}`}
33 |       style={{ background: `linear-gradient(to right, ${baseColor}, ${lighterColor})` }}
34 |     >
35 |       {useShortVersion ? `v${version}` : `Version ${version}`}
36 |       {hash && !useShortVersion && (
37 |         <span className="ml-1 opacity-70">{hash.substring(0, truncationLength)}</span>
38 |       )}
39 |     </span>
40 |   );
41 | };
36 |
37 | export default VersionBadge;
--------------------------------------------------------------------------------
/ell-studio/src/components/common/Badge.js:
--------------------------------------------------------------------------------
1 | import * as React from "react"
2 | import { cva } from "class-variance-authority"
3 | import { cn } from "library/utils"
4 |
5 | const badgeVariants = cva(
6 | "inline-flex items-center rounded-md border px-2.5 py-0.5 text-xs font-semibold transition-colors focus:outline-none focus:ring-2 focus:ring-ring focus:ring-offset-2",
7 | {
8 | variants: {
9 | variant: {
10 | default:
11 | "border-transparent bg-primary text-primary-foreground shadow hover:bg-primary/80",
12 | secondary:
13 | "border-transparent bg-secondary text-secondary-foreground hover:bg-secondary/80",
14 | destructive:
15 | "border-transparent bg-destructive text-destructive-foreground shadow hover:bg-destructive/80",
16 | outline: "text-foreground",
17 | },
18 | },
19 | defaultVariants: {
20 | variant: "default",
21 | },
22 | }
23 | )
24 |
25 | function Badge({ className, variant, ...props }) {
26 |   return (
27 |     <div className={cn(badgeVariants({ variant }), className)} {...props} />
28 |   )
29 | }
30 |
31 | export { Badge, badgeVariants }
32 |
--------------------------------------------------------------------------------
/ell-studio/src/components/common/Button.js:
--------------------------------------------------------------------------------
1 | import * as React from "react"
2 | import { Slot } from "@radix-ui/react-slot"
3 | import { cva } from "class-variance-authority"
4 | import { cn } from "library/utils"
5 |
6 | const buttonVariants = cva(
7 | "inline-flex items-center justify-center whitespace-nowrap rounded-md text-sm font-medium transition-colors focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-ring disabled:pointer-events-none disabled:opacity-50",
8 | {
9 | variants: {
10 | variant: {
11 | default:
12 | "bg-primary text-primary-foreground shadow hover:bg-primary/90",
13 | destructive:
14 | "bg-destructive text-destructive-foreground shadow-sm hover:bg-destructive/90",
15 | outline:
16 | "border border-input bg-background shadow-sm hover:bg-accent hover:text-accent-foreground",
17 | secondary:
18 | "bg-secondary text-secondary-foreground shadow-sm hover:bg-secondary/80",
19 | ghost: "hover:bg-accent hover:text-accent-foreground",
20 | link: "text-primary underline-offset-4 hover:underline",
21 | },
22 | size: {
23 | default: "h-9 px-4 py-2",
24 | sm: "h-8 rounded-md px-3 text-xs",
25 | lg: "h-10 rounded-md px-8",
26 | icon: "h-9 w-9",
27 | },
28 | },
29 | defaultVariants: {
30 | variant: "default",
31 | size: "default",
32 | },
33 | }
34 | )
35 |
36 | const Button = React.forwardRef(
37 | ({ className, variant, size, asChild = false, ...props }, ref) => {
38 | const Comp = asChild ? Slot : "button"
39 |     return (
40 |       <Comp
41 |         className={cn(buttonVariants({ variant, size, className }))}
42 |         ref={ref}
43 |         {...props}
44 |       />
45 |     )
46 |   }
47 | )
48 | Button.displayName = "Button"
49 |
50 | export { Button, buttonVariants }
51 |
--------------------------------------------------------------------------------
/ell-studio/src/components/common/Card.js:
--------------------------------------------------------------------------------
1 | import * as React from "react"
2 | import { cn } from "library/utils"
3 |
4 | const Card = React.forwardRef(({ className, ...props }, ref) => (
5 |   <div
6 |     ref={ref}
7 |     className={cn(
8 |       "rounded-xl border bg-card text-card-foreground shadow",
9 |       className
10 |     )}
11 |     {...props}
12 |   />
13 | ))
14 | Card.displayName = "Card"
15 |
16 | const CardHeader = React.forwardRef(({ className, ...props }, ref) => (
17 |   <div
18 |     ref={ref}
19 |     className={cn("flex flex-col space-y-1.5 p-6", className)}
20 |     {...props}
21 |   />
22 | ))
23 | CardHeader.displayName = "CardHeader"
24 |
25 | const CardTitle = React.forwardRef(({ className, ...props }, ref) => (
26 |   <h3
27 |     ref={ref}
28 |     className={cn("font-semibold leading-none tracking-tight", className)}
29 |     {...props}
30 |   />
31 | ))
32 | CardTitle.displayName = "CardTitle"
33 |
34 | const CardDescription = React.forwardRef(({ className, ...props }, ref) => (
35 |   <p
36 |     ref={ref}
37 |     className={cn("text-sm text-muted-foreground", className)}
38 |     {...props}
39 |   />
40 | ))
41 | CardDescription.displayName = "CardDescription"
42 |
43 | const CardContent = React.forwardRef(({ className, ...props }, ref) => (
44 |   <div ref={ref} className={cn("p-6 pt-0", className)} {...props} />
45 | ))
46 | CardContent.displayName = "CardContent"
47 |
48 | const CardFooter = React.forwardRef(({ className, ...props }, ref) => (
49 |   <div
50 |     ref={ref}
51 |     className={cn("flex items-center p-6 pt-0", className)}
52 |     {...props}
53 |   />
54 | ))
55 | CardFooter.displayName = "CardFooter"
56 |
57 | export { Card, CardHeader, CardFooter, CardTitle, CardDescription, CardContent }
58 |
--------------------------------------------------------------------------------
/ell-studio/src/components/common/Checkbox.js:
--------------------------------------------------------------------------------
1 | import * as React from "react"
2 | import * as CheckboxPrimitive from "@radix-ui/react-checkbox"
3 | import { CheckIcon } from "@radix-ui/react-icons"
4 |
5 | import { cn } from "library/utils"
6 |
7 | const Checkbox = React.forwardRef(({ className, ...props }, ref) => (
8 |   <CheckboxPrimitive.Root
9 |     ref={ref}
10 |     className={cn(
11 |       "peer h-4 w-4 shrink-0 rounded-sm border border-primary shadow focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-ring disabled:cursor-not-allowed disabled:opacity-50 data-[state=checked]:bg-primary data-[state=checked]:text-primary-foreground",
12 |       className
13 |     )}
14 |     {...props}
15 |   >
16 |     <CheckboxPrimitive.Indicator
17 |       className={cn("flex items-center justify-center text-current")}
18 |     >
19 |       <CheckIcon className="h-4 w-4" />
20 |     </CheckboxPrimitive.Indicator>
21 |   </CheckboxPrimitive.Root>
22 | ))
23 | Checkbox.displayName = CheckboxPrimitive.Root.displayName
24 |
25 | export { Checkbox }
26 |
--------------------------------------------------------------------------------
/ell-studio/src/components/common/Resizable.js:
--------------------------------------------------------------------------------
1 | import { DragHandleDots2Icon } from "@radix-ui/react-icons"
2 | import * as ResizablePrimitive from "react-resizable-panels"
3 | import { cn } from "library/utils"
4 |
5 | const ResizablePanelGroup = ({
6 | className,
7 | ...props
8 | }) => (
9 |   <ResizablePrimitive.PanelGroup
10 |     className={cn(
11 |       "flex h-full w-full data-[panel-group-direction=vertical]:flex-col",
12 |       className
13 |     )}
14 |     {...props}
15 |   />
16 | )
17 |
18 | const ResizablePanel = ResizablePrimitive.Panel
19 |
20 | const ResizableHandle = ({
21 |   withHandle,
22 |   className,
23 |   ...props
24 | }) => (
25 |   <ResizablePrimitive.PanelResizeHandle
26 |     className={cn(
27 |       "relative flex w-px items-center justify-center bg-border after:absolute after:inset-y-0 after:left-1/2 after:w-1 after:-translate-x-1/2 focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-ring focus-visible:ring-offset-1 data-[panel-group-direction=vertical]:h-px data-[panel-group-direction=vertical]:w-full data-[panel-group-direction=vertical]:after:left-0 data-[panel-group-direction=vertical]:after:h-1 data-[panel-group-direction=vertical]:after:w-full data-[panel-group-direction=vertical]:after:-translate-y-1/2 data-[panel-group-direction=vertical]:after:translate-x-0 [&[data-panel-group-direction=vertical]>div]:rotate-90",
28 |       className
29 |     )}
30 |     {...props}
31 |   >
32 |     {withHandle && (
33 |       <div className="z-10 flex h-4 w-3 items-center justify-center rounded-sm border bg-border">
34 |         <DragHandleDots2Icon className="h-2.5 w-2.5" />
35 |       </div>
36 |     )}
37 |   </ResizablePrimitive.PanelResizeHandle>
38 | )
39 |
40 | export { ResizablePanelGroup, ResizablePanel, ResizableHandle }
41 |
--------------------------------------------------------------------------------
/ell-studio/src/components/common/ScrollArea.js:
--------------------------------------------------------------------------------
1 | import * as React from "react"
2 | import * as ScrollAreaPrimitive from "@radix-ui/react-scroll-area"
3 |
4 | import { cn } from "library/utils"
5 |
6 | const ScrollArea = React.forwardRef(({ className, children, ...props }, ref) => (
7 |   <ScrollAreaPrimitive.Root ref={ref} className={cn("relative overflow-hidden", className)} {...props}>
8 |     <ScrollAreaPrimitive.Viewport className="h-full w-full rounded-[inherit]">
9 |       {children}
10 |     </ScrollAreaPrimitive.Viewport>
11 |     <ScrollBar />
12 |     <ScrollAreaPrimitive.Corner />
13 |   </ScrollAreaPrimitive.Root>
14 | ))
15 | ScrollArea.displayName = ScrollAreaPrimitive.Root.displayName
16 |
17 | const ScrollBar = React.forwardRef(({ className, orientation = "vertical", ...props }, ref) => (
18 |   <ScrollAreaPrimitive.ScrollAreaScrollbar
19 |     ref={ref}
20 |     orientation={orientation}
21 |     className={cn(
22 |       "flex touch-none select-none transition-colors",
23 |       orientation === "vertical" &&
24 |         "h-full w-2.5 border-l border-l-transparent p-[1px]",
25 |       orientation === "horizontal" &&
26 |         "h-2.5 flex-col border-t border-t-transparent p-[1px]",
27 |       className
28 |     )}
29 |     {...props}
30 |   >
31 |     <ScrollAreaPrimitive.ScrollAreaThumb className="relative flex-1 rounded-full bg-border" />
32 |   </ScrollAreaPrimitive.ScrollAreaScrollbar>
33 | ))
34 | ScrollBar.displayName = ScrollAreaPrimitive.ScrollAreaScrollbar.displayName
35 |
36 | export { ScrollArea, ScrollBar }
--------------------------------------------------------------------------------
/ell-studio/src/components/common/SidePanel.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { ScrollArea } from './ScrollArea';
3 |
4 | const SidePanel = ({ title, children }) => (
5 |   <div className="flex h-full flex-col p-4">
6 |     <h2 className="mb-4 text-lg font-semibold">{title}</h2>
7 |     <ScrollArea className="flex-1">
8 |       {children}
9 |     </ScrollArea>
10 |   </div>
11 | );
12 |
13 | export default SidePanel;
--------------------------------------------------------------------------------
/ell-studio/src/components/common/Spinner.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 |
3 | export const Spinner = ({ size = 'md' }) => {
4 | const sizeClasses = {
5 | sm: 'w-4 h-4',
6 | md: 'w-6 h-6',
7 | lg: 'w-8 h-8',
8 | };
9 |
10 |   return (
11 |     <svg
12 |       className={`animate-spin text-current ${sizeClasses[size]}`}
13 |       xmlns="http://www.w3.org/2000/svg"
14 |       fill="none"
15 |       viewBox="0 0 24 24"
16 |     >
17 |       <circle
18 |         className="opacity-25"
19 |         cx="12"
20 |         cy="12"
21 |         r="10"
22 |         stroke="currentColor"
23 |         strokeWidth="4"
24 |       />
25 |       <path
26 |         className="opacity-75"
27 |         fill="currentColor"
28 |         d="M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z"
29 |       />
30 |     </svg>
31 |   );
32 | };
--------------------------------------------------------------------------------
/ell-studio/src/components/common/StatItem.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 |
3 | const StatItem = ({ icon: Icon, label, value }) => (
4 |   <div className="flex items-center justify-between py-1 text-sm">
5 |     <div className="flex items-center text-muted-foreground">
6 |       <Icon className="mr-2 h-4 w-4" />
7 |       <span>{label}</span>
8 |     </div>
9 |     <span className="font-medium">{value}</span>
10 |   </div>
11 | );
12 |
13 | export default StatItem;
--------------------------------------------------------------------------------
/ell-studio/src/components/common/ToggleSwitch.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 |
3 | const ToggleSwitch = ({ leftLabel, rightLabel, isRight, onToggle }) => {
4 |   return (
5 |     <div className="inline-flex items-center rounded-md bg-gray-800 p-0.5 text-xs">
6 |       <button
7 |         className={`rounded px-2 py-1 transition-colors ${!isRight ? 'bg-gray-600 text-white' : 'text-gray-400'}`}
8 |         onClick={() => isRight && onToggle()}
9 |       >
10 |         {leftLabel}
11 |       </button>
12 |       <button
13 |         className={`rounded px-2 py-1 transition-colors ${isRight ? 'bg-gray-600 text-white' : 'text-gray-400'}`}
14 |         onClick={() => !isRight && onToggle()}
15 |       >
16 |         {rightLabel}
17 |       </button>
18 |     </div>
19 |   );
20 | };
21 |
22 | export default ToggleSwitch;
--------------------------------------------------------------------------------
/ell-studio/src/components/common/Tooltips.js:
--------------------------------------------------------------------------------
1 | import * as React from "react"
2 | import * as TooltipPrimitive from "@radix-ui/react-tooltip"
3 | import { cn } from "library/utils"
4 |
5 | const TooltipProvider = TooltipPrimitive.Provider
6 |
7 | const Tooltip = TooltipPrimitive.Root
8 |
9 | const TooltipTrigger = TooltipPrimitive.Trigger
10 |
11 | const TooltipContent = React.forwardRef(({ className, sideOffset = 4, ...props }, ref) => (
12 |   <TooltipPrimitive.Content
13 |     ref={ref}
14 |     sideOffset={sideOffset}
15 |     className={cn(
16 |       "z-50 overflow-hidden rounded-md bg-primary px-3 py-1.5 text-xs text-primary-foreground animate-in fade-in-0 zoom-in-95 data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2",
17 |       className
18 |     )}
19 |     {...props}
20 |   />
21 | ))
22 | TooltipContent.displayName = TooltipPrimitive.Content.displayName
23 |
24 | export { Tooltip, TooltipTrigger, TooltipContent, TooltipProvider }
25 |
--------------------------------------------------------------------------------
/ell-studio/src/components/depgraph/DependencyGraph.css:
--------------------------------------------------------------------------------
1 | .dependency-graph {
2 | width: 100%;
3 | height: 100%;
4 | background-color: #f8fafc;
5 | border: 1px solid #e2e8f0;
6 | border-radius: 8px;
7 | }
8 |
9 | .lmp-node {
10 | padding: 10px;
11 | border-radius: 5px;
12 | background-color: #ffffff;
13 | border: 1px solid #e2e8f0;
14 | width: 180px;
15 | box-shadow: 0 1px 3px 0 rgba(0, 0, 0, 0.1), 0 1px 2px 0 rgba(0, 0, 0, 0.06);
16 | }
17 |
18 | .lmp-node-content {
19 | font-size: 14px;
20 | color: #4b5563;
21 | text-align: center;
22 | }
23 |
24 | .lmp-node-content a {
25 | color: #4b5563;
26 | text-decoration: none;
27 | }
28 |
29 | .lmp-node-content a:hover {
30 | color: #6366f1;
31 | }
--------------------------------------------------------------------------------
/ell-studio/src/components/depgraph/LMPCardTitle.js:
--------------------------------------------------------------------------------
1 | import React from "react";
2 | import { BiCube } from "react-icons/bi";
3 | import { FiZap, FiTool } from "react-icons/fi";
4 | import VersionBadge from "../VersionBadge";
5 |
6 | export function LMPCardTitle({
7 | lmp,
8 | fontSize,
9 | displayVersion,
10 | padding = true,
11 | scale = 1,
12 | additionalClassName = '',
13 | clickable = true,
14 | shortVersion = false,
15 | paddingClassOverride = '',
16 | nameOverride = null,
17 | showInvocationCount = true,
18 | outlineStyle = 'solid',
19 | nameOverridePrint = null, // New prop for printing name override
20 | ...rest
21 | }) {
22 | const paddingClass = paddingClassOverride ? paddingClassOverride : padding ? 'p-2' : '';
23 |
24 | const scaleClass = `scale-${scale}`;
25 | const hoverClass = clickable ? ' duration-200 ease-in-out hover:bg-opacity-80 hover:bg-gray-700' : '';
26 | const cursorClass = clickable ? 'cursor-pointer' : '';
27 |
28 | // Define outline styles
29 | const outlineClasses = {
30 | solid: lmp.is_lmp ? 'bg-blue-100 text-blue-800' : 'bg-yellow-100 text-yellow-800',
31 | dashed: lmp.is_lmp ? 'bg-transparent text-blue-500 border border-dotted border-blue-400' : 'bg-transparent text-yellow-500 border border-dotted border-yellow-400'
32 | };
33 |
34 |   return (
35 |     <div
36 |       className={`flex items-center space-x-2 rounded ${paddingClass} ${scaleClass} ${hoverClass} ${cursorClass} ${outlineClasses[outlineStyle]} ${additionalClassName}`}
37 |       {...rest}
38 |     >
39 |       {lmp.lmp_type === "LM" ? <FiZap className="h-4 w-4" />
40 |         : lmp.lmp_type === "TOOL" ? <FiTool className="h-4 w-4" />
41 |         : lmp.lmp_type === "METRIC" ? <BiCube className="h-4 w-4" />
42 |         : <BiCube className="h-4 w-4" />}
43 |       {nameOverride ? nameOverride : (
44 |         <code className={`truncate font-medium text-${fontSize}`}>
45 |           {nameOverridePrint || lmp.name}()
46 |         </code>
47 |       )}
48 |       {displayVersion && (
49 |         <VersionBadge version={lmp.version_number + 1} hash={lmp.lmp_id} shortVersion={shortVersion} />
50 |       )}
51 |       {showInvocationCount && lmp.num_invocations > 0 && (
52 |         <span className="flex items-center space-x-1 text-xs text-gray-400">
53 |           <FiZap className="h-3 w-3" />
54 |           <span>{lmp.num_invocations}</span>
55 |         </span>
56 |       )}
57 |     </div>
58 |   );
59 | }
--------------------------------------------------------------------------------
/ell-studio/src/components/depgraph/collide.js:
--------------------------------------------------------------------------------
1 | import { quadtree } from 'd3-quadtree';
2 |
3 | export function collide() {
4 | let nodes = [];
5 | let force = (alpha) => {
6 | const tree = quadtree(
7 | nodes,
8 | (d) => d.x,
9 | (d) => d.y
10 | );
11 |
12 | for (const node of nodes) {
13 | const r = node.width / 2;
14 | const nx1 = node.x - r;
15 | const nx2 = node.x + r;
16 | const ny1 = node.y - r;
17 | const ny2 = node.y + r;
18 |
19 | tree.visit((quad, x1, y1, x2, y2) => {
20 | if (!quad.length) {
21 | do {
22 | if (quad.data !== node) {
23 | const r = node.width / 2 + quad.data.width / 2;
24 | let x = node.x - quad.data.x;
25 | let y = node.y - quad.data.y;
26 | let l = Math.hypot(x, y);
27 |
28 | if (l < r) {
29 | l = ((l - r) / l) * alpha;
30 | node.x -= x *= l;
31 | node.y -= y *= l;
32 | quad.data.x += x;
33 | quad.data.y += y;
34 | }
35 | }
36 | } while ((quad = quad.next));
37 | }
38 |
39 | return x1 > nx2 || x2 < nx1 || y1 > ny2 || y2 < ny1;
40 | });
41 | }
42 | };
43 |
44 | force.initialize = (newNodes) => (nodes = newNodes);
45 |
46 | return force;
47 | }
48 |
49 | export default collide;
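50 |
51 | // Illustrative usage (assumes a d3-force simulation whose nodes carry a
52 | // `width` field): simulation.force("collide", collide()) keeps square nodes
53 | // from overlapping by nudging colliding pairs apart on each tick.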
--------------------------------------------------------------------------------
/ell-studio/src/components/evaluations/EvaluationCardTitle.js:
--------------------------------------------------------------------------------
1 | import React from "react";
2 | import { FiZap } from "react-icons/fi";
3 | import VersionBadge from "../VersionBadge";
4 | import EvaluationsIcon from "./EvaluationsIcon";
5 |
6 | export function EvaluationCardTitle({
7 | evaluation,
8 | fontSize = "sm",
9 | displayVersion = true,
10 | padding = true,
11 | scale = 1,
12 | additionalClassName = '',
13 | clickable = true,
14 | shortVersion = false,
15 | paddingClassOverride = '',
16 | nameOverride = null,
17 | showRunCount = true,
18 | outlineStyle = 'solid',
19 | ...rest
20 | }) {
21 | const paddingClass = paddingClassOverride ? paddingClassOverride : padding ? 'p-2' : '';
22 |
23 | const scaleClass = `scale-${scale}`;
24 | const hoverClass = clickable ? 'duration-200 ease-in-out hover:bg-opacity-80 hover:bg-gray-700' : '';
25 | const cursorClass = clickable ? 'cursor-pointer' : '';
26 |
27 | const outlineClasses = {
28 | solid: 'bg-blue-100 text-blue-800',
29 | dashed: 'bg-transparent text-blue-500 border border-dotted border-blue-400'
30 | };
31 |
32 |   return (
33 |     <div
34 |       className={`flex items-center space-x-2 rounded ${paddingClass} ${scaleClass} ${hoverClass} ${cursorClass} ${outlineClasses[outlineStyle]} ${additionalClassName}`}
35 |       {...rest}
36 |     >
37 |       <EvaluationsIcon className="h-4 w-4" />
38 |       {nameOverride ? nameOverride : (
39 |         <code className={`truncate font-medium text-${fontSize}`}>
40 |           {evaluation.name}
41 |         </code>
42 |       )}
43 |       {displayVersion && (
44 |         <VersionBadge
45 |           version={evaluation.version_number}
46 |           hash={evaluation.id}
47 |           shortVersion={shortVersion}
48 |         />
49 |       )}
50 |       {showRunCount && evaluation.runs && evaluation.runs.length > 0 && (
51 |         <span className="flex items-center space-x-1 text-xs text-gray-400">
52 |           <FiZap className="h-3 w-3" />
53 |           <span>{evaluation.runs.length}</span>
54 |         </span>
55 |       )}
56 |     </div>
57 |   );
58 | }
59 |
--------------------------------------------------------------------------------
/ell-studio/src/components/evaluations/EvaluationOverview.js:
--------------------------------------------------------------------------------
1 | import React, { useState } from 'react';
2 | import { FiBarChart2, FiClock, FiDatabase, FiTag, FiZap } from 'react-icons/fi';
3 | import { Card, CardContent } from '../common/Card';
4 | import RunSummary from './RunSummary';
5 | import VersionBadge from '../VersionBadge';
6 | import { getTimeAgo } from '../../utils/lmpUtils';
7 | import MetricGraphGrid from './MetricGraphGrid';
8 |
9 | function EvaluationOverview({ evaluation, groupedRuns, onActiveIndexChange }) {
10 | const [activeIndex, setActiveIndex] = useState(null);
11 |
12 | const handleActiveIndexChange = (index) => {
13 | setActiveIndex(index);
14 | onActiveIndexChange(index);
15 | };
16 |
17 |   return (
18 |     <>
19 |       <div className="flex items-center space-x-2 text-sm text-muted-foreground">
20 |         <span className="flex items-center">
21 |           <FiBarChart2 className="mr-1 h-4 w-4" /> Evaluation
22 |         </span>
23 |         <span>•</span>
24 |         <span className="flex items-center">
25 |           <FiClock className="mr-1 h-4 w-4" /> {getTimeAgo(new Date(evaluation.created_at))}
26 |         </span>
27 |       </div>
28 |       {evaluation.labelers ? (
29 |         <MetricGraphGrid
30 |           evaluation={evaluation}
31 |           groupedRuns={groupedRuns}
32 |           onActiveIndexChange={handleActiveIndexChange}
33 |         />
34 |       ) : (
35 |         <Card>
36 |           <CardContent>No metrics available for this evaluation yet.</CardContent>
37 |         </Card>
38 |       )}
39 |     </>
40 |   );
41 | }
42 |
43 | export default EvaluationOverview;
44 |
--------------------------------------------------------------------------------
/ell-studio/src/components/evaluations/EvaluationsAnalyticsSidePanel.js:
--------------------------------------------------------------------------------
1 | import React, { useMemo } from 'react';
2 | import { FiBarChart2, FiClock, FiDatabase } from 'react-icons/fi';
3 |
4 | const EvaluationsAnalyticsSidePanel = ({ evaluations }) => {
5 | const analytics = useMemo(() => {
6 | const totalEvaluations = evaluations.length;
7 | const activeEvaluations = evaluations.filter(e => e.status === 'Active').length;
8 | const completedEvaluations = evaluations.filter(e => e.status === 'Completed').length;
9 | const totalDatapoints = evaluations.reduce((sum, e) => sum + e.n_evals, 0);
10 |
11 | return { totalEvaluations, activeEvaluations, completedEvaluations, totalDatapoints };
12 | }, [evaluations]);
13 |
14 |   return (
15 |     <div className="space-y-4 p-4">
16 |       <h2 className="flex items-center text-sm font-semibold">
17 |         <FiBarChart2 className="mr-2 h-4 w-4" /> Evaluation Analytics
18 |       </h2>
19 |       <div className="space-y-2 text-sm">
20 |         <div className="flex justify-between">
21 |           <span>Total Evaluations</span>
22 |           <span>{analytics.totalEvaluations}</span>
23 |         </div>
24 |         <div className="flex justify-between">
25 |           <span>Active Evaluations</span>
26 |           <span>{analytics.activeEvaluations}</span>
27 |         </div>
28 |         <div className="flex justify-between">
29 |           <span>Completed Evaluations</span>
30 |           <span>{analytics.completedEvaluations}</span>
31 |         </div>
32 |         <div className="flex justify-between">
33 |           <span>Total Datapoints</span>
34 |           <span>{analytics.totalDatapoints}</span>
35 |         </div>
36 |       </div>
37 |       {/* You can add more analytics or charts here */}
38 |     </div>
39 |   );
40 | };
41 |
42 | export default EvaluationsAnalyticsSidePanel;
--------------------------------------------------------------------------------
/ell-studio/src/components/evaluations/EvaluationsIcon.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { FiBarChart2, FiClipboard } from 'react-icons/fi';
3 |
4 | const EvaluationsIcon = ({ className = '' }) => (
5 |   <span className={`relative inline-flex ${className}`}>
6 |     <FiBarChart2 className="h-full w-full" />
7 |     <FiClipboard className="absolute -bottom-1 -right-1 h-1/2 w-1/2" />
8 |   </span>
9 | );
10 |
11 | export default EvaluationsIcon;
12 |
--------------------------------------------------------------------------------
/ell-studio/src/components/evaluations/LabelDisplay.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 |
3 | const LabelDisplay = ({
4 | value : valueNumberish, // This is the mean
5 | isAggregate = false,
6 | stats = null // { min, max, stdDev }
7 | }) => {
8 | const value = typeof valueNumberish === 'boolean' ? Number(valueNumberish) : valueNumberish;
9 |
10 |   if (typeof value !== 'number') {
11 |     return <span>{value}</span>;
12 |   }
13 |
14 | if (!isAggregate || !stats) {
15 |     return <span>{value.toFixed(2)}</span>;
16 |   }
17 |
18 | const { min, max, stdDev } = stats;
19 | const mean = value;
20 |
21 | // Handle the case where min equals max
22 | const isConstant = min === max;
23 |
24 | // Calculate positions as percentages, clamping to the range
25 | const meanPos = isConstant ? 50 : ((mean - min) / (max - min)) * 100;
26 | const leftStdDevPos = isConstant ? 50 : Math.max(((mean - stdDev - min) / (max - min)) * 100, 0);
27 | const rightStdDevPos = isConstant ? 50 : Math.min(((mean + stdDev - min) / (max - min)) * 100, 100);
28 | const boxWidth = rightStdDevPos - leftStdDevPos;
29 |
30 |   return (
31 |     <div className="flex items-center space-x-2">
32 |       <span className="w-12 text-right">{value.toFixed(2)}</span>
33 |       <div className="relative h-2 flex-1">
34 |         {/* Base bar */}
35 |         <div className="absolute inset-0 rounded bg-gray-700" />
36 |         {/* StdDev box - only show if there's variation */}
37 |         {!isConstant && (
38 |           <div
39 |             className="absolute top-0 h-full rounded bg-blue-500 bg-opacity-40"
40 |             style={{
41 |               left: `${leftStdDevPos}%`,
42 |               width: `${boxWidth}%`,
43 |             }}
44 |           />
45 |         )}
46 |
47 |         {/* Mean marker - made slightly larger when it's a constant value */}
48 |         <div
49 |           className="absolute top-1/2 -translate-y-1/2 rounded-full bg-blue-300"
50 |           style={{
51 |             left: `${meanPos}%`,
52 |             width: isConstant ? '8px' : '6px',
53 |             height: isConstant ? '8px' : '6px',
54 |           }}
55 |         />
56 |       </div>
57 |     </div>
58 |   );
59 | };
60 |
61 | export default LabelDisplay;
--------------------------------------------------------------------------------
/ell-studio/src/components/evaluations/MetricTable.js:
--------------------------------------------------------------------------------
1 | import React, { useState } from 'react';
2 | import { FiBarChart2 } from 'react-icons/fi';
3 | import TrendLine from '../graphing/TrendLine';
4 | import MetricDisplay from './MetricDisplay';
5 |
6 | const MetricTable = ({ summaries, historicalData, isVertical }) => {
7 | const [hoverIndex, setHoverIndex] = useState(null);
8 |
9 |   return (
10 |     <div className={isVertical ? "flex flex-col" : "flex flex-row flex-wrap"}>
11 |       {summaries.map((summary, index) => {
12 |         const currentValue = hoverIndex !== null ? historicalData[summary.evaluation_labeler_id][hoverIndex].mean : summary.data.mean;
13 |         const previousValue = historicalData[summary.evaluation_labeler_id][historicalData[summary.evaluation_labeler_id].length - 2]?.mean;
14 |
15 |         return (
16 |           <React.Fragment key={summary.evaluation_labeler_id}>
17 |             <div className="flex items-center justify-between py-1">
18 |               <div className="flex items-center space-x-2 text-sm">
19 |                 <FiBarChart2 className="h-4 w-4 text-muted-foreground" />
20 |                 <span>
21 |                   {summary.evaluation_labeler.name}
22 |                 </span>
23 |               </div>
24 |               <div className="flex items-center space-x-2">
25 |                 <div className="h-6 w-24">
26 |                   <TrendLine
27 |                     data={historicalData[summary.evaluation_labeler_id].map(d => d.mean)}
28 |                     hoverIndex={hoverIndex}
29 |                     onHover={setHoverIndex}
30 |                   />
31 |                 </div>
32 |
33 |                 <MetricDisplay
34 |                   currentValue={currentValue}
35 |                   previousValue={previousValue}
36 |                   label={summary.evaluation_labeler.name}
37 |                 />
38 |               </div>
39 |             </div>
40 |
41 |             {index < summaries.length - 1 && (
42 |               <div className="border-t border-border" />
43 |             )}
44 |           </React.Fragment>
45 |         );
46 |       })}
47 |     </div>
48 |   );
49 | };
50 |
51 | export default MetricTable;
52 |
--------------------------------------------------------------------------------
/ell-studio/src/components/evaluations/RunSummary.js:
--------------------------------------------------------------------------------
1 | import React, { useState } from 'react';
2 | import { LMPCardTitle } from '../depgraph/LMPCardTitle';
3 | import MetricTable from './MetricTable';
4 |
5 | const RunSummary = ({ groupedRuns, isVertical }) => {
6 | const latestRuns = Object.values(groupedRuns).map(runs => runs[runs.length - 1]);
7 | const mostRecentRun = latestRuns.reduce((latest, current) =>
8 | new Date(current.end_time) > new Date(latest.end_time) ? current : latest
9 | );
10 |
11 | const scalarSummaries = mostRecentRun.labeler_summaries.filter(summary => summary.is_scalar);
12 |
13 | const historicalData = scalarSummaries.reduce((acc, summary) => {
14 | acc[summary.evaluation_labeler_id] = groupedRuns[mostRecentRun.evaluated_lmp.name]
15 | .map(run => run.labeler_summaries
16 | .find(s => s.evaluation_labeler_id === summary.evaluation_labeler_id)?.data
17 | )
18 | .filter(Boolean);
19 | return acc;
20 | }, {});
21 |
22 |   return (
23 |     <div className="space-y-2">
24 |       <LMPCardTitle
25 |         lmp={mostRecentRun.evaluated_lmp}
26 |         displayVersion
27 |         showInvocationCount={false}
28 |         shortVersion
29 |         padding={false}
30 |         clickable={false}
31 |       />
32 |       <MetricTable
33 |         summaries={scalarSummaries}
34 |         historicalData={historicalData}
35 |         isVertical={isVertical}
36 |       />
37 |     </div>
38 |   );
39 | };
40 |
41 | export default RunSummary;
--------------------------------------------------------------------------------
/ell-studio/src/components/evaluations/runs/EvaluationRunOverview.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { Link } from 'react-router-dom';
3 | import { Card, CardContent } from '../../common/Card';
4 | import { EvaluationCardTitle } from '../EvaluationCardTitle';
5 | import { LMPCardTitle } from '../../depgraph/LMPCardTitle';
6 | import LMPSourceView from '../../source/LMPSourceView';
7 |
8 | function EvaluationRunOverview({ run }) {
9 |   return (
10 |     <div className="space-y-4">
11 |       <Card>
12 |         <CardContent className="p-4">
13 |           <div className="flex items-center space-x-2 text-sm">
14 |             <Link to={`/evaluations/${run?.evaluation?.id}`}>
15 |               <EvaluationCardTitle
16 |                 evaluation={run?.evaluation}
17 |                 displayVersion={false}
18 |                 showRunCount={false}
19 |                 padding={false}
20 |               />
21 |             </Link>
22 |             <span>•</span>
23 |             <span className="font-medium">Run #{run?.id}</span>
24 |           </div>
25 |         </CardContent>
26 |       </Card>
27 |
28 |       <div>
29 |         <h3 className="mb-2 text-sm font-semibold">Evaluated LMP</h3>
30 |         <Card>
31 |           <CardContent className="space-y-2 p-4">
32 |             <LMPCardTitle
33 |               lmp={run?.evaluated_lmp}
34 |               displayVersion
35 |               padding={false}
36 |             />
37 |             <LMPSourceView lmp={run?.evaluated_lmp} />
38 |           </CardContent>
39 |         </Card>
40 |       </div>
41 |     </div>
42 |   );
43 | }
44 |
45 | export default EvaluationRunOverview;
--------------------------------------------------------------------------------
/ell-studio/src/components/evaluations/runs/SearchAndFiltersBar.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 |
3 | function SearchAndFiltersBar({ searchQuery, setSearchQuery }) {
4 |   return (
5 |     <div className="flex items-center space-x-4">
6 |       <div className="flex-1">
7 |         <input
8 |           type="text"
9 |           placeholder="Search runs..."
10 |           value={searchQuery}
11 |           onChange={(e) => setSearchQuery(e.target.value)}
12 |           className="w-full px-4 py-2 rounded-md border border-border bg-background text-foreground focus:outline-none focus:ring-2 focus:ring-primary"
13 |         />
14 |       </div>
15 |       {/* We can add more filters here later */}
16 |     </div>
17 |   );
18 | }
19 |
20 | export default SearchAndFiltersBar;
--------------------------------------------------------------------------------
/ell-studio/src/components/graphing/Graph.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { GraphRenderer, MetricAdder, useGraph } from './GraphSystem';
3 |
4 | const Graph = ({ graphId, metrics, type = 'line' }) => {
5 | useGraph(graphId);
6 |
7 |   return (
8 |     <>
9 |       {metrics.map((metric, index) => (
10 |         <MetricAdder
11 |           key={index}
12 |           graphId={graphId}
13 |           {...metric}
14 |         />
15 |       ))}
16 |       <GraphRenderer graphId={graphId} type={type} />
17 |     </>
18 |   );
19 | };
20 |
21 | export default Graph;
22 |
--------------------------------------------------------------------------------
/ell-studio/src/components/graphing/TrendLine.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { Line } from 'react-chartjs-2';
3 | import { Chart as ChartJS, CategoryScale, LinearScale, LineElement, PointElement, Tooltip as ChartTooltip, Filler } from 'chart.js';
4 |
5 | ChartJS.register(CategoryScale, LinearScale, LineElement, PointElement, ChartTooltip, Filler);
6 |
7 | const TrendLine = ({ data, hoverIndex, onHover }) => {
8 | const trend = data[data.length - 1] - data[0];
9 | const trendColor = trend > 0 ? 'rgba(52, 211, 153, 0.8)' : 'rgba(239, 68, 68, 0.8)';
10 | const fillColor = trend > 0 ? 'rgba(52, 211, 153, 0.2)' : 'rgba(239, 68, 68, 0.2)';
11 |
12 | const chartData = {
13 | labels: data.map((_, index) => index + 1),
14 | datasets: [{
15 | data,
16 | borderColor: trendColor,
17 | backgroundColor: fillColor,
18 | pointRadius: 0,
19 | borderWidth: 1,
20 | tension: 0.4,
21 | fill: true,
22 | }],
23 | };
24 |
25 | const options = {
26 | responsive: true,
27 | maintainAspectRatio: false,
28 | plugins: {
29 | legend: { display: false },
30 | tooltip: { enabled: false }
31 | },
32 | scales: {
33 | x: { display: false },
34 | y: {
35 | display: false,
36 | min: Math.min(...data) * 0.95,
37 | max: Math.max(...data) * 1.05,
38 | }
39 | },
40 | };
41 |
42 |   return (
43 |     <div
44 |       className="h-full w-full"
45 |       onMouseMove={(e) => {
46 |         const rect = e.currentTarget.getBoundingClientRect();
47 |         const x = e.clientX - rect.left;
48 |         const index = Math.round((x / rect.width) * (data.length - 1));
49 |         onHover(index);
50 |       }}
51 |       onMouseLeave={() => onHover(null)}
52 |     >
53 |       <Line data={chartData} options={options} />
54 |     </div>
55 |   );
56 | };
57 |
58 | export default TrendLine;
59 |
--------------------------------------------------------------------------------
/ell-studio/src/components/invocations/ContentsRenderer.js:
--------------------------------------------------------------------------------
1 |
2 | import { useBlob } from '../../hooks/useBackend';
3 | import IORenderer from '../IORenderer';
4 |
5 | export function ContentsRenderer({ item, field, ...rest }) {
6 | const contents = item.contents;
7 | console.log(contents[field]);
8 |
9 | if (contents.is_external && !contents.is_external_loaded) {
10 |     return <span className="text-gray-500">Loading...</span>;
11 |   } else {
12 |     return <IORenderer content={contents[field]} {...rest} />;
13 |   }
14 | }
--------------------------------------------------------------------------------
/ell-studio/src/components/invocations/InvocationInfoPane.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { FiClock, FiZap, FiHash, FiBox, FiTag, FiLayers } from 'react-icons/fi';
3 | import { motion } from 'framer-motion';
4 | import { Card } from '../common/Card';
5 |
6 | export function InvocationInfoPane({ invocation, isFullWidth }) {
7 |   return (
8 |     <motion.div
9 |       initial={{ opacity: 0 }}
10 |       animate={{ opacity: 1 }}
11 |       transition={{ duration: 0.2 }}
12 |       className={isFullWidth ? 'w-full' : 'w-1/2'}
13 |     >
14 |       <Card className="p-4">
15 |         <h3 className="mb-3 text-sm font-semibold">Invocation Details</h3>
16 |         <div className="space-y-2 text-sm">
17 |           <div className="flex items-center justify-between">
18 |             <span className="flex items-center text-muted-foreground">
19 |               <FiClock className="mr-2 h-4 w-4" /> Created At:
20 |             </span>
21 |             <span>{new Date(invocation.created_at).toLocaleTimeString()}</span>
22 |           </div>
23 |           <div className="flex items-center justify-between">
24 |             <span className="flex items-center text-muted-foreground">
25 |               <FiZap className="mr-2 h-4 w-4" /> Latency:
26 |             </span>
27 |             <span>{(invocation.latency_ms / 1000).toFixed(2)}s</span>
28 |           </div>
29 |           <div className="flex items-center justify-between">
30 |             <span className="flex items-center text-muted-foreground">
31 |               <FiHash className="mr-2 h-4 w-4" /> Prompt Tokens:
32 |             </span>
33 |             <span>{invocation.prompt_tokens || "N/A"}</span>
34 |           </div>
35 |           <div className="flex items-center justify-between">
36 |             <span className="flex items-center text-muted-foreground">
37 |               <FiBox className="mr-2 h-4 w-4" /> Completion Tokens:
38 |             </span>
39 |             <span>{invocation.completion_tokens || "N/A"}</span>
40 |           </div>
41 |           <div className="flex items-center justify-between">
42 |             <span className="flex items-center text-muted-foreground">
43 |               <FiTag className="mr-2 h-4 w-4" /> LMP Type:
44 |             </span>
45 |             <span className="flex items-center">
46 |               <FiLayers className="mr-1 h-4 w-4" />
47 |               {invocation.lmp?.is_lm ? "LM" : "LMP"}
48 |             </span>
49 |           </div>
50 |         </div>
51 |       </Card>
52 |     </motion.div>
53 |   );
54 | }
--------------------------------------------------------------------------------
/ell-studio/src/components/invocations/InvocationsLayout.js:
--------------------------------------------------------------------------------
1 | import React, { useState, useMemo, useEffect, useCallback } from 'react';
2 | import InvocationDetailsSidebar from './details/InvocationDetailsPopover';
3 | import { useNavigate, useLocation } from 'react-router-dom';
4 |
5 | const InvocationsLayout = ({ children, selectedTrace, setSelectedTrace, showSidebar = false, containerClass = '' }) => {
6 | const [sidebarWidth, setSidebarWidth] = useState(window.innerWidth / 2);
7 | const [windowWidth, setWindowWidth] = useState(window.innerWidth);
8 | const navigate = useNavigate();
9 | const location = useLocation();
10 |
11 | const handleSidebarResize = useCallback((newWidth) => {
12 | setSidebarWidth(newWidth);
13 | }, []);
14 |
15 | useEffect(() => {
16 | const handleWindowResize = () => {
17 | setWindowWidth(window.innerWidth);
18 | setSidebarWidth(prevWidth => Math.min(prevWidth, window.innerWidth / 2));
19 | };
20 |
21 | window.addEventListener('resize', handleWindowResize);
22 |
23 | return () => {
24 | window.removeEventListener('resize', handleWindowResize);
25 | };
26 | }, []);
27 |
28 | const mainContentStyle = useMemo(() => {
29 | if (showSidebar && selectedTrace) {
30 | const mainWidth = windowWidth - sidebarWidth - 64;
31 | if (mainWidth < ((windowWidth - 64) / 2)) {
32 | return { width: `${(windowWidth - 64) / 2}px` };
33 | }
34 | return { width: `${mainWidth}px` };
35 | }
36 | return {};
37 | }, [showSidebar, selectedTrace, sidebarWidth, windowWidth]);
38 |
39 | const handleCloseSidebar = () => {
40 | setSelectedTrace(null);
41 | navigate(location.pathname);
42 | };
43 |
44 |   return (
45 |     <>
46 |       <div className={containerClass}>
47 |         <div style={mainContentStyle}>
48 |           {children}
49 |         </div>
50 |       </div>
51 |       {showSidebar && selectedTrace && (
52 |         <InvocationDetailsSidebar
53 |           invocation={selectedTrace}
54 |           onClose={handleCloseSidebar}
55 |           onResize={handleSidebarResize}
56 |         />
57 |       )}
58 |     </>
59 |   );
60 | };
61 |
62 | export default InvocationsLayout;
--------------------------------------------------------------------------------
/ell-studio/src/components/invocations/details/TraceDetailsPane.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { FiFilter } from 'react-icons/fi';
3 |
4 | const TraceDetailsPane = ({ trace }) => {
5 |   return (
6 |     <div className="p-4 text-sm">
7 |       <h2 className="mb-2 text-xs font-semibold text-gray-400">TRACE</h2>
8 |       <div className="mb-2 flex items-center justify-between">
9 |         <span className="text-gray-400">Filter traces...</span>
10 |         <FiFilter className="h-4 w-4 text-gray-400" />
11 |       </div>
12 |       <div className="rounded border border-gray-700 p-2">
13 |         <div className="flex items-center space-x-2">
14 |           <span>◢</span>
15 |           <span className="font-medium">{trace.name}</span>
16 |           <span className="text-green-400">●</span>
17 |         </div>
18 |         <div className="mt-1 flex items-center space-x-2 text-gray-400">
19 |           <span>{trace.latency}</span>
20 |           <span>{trace.tokens} tokens</span>
21 |           <span>{trace.tags[0]}</span>
22 |         </div>
23 |       </div>
24 |       {/* Add more trace items here */}
25 |       <div className="mt-2 text-xs text-gray-500">
26 |         Some runs have been hidden. <button className="underline">Show 10 hidden runs</button>
27 |       </div>
28 |     </div>
29 |   );
30 | };
31 |
32 | export default TraceDetailsPane;
--------------------------------------------------------------------------------
/ell-studio/src/components/layouts/GenericPageLayout.js:
--------------------------------------------------------------------------------
1 | import React, { useState, useEffect } from 'react';
2 | import { ResizablePanelGroup, ResizablePanel, ResizableHandle } from "../common/Resizable";
3 | import { ScrollArea } from '../common/ScrollArea';
4 | import InvocationsLayout from '../invocations/InvocationsLayout';
5 |
6 | const GenericPageLayout = ({
7 | children,
8 | selectedTrace,
9 | setSelectedTrace,
10 | sidebarContent,
11 | showSidebar = true,
12 | minimizeSidebar = false,
13 | }) => {
14 | const [sidebarVisible, setSidebarVisible] = useState(!selectedTrace && showSidebar);
15 | const [isSmallScreen, setIsSmallScreen] = useState(false);
16 |
17 | useEffect(() => {
18 | // Function to check window size
19 | const checkWindowSize = () => {
20 | setIsSmallScreen(window.innerWidth < 1024); // 1024px is typical laptop width
21 | };
22 |
23 | // Initial check
24 | checkWindowSize();
25 |
26 | // Add event listener
27 | window.addEventListener('resize', checkWindowSize);
28 |
29 | // Cleanup
30 | return () => window.removeEventListener('resize', checkWindowSize);
31 | }, []);
32 |
33 | useEffect(() => {
34 | setSidebarVisible(!selectedTrace && showSidebar && !(minimizeSidebar && isSmallScreen));
35 | }, [selectedTrace, showSidebar, minimizeSidebar, isSmallScreen]);
36 |
37 |   return (
38 |     <ResizablePanelGroup direction="horizontal" className="h-full w-full">
39 |       <ResizablePanel defaultSize={sidebarVisible ? 70 : 100} minSize={30}>
40 |         <InvocationsLayout
41 |           selectedTrace={selectedTrace}
42 |           setSelectedTrace={setSelectedTrace}
43 |           showSidebar
44 |         >
45 |           <ScrollArea className="h-full">
46 |             {children}
47 |           </ScrollArea>
48 |         </InvocationsLayout>
49 |       </ResizablePanel>
50 |       {sidebarVisible && (
51 |         <>
52 |           <ResizableHandle withHandle />
53 |           <ResizablePanel defaultSize={30} minSize={20}>
54 |             <ScrollArea className="h-full">
55 |               {sidebarContent}
56 |             </ScrollArea>
57 |           </ResizablePanel>
58 |         </>
59 |       )}
60 |     </ResizablePanelGroup>
61 |   );
62 | };
63 |
64 | export default GenericPageLayout;
--------------------------------------------------------------------------------
/ell-studio/src/components/source/CodeHighlighter.js:
--------------------------------------------------------------------------------
1 | import React, { useMemo, useCallback } from "react";
2 | import { Prism as SyntaxHighlighter, createElement } from "react-syntax-highlighter";
3 | import { atomDark as theme } from "react-syntax-highlighter/dist/esm/styles/prism";
4 | import { diffLines, formatLines } from 'unidiff';
5 | import { cleanCode } from './codeCleanerUtils';
6 | import { StandardRenderer } from './StandardRenderer';
7 | import { DiffRenderer } from './DiffRenderer';
8 |
9 | export function CodeHighlighter({
10 | code,
11 | previousCode,
12 | isDiffView,
13 | highlighterStyle = {},
14 | language = "python",
15 | showLineNumbers = true,
16 | startingLineNumber = 1,
17 | customHooks = [],
18 | defaultRowPadding = 1,
19 | offset: indentOffset = 35,
20 | }) {
21 | const { cleanedCode, hookRanges } = useMemo(() =>
22 | cleanCode(code, customHooks), [code, customHooks]);
23 | const commonProps = useMemo(() => {
24 | return {
25 | language,
26 | style: theme,
27 | customStyle: {
28 | margin: 0,
29 | padding: "1em",
30 | borderRadius: "0 0 6px 6px",
31 | ...highlighterStyle,
32 | },
33 | };
34 | }, [language, highlighterStyle]);
35 |
36 | const standardRenderer = useCallback(
37 | ({ rows, stylesheet, useInlineStyles }) =>
38 | StandardRenderer({
39 | rows,
40 | stylesheet,
41 | useInlineStyles,
42 | customHooks,
43 | hookRanges,
44 | indentOffset,
45 | defaultRowPadding
46 | }),
47 | [customHooks, hookRanges, indentOffset, defaultRowPadding]
48 | );
49 |
50 | const diffRenderer = useCallback(
51 | ({ stylesheet, useInlineStyles }) =>
52 | DiffRenderer({
53 | previousCode: previousCode,
54 | code: code,
55 | stylesheet,
56 | useInlineStyles,
57 | startingLineNumber,
58 | commonProps
59 | }),
60 | [previousCode, code, startingLineNumber, commonProps]
61 | );
62 |
63 |
64 |   if (isDiffView && previousCode && code) {
65 |     return (
66 |       <SyntaxHighlighter
67 |         {...commonProps}
68 |         renderer={diffRenderer}
69 |         showLineNumbers={false}
70 |       >
71 |         {""}
72 |       </SyntaxHighlighter>
73 |     );
74 |   }
75 |
76 |   return (
77 |     <SyntaxHighlighter
78 |       {...commonProps}
79 |       renderer={standardRenderer}
80 |       showLineNumbers={showLineNumbers}
81 |       startingLineNumber={startingLineNumber}
82 |       wrapLines={true}
83 |     >
84 |       {cleanedCode}
85 |     </SyntaxHighlighter>
86 |   );
87 | }
--------------------------------------------------------------------------------
/ell-studio/src/components/source/StandardRenderer.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { createElement } from "react-syntax-highlighter";
3 |
4 | export function StandardRenderer({
5 | rows,
6 | stylesheet,
7 | useInlineStyles,
8 | customHooks,
9 | hookRanges,
10 | indentOffset,
11 | defaultRowPadding
12 | }) {
13 | const rowTree = [];
14 | const activeHooks = customHooks.map(() => null);
15 |
16 | for (let i = 0; i < rows.length; i++) {
17 | let currentElement = (
18 |
25 | {createElement({
26 | node: rows[i],
27 | stylesheet,
28 | useInlineStyles,
29 | key: `code-segment-${i}`,
30 | })}
31 |
32 | );
33 |
34 | for (let hookIndex = 0; hookIndex < customHooks.length; hookIndex++) {
35 | const hook = customHooks[hookIndex];
36 |
37 | const containingInterval = hookRanges[hookIndex].some(
38 | ([start, end, _]) => start <= i && i <= end
39 | );
40 | if (containingInterval) {
41 | if (activeHooks[hookIndex] === null) {
42 | activeHooks[hookIndex] = [];
43 | }
44 | activeHooks[hookIndex].push(currentElement);
45 | currentElement = null;
46 | } else if (activeHooks[hookIndex] !== null) {
47 | const rangeOfLastHook = hookRanges[hookIndex].find(
48 | ([start, end, contents]) => start <= i - 1 && i - 1 <= end
49 | );
50 |
51 | rowTree.push(
52 | hook.wrapper({
53 | children: activeHooks[hookIndex],
54 | content: rangeOfLastHook[2],
55 | key: `${hook.name}-${i}`,
56 | })
57 | );
58 | activeHooks[hookIndex] = null;
59 | }
60 | }
61 |
62 | if (currentElement) {
63 | rowTree.push(currentElement);
64 | }
65 | }
66 |
67 | customHooks.forEach((hook, hookIndex) => {
68 | if (activeHooks[hookIndex] !== null) {
69 | const range = hookRanges[hookIndex][hookRanges[hookIndex].length - 1];
70 |
71 | rowTree.push(
72 | hook.wrapper({
73 | children: activeHooks[hookIndex],
74 | key: `${hook.name}-end`,
75 | content: range[2],
76 | })
77 | );
78 | }
79 | });
80 |
81 | return rowTree;
82 | }
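83 |
84 | // Rows that fall inside a hook's [start, end] range are buffered in
85 | // `activeHooks` and flushed through hook.wrapper as one group, so each
86 | // wrapper receives its highlighted lines as a single children array plus
87 | // the captured contents string.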
--------------------------------------------------------------------------------
/ell-studio/src/components/source/codeCleanerUtils.js:
--------------------------------------------------------------------------------
1 | export function cleanCode(code, customHooks) {
2 | const hookRanges = customHooks.map(() => []);
3 | const lines = code.split("\n");
4 | const cleanedLines = [];
5 | let cleanedLineIndex = 0;
6 |
7 | for (let index = 0; index < lines.length; index++) {
8 | const line = lines[index];
9 | let skipLine = false;
10 |
11 | customHooks.forEach((hook, hookIndex) => {
12 | if (line.includes(hook.startTag)) {
13 | hookRanges[hookIndex].push([cleanedLineIndex]);
14 | skipLine = true;
15 | } else if (
16 | line.includes(hook.endTag) &&
17 | hookRanges[hookIndex][hookRanges[hookIndex].length - 1]?.length === 1
18 | ) {
19 | hookRanges[hookIndex][hookRanges[hookIndex].length - 1].push(
20 | cleanedLineIndex - 1
21 | );
22 | const contentHook = cleanedLines
23 | .slice(
24 | hookRanges[hookIndex][hookRanges[hookIndex].length - 1][0],
25 | cleanedLineIndex
26 | )
27 | .join("\n");
28 | hookRanges[hookIndex][hookRanges[hookIndex].length - 1].push(
29 | contentHook
30 | );
31 | skipLine = true;
32 | }
33 | });
34 |
35 | if (!skipLine) {
36 | cleanedLines.push(line);
37 | cleanedLineIndex++;
38 | }
39 | }
40 |
41 | const cleanedCode = cleanedLines.join("\n");
42 | return { cleanedCode, hookRanges };
43 | }
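44 |
45 | // Illustrative example with a hypothetical hook { startTag: "# <hidden>",
46 | // endTag: "# </hidden>" }: the tag lines are dropped from cleanedCode and
47 | // hookRanges records [startLine, endLine, contents] for the region between them.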
--------------------------------------------------------------------------------
/ell-studio/src/contexts/ThemeContext.js:
--------------------------------------------------------------------------------
1 | import React, { createContext, useState, useContext, useEffect } from 'react';
2 |
3 | const ThemeContext = createContext();
4 |
5 | export const useTheme = () => useContext(ThemeContext);
6 |
7 | export const ThemeProvider = ({ children }) => {
8 | const [darkMode, setDarkMode] = useState(true);
9 |
10 | useEffect(() => {
11 | if (darkMode) {
12 | document.documentElement.classList.add('dark');
13 | } else {
14 | document.documentElement.classList.remove('dark');
15 | }
16 | }, [darkMode]);
17 |
18 | const toggleDarkMode = () => {
19 | setDarkMode(!darkMode);
20 | };
21 |
22 |   return (
23 |     <ThemeContext.Provider value={{ darkMode, toggleDarkMode }}>
24 |       {children}
25 |     </ThemeContext.Provider>
26 |   );
27 | };
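28 |
29 | // Illustrative consumer: const { darkMode, toggleDarkMode } = useTheme();
30 | // works in any component below the provider and flips the `dark` class on <html>.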
--------------------------------------------------------------------------------
/ell-studio/src/index.css:
--------------------------------------------------------------------------------
1 | @tailwind base;
2 | @tailwind components;
3 | @tailwind utilities;
4 |
5 | body {
6 | @apply bg-gray-900;
7 | }
8 |
--------------------------------------------------------------------------------
/ell-studio/src/index.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import ReactDOM from 'react-dom/client';
3 | import './index.css';
4 | import App from './App';
5 | import reportWebVitals from './reportWebVitals';
6 |
7 | const root = ReactDOM.createRoot(document.getElementById('root'));
8 | root.render(
9 |   <React.StrictMode>
10 |     <App />
11 |   </React.StrictMode>
12 | );
13 |
14 | // If you want to start measuring performance in your app, pass a function
15 | // to log results (for example: reportWebVitals(console.log))
16 | // or send to an analytics endpoint. Learn more: https://bit.ly/CRA-vitals
17 | reportWebVitals();
18 |
--------------------------------------------------------------------------------
/ell-studio/src/library/utils.js:
--------------------------------------------------------------------------------
1 | import { clsx } from "clsx"
2 | import { twMerge } from "tailwind-merge"
3 |
4 | export function cn(...inputs) {
5 | return twMerge(clsx(inputs))
6 | }
7 |
--------------------------------------------------------------------------------
/ell-studio/src/logo.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/ell-studio/src/reportWebVitals.js:
--------------------------------------------------------------------------------
1 | const reportWebVitals = onPerfEntry => {
2 | if (onPerfEntry && onPerfEntry instanceof Function) {
3 | import('web-vitals').then(({ getCLS, getFID, getFCP, getLCP, getTTFB }) => {
4 | getCLS(onPerfEntry);
5 | getFID(onPerfEntry);
6 | getFCP(onPerfEntry);
7 | getLCP(onPerfEntry);
8 | getTTFB(onPerfEntry);
9 | });
10 | }
11 | };
12 |
13 | export default reportWebVitals;
14 |
--------------------------------------------------------------------------------
/ell-studio/src/setupTests.js:
--------------------------------------------------------------------------------
1 | // jest-dom adds custom jest matchers for asserting on DOM nodes.
2 | // allows you to do things like:
3 | // expect(element).toHaveTextContent(/react/i)
4 | // learn more: https://github.com/testing-library/jest-dom
5 | import '@testing-library/jest-dom';
6 |
--------------------------------------------------------------------------------
/ell-studio/src/styles/sourceCode.css:
--------------------------------------------------------------------------------
1 | .source-code-container pre {
2 | tab-size: 4;
3 | -moz-tab-size: 4;
4 | }
5 |
6 | .source-code-container code {
7 | display: block;
8 | line-height: 1.5;
9 | }
10 |
11 | .source-code-container code span {
12 | white-space: pre-wrap !important;
13 | }
--------------------------------------------------------------------------------
/ell-studio/src/utils/lmpUtils.js:
--------------------------------------------------------------------------------
1 |
2 | export function getTimeAgo(date) {
3 | const now = new Date();
4 | const secondsPast = (now.getTime() - date.getTime()) / 1000;
5 | if (secondsPast < 60) {
6 | return `${Math.round(secondsPast)} seconds ago`;
7 | }
8 | if (secondsPast < 3600) {
9 | return `${Math.round(secondsPast / 60)} minutes ago`;
10 | }
11 | if (secondsPast <= 86400) {
12 | return `${Math.round(secondsPast / 3600)} hours ago`;
13 | }
14 | if (secondsPast <= 2592000) {
15 | return `${Math.round(secondsPast / 86400)} days ago`;
16 | }
17 | if (secondsPast <= 31536000) {
18 | return `${Math.round(secondsPast / 2592000)} months ago`;
19 | }
20 | return `${Math.round(secondsPast / 31536000)} years ago`;
21 | }
22 |
23 |
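24 | // Example: getTimeAgo(new Date(Date.now() - 90 * 1000)) returns "2 minutes ago"
25 | // because Math.round(90 / 60) rounds 1.5 up to 2.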
--------------------------------------------------------------------------------
/ell-studio/src/utils/lstrCleanStringify.js:
--------------------------------------------------------------------------------
1 | export const lstrCleanStringify = (obj_containing_lstrs, indentLevel = 2) => {
2 | return JSON.stringify(obj_containing_lstrs, (key, value) => {
3 | if (value && value.__lstr === true) {
4 | return value.content;
5 | }
6 | return value;
7 | }, indentLevel)
8 | };
9 |
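10 | // Example: lstrCleanStringify({ msg: { __lstr: true, content: "hi" } }) yields
11 | // '{ "msg": "hi" }' (up to whitespace), since lstr wrappers collapse to their content.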
--------------------------------------------------------------------------------
/ell-studio/tailwind.config.js:
--------------------------------------------------------------------------------
1 | /** @type {import('tailwindcss').Config} */
2 | module.exports = {
3 | darkMode: ["class"],
4 | content: [
5 | "./src/**/*.{js,jsx,ts,tsx}",
6 | ],
7 | theme: {
8 | screens: {
9 | 'sm': '640px',
10 | 'md': '786px', // Increased from the default 768px
11 | 'lg': '1024px',
12 | 'xl': '1280px',
13 | '2xl': '1536px',
14 | },
15 | extend: {
16 | colors: {
17 | border: "hsl(var(--border))",
18 | input: "hsl(var(--input))",
19 | ring: "hsl(var(--ring))",
20 | background: "hsl(var(--background))",
21 | foreground: "hsl(var(--foreground))",
22 | primary: {
23 | DEFAULT: "hsl(var(--primary))",
24 | foreground: "hsl(var(--primary-foreground))",
25 | },
26 | secondary: {
27 | DEFAULT: "hsl(var(--secondary))",
28 | foreground: "hsl(var(--secondary-foreground))",
29 | },
30 | destructive: {
31 | DEFAULT: "hsl(var(--destructive))",
32 | foreground: "hsl(var(--destructive-foreground))",
33 | },
34 | muted: {
35 | DEFAULT: "hsl(var(--muted))",
36 | foreground: "hsl(var(--muted-foreground))",
37 | },
38 | accent: {
39 | DEFAULT: "hsl(var(--accent))",
40 | foreground: "hsl(var(--accent-foreground))",
41 | },
42 | popover: {
43 | DEFAULT: "hsl(var(--popover))",
44 | foreground: "hsl(var(--popover-foreground))",
45 | },
46 | card: {
47 | DEFAULT: "hsl(var(--card))",
48 | foreground: "hsl(var(--card-foreground))",
49 | },
50 | },
51 | borderRadius: {
52 | lg: `var(--radius)`,
53 | md: `calc(var(--radius) - 2px)`,
54 | sm: "calc(var(--radius) - 4px)",
55 | },
56 | keyframes: {
57 | "accordion-down": {
58 | from: { height: "0" },
59 | to: { height: "var(--radix-accordion-content-height)" },
60 | },
61 | "accordion-up": {
62 | from: { height: "var(--radix-accordion-content-height)" },
63 | to: { height: "0" },
64 | },
65 | },
66 | animation: {
67 | "accordion-down": "accordion-down 0.2s ease-out",
68 | "accordion-up": "accordion-up 0.2s ease-out",
69 | },
70 | },
71 | },
72 | plugins: [require("tailwindcss-animate")],
73 | }
--------------------------------------------------------------------------------
/examples/.gitignore:
--------------------------------------------------------------------------------
1 | .mid
--------------------------------------------------------------------------------
/examples/bv.py:
--------------------------------------------------------------------------------
1 | from functools import lru_cache
2 | import ell
3 | from ell.stores.sql import SQLiteStore
4 |
5 | CODE_INSTRUCTIONS = """
6 |
7 | Other Instructions:
8 | - You only respond in code without any commentary (except in the docstrings.)
9 | - Don't respond in markdown just write code!
10 | - It is extremely important that you don't start your code with ```python <...> """
11 |
12 |
13 | class Tests:
14 | pass
15 |
16 |
17 | test = Tests()
18 |
19 | another_serializeable_global = ["asd"]
20 |
21 |
22 | def get_lmp(z=10):
23 | y = 13
24 | y = z
25 |
26 | @ell.simple("gpt-4o-mini", temperature=0.1, max_tokens=6)
27 | def write_a_complete_python_class(user_spec: str):
28 | return [
29 | ell.system(
30 | f"""You are an mid-tier python programmer capable of interpreting a user's spec and writing a python class to accomidate their request. You should document all your code, and you best practices.
31 | {CODE_INSTURCTIONS} {z} {y} {test} {another_serializeable_global}
32 | """
33 | ),
34 | ell.user(user_spec),
35 | ]
36 |
37 | return write_a_complete_python_class
38 |
39 |
40 | if __name__ == "__main__":
41 |     ell.init(verbose=True, store="./logdir", autocommit=True)
42 | # test[0] = "modified at execution :O"
43 | w = get_lmp(z=20)
44 | cls_Def = w("A class that represents a bank")
45 |
--------------------------------------------------------------------------------
/examples/claude.py:
--------------------------------------------------------------------------------
1 | import ell # type: ignore
2 |
3 | @ell.simple(model="claude-3-5-sonnet-20241022", max_tokens=100)
4 | def hello_from_claude():
5 | """You are an AI assistant. Your task is to respond to the user's message with a friendly greeting."""
6 | return "Say hello to the world!!!"
7 |
8 |
9 | if __name__ == "__main__":
10 | ell.init(verbose=True, store="./logdir", autocommit=True)
11 | print(hello_from_claude())
12 |
13 |
--------------------------------------------------------------------------------
/examples/client_example.py:
--------------------------------------------------------------------------------
1 | import ell
2 | import os
3 | import openai
4 |
5 | import ell.lmp.simple
6 |
7 |
8 |
9 |
10 | client = openai.Client(api_key=open(os.path.expanduser("~/.oaikey")).read().strip())
11 |
12 | @ell.simple(model="gpt-4o", temperature=0.1, n=1)
13 | def number_to_words(number: int):
14 | """You are an expert in the english language and convert any number to its word representation, for example 123456 would be one hundred and twenty three thousand four hundred fifty six.
15 | You must always return the word representation and nothing else."""
16 | return f"Convert {number} to its word representation."
17 |
18 | print(number_to_words(123456, client=client))
--------------------------------------------------------------------------------
/examples/diamond_depencies.py:
--------------------------------------------------------------------------------
1 | import random
2 | from typing import List, Tuple
3 | import ell
4 |
5 |
6 | @ell.simple(model="gpt-4o-mini", temperature=1.0)
7 | def random_number() -> str:
8 | """You are silly robot. Only respond with a number."""
9 | return "Come with up with a random number"
10 |
11 | @ell.simple(model="gpt-4o-mini", temperature=1.0)
12 | def write_a_poem(num : str) -> str:
13 | """You are a badass motherfucker. Write a poem that is 4 lines long."""
14 | return f"Write a poem that is {num} lines long"
15 |
16 | @ell.simple(model="gpt-4o-mini", temperature=1.0)
17 | def write_a_story(num : str) -> str:
18 | """You are a story writer. Write a story that is 5 lines long."""
19 | return f"Write a story that is {num} lines long"
20 |
21 | @ell.simple(model="gpt-4o-mini", temperature=1.0)
22 | def choose_which_is_a_better_piece_of_writing(poem : str, story : str) -> str:
23 | """You are a literature critic choose the better piece of literature"""
24 | return f"""
25 | A: {poem}
26 | B: {story}
27 |
28 | Choose the better piece of literature"""
29 |
30 |
31 |
32 | if __name__ == "__main__":
33 | from ell.stores.sql import SQLiteStore
34 | ell.init(store='./logdir', autocommit=True, verbose=True)
35 |
36 |
37 | num = random_number()
38 |
39 |
40 |
41 | poem = write_a_poem(num[0])
42 | story = write_a_story(num)
43 | better_piece = choose_which_is_a_better_piece_of_writing(poem, story)
44 | print(better_piece)
45 |
46 |
47 |
--------------------------------------------------------------------------------
/examples/evals/classification.py:
--------------------------------------------------------------------------------
1 | from collections import UserDict
2 | import time
3 | import random
4 | from typing import Any, Dict, Iterable, Optional, Protocol, List, Union
5 | import ell
6 | import ell.evaluation
7 | import numpy as np
8 |
9 | import ell.lmp.function
10 |
11 |
12 | dataset = [
13 | {
14 | "input": {"question": "What is the capital of france?"},
15 | "expected_output": "Paris",
16 | },
17 | {
18 | "input": {"question": "What is the capital of italy?"},
19 | "expected_output": "Rome",
20 | },
21 | {
22 | "input": {"question": "What is the capital of spain?"},
23 | "expected_output": "Madrid",
24 | },
25 | {
26 | "input": {"question": "What is the capital of germany?"},
27 | "expected_output": "Berlin",
28 | },
29 | {
30 | "input": {"question": "What is the capital of japan?"},
31 | "expected_output": "Tokyo",
32 | },
33 | {
34 | "input": {"question": "What is the capital of china?"},
35 | "expected_output": "Beijing",
36 | },
37 | {
38 | "input": {"question": "What is the capital of india?"},
39 | "expected_output": "New Delhi",
40 | },
41 | {
42 | "input": {"question": "What is the capital of brazil?"},
43 | "expected_output": "Brasília",
44 | },
45 | {
46 | "input": {"question": "What is the capital of argentina?"},
47 | "expected_output": "Buenos Aires",
48 | },
49 | {"input": {"question": "Hotdog land"}, "expected_output": "Banana"},
50 | ]
51 |
52 | def is_correct(datapoint, output):
53 | label = datapoint["expected_output"]
54 | return float(label.lower() in output.lower())
55 |
56 | eval = ell.evaluation.Evaluation(
57 | name="capital_prediction",
58 | dataset=dataset,
59 | metrics={"score": is_correct, "length": lambda _, output: len(output)},
60 | samples_per_datapoint=1,
61 | )
62 | # ell.init(verbose=True, store='./logdir')
63 | @ell.simple(model="gpt-4o", max_tokens=10)
64 | def predict_capital(question: str):
65 | """
66 |     If the question is about hotdog land, answer Banana. Otherwise, answer the question.
67 | """
68 | # print(question[0])
69 | return f"Answer the following question. {question}"
70 |
71 |
72 | if __name__ == "__main__":
73 | ell.init(store="./logdir")
74 | result = eval.run(predict_capital, n_workers=10)
75 | print(result.results.metrics["score"].mean())
76 |
77 |
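78 | # Note: each metric receives (datapoint, output); an additional metric such as
79 | # lambda dp, out: float(out.strip().lower() == dp["expected_output"].lower())
80 | # could be added to the `metrics` dict for exact-match scoring (illustrative).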
--------------------------------------------------------------------------------
/examples/evals/poems.py:
--------------------------------------------------------------------------------
1 | from collections import UserDict
2 | import time
3 | import random
4 | from typing import Any, Dict, Iterable, Optional, Protocol, List, Union
5 | import ell
6 | import ell.evaluation
7 | import numpy as np
8 |
9 | import ell.lmp.function
10 | import logging
11 |
12 |
13 |
14 | @ell.simple(model="gpt-4o")
15 | def write_a_bad_poem():
16 | """Your poem must no logner than 60 words."""
17 | return "Write a really poorly written poem "
18 |
19 | @ell.simple(model="gpt-4o")
20 | def write_a_good_poem():
21 | """Your poem must no logner than 60 words."""
22 | return "Write a really well written poem."
23 |
24 | @ell.simple(model="gpt-4o", temperature=0.1)
25 | def is_good_poem(poem: str):
26 | """Include either 'yes' or 'no' at the end of your response. . ."""
27 | return f"Is this a good poem yes/no? {poem}"
28 |
29 | def score(datapoint, output):
30 | return "yes" in is_good_poem(output).lower()
31 |
32 | ell.init(verbose=True, store="./logdir")
33 | # exit()
34 | eval = ell.evaluation.Evaluation(
35 | name="poem_eval",
36 | n_evals=10,
37 | metrics={
38 | "critic_score": score,
39 | "length": lambda _, output: len(output),
40 | "average_word_length": lambda _, output: sum(
41 | len(word) for word in output.split()
42 | )
43 | / len(output.split()),
44 | },
45 | )
46 |
47 |
48 | print("EVALUATING GOOD POEM")
49 | start = time.time()
50 | # run = eval.run(write_a_good_poem, n_workers=10, verbose=False)
51 | # print(f"Average length: {run.results.metrics['length'].mean():.2f}")
52 | # print(f"Average word length: {run.results.metrics['average_word_length'].mean():.2f}")
53 | # print(f"Average critic score: {run.results.metrics['critic_score'].mean():.2f}")
54 | # print(f"Time taken: {time.time() - start:.2f} seconds")
55 | # print("EVALUATING BAD POEM")
56 | run = eval.run(write_a_bad_poem, n_workers=10, verbose=False)
57 | print(f"Average length: {run.results.metrics['length'].mean():.2f}")
58 | print(
59 | f"Average word length: {run.results.metrics['average_word_length'].mean():.2f}"
60 | )
61 | print(f"Average critic score: {run.results.metrics['critic_score'].mean():.2f}")
--------------------------------------------------------------------------------
/examples/evals/psolve.py:
--------------------------------------------------------------------------------
1 | import ell
2 | from ell.evaluation.evaluation import Evaluation
3 |
4 | ell.init(verbose=True, store='./logdir')
5 |
6 |
7 | @ell.simple(model="gpt-4o", temperature=0.7)
8 | def math_problem_solver(problem: str):
9 | """You are an extremely smart math problem solver. You are given a math problem and you need to solve it. Output your answer in the following format
10 | 'Let's think step by step:
11 |
12 | Answer:\\n{Answer}'
13 |
14 |     Never include any other text except for 'Answer:' followed by a new line and the answer.
15 | """
16 | return problem
17 |
18 |
19 |
20 | import random
21 |
22 | # Set fixed random seed for reproducibility
23 | random.seed(42)
24 |
25 | def generate_arithmetic_dataset(num_examples=100):
26 | operations = ['+', '-', '*', '/']
27 | dataset = []
28 |
29 | for _ in range(num_examples):
30 | # Generate random numbers up to 5 digits
31 | num1 = random.randint(0, 99999)
32 | num2 = random.randint(1, 99999) # Avoid 0 for division
33 | op = random.choice(operations)
34 |
35 | # Calculate result
36 | if op == '+':
37 | result = num1 + num2
38 | elif op == '-':
39 | result = num1 - num2
40 | elif op == '*':
41 | result = num1 * num2
42 | else:
43 | # For division, ensure clean division
44 | result = num1 / num2
45 | # Round to 2 decimal places for division
46 | result = round(result, 2)
47 |
48 | problem = f"What is {num1} {op} {num2}?"
49 | dataset.append({
50 | "input": [problem],
51 | "output": f"Answer:\\n{result}"
52 | })
53 |
54 | return dataset
55 |
56 |
57 | def answer_is_close_l2(datapoint, result):
58 | try:
59 | result_val = float(result.split("Answer:")[1].strip().replace("\\n", ""))
60 | expected_val = float(datapoint["output"].split("Answer:")[1].strip().replace("\\n", ""))
61 | return -abs(result_val - expected_val)
62 |     except Exception:
63 |         return float(-10)  # Return worst possible score if parsing fails
64 |
65 | arithmetic_eval = Evaluation(
66 | name="Arithmetic",
67 | dataset=generate_arithmetic_dataset(),
68 | metrics={"answer_is_close_l2": answer_is_close_l2},
69 | criterion=lambda datapoint, result: result.split("Answer:")[1].strip() in datapoint["output"],
70 | )
71 |
72 |
73 | if __name__ == "__main__":
74 | arithmetic_eval.run(math_problem_solver, n_workers=20)
75 | print(math_problem_solver("What is 2 + 2?"))
--------------------------------------------------------------------------------
/examples/evals/vibes.py:
--------------------------------------------------------------------------------
1 | import ell
2 |
3 | from pydantic import BaseModel
4 |
5 | class TweetInput(BaseModel):
6 | input: str
7 |
8 | @ell.simple(model="gpt-4o")
9 | def tweet(obj: TweetInput):
10 | print(obj)
11 | return f"Write a tweet like roon in lower case about {obj.input}"
12 |
13 |
14 | dataset = [
15 | {"input": [TweetInput(input="Polymath")]},
16 | {"input": [TweetInput(input="Dogs")]},
17 |     {"input": [TweetInput(input="Intelligence")]},
18 | ]
19 |
20 |
21 | # No metrics; we will iterate by just looking at the output.
22 | eval = ell.evaluation.Evaluation(
23 | name="vibes",
24 | dataset=dataset,
25 | criterion=lambda datapoint, output: "roon" in output.lower(),
26 | )
27 |
28 | if __name__ == "__main__":
29 | ell.init(store="./logdir", verbose=True)
30 | eval.run(tweet)
31 | # tweet("hi")
32 |
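33 | # Datapoint inputs may also be given as a dict of keyword arguments rather than
34 | # a positional list (see src/ell/evaluation/util.py), e.g.:
35 | # dataset = [{"input": {"obj": TweetInput(input="Polymath")}}]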
--------------------------------------------------------------------------------
/examples/exa/exa.py:
--------------------------------------------------------------------------------
1 | from exa_py import Exa
2 | import ell
3 | from openai import OpenAI
4 | import os
5 | from pydantic import BaseModel, Field
6 | from dotenv import load_dotenv
7 | import json
8 |
9 | load_dotenv()
10 | exa = Exa(os.getenv("EXA_API_KEY"))
11 |
12 | openai = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
13 |
14 | class ArticleReview(BaseModel):
15 | title: str = Field(description="The title of the article")
16 | summary: str = Field(description="A summary of the article")
17 | rating: int = Field(description="A rating of the article from 1 to 10")
18 |
19 | @ell.complex(model="gpt-4o", client=openai, response_format=ArticleReview)
20 | def generate_article_review(article: str, content: str) -> ArticleReview:
21 |     """You are an article review generator. Given the name of an article and some
22 |     of its content, you need to return a structured review."""
23 | return f"generate a review for the article {article} with content {content}"
24 |
25 | def exa_search(num_results: int):
26 | result = exa.search_and_contents(
27 | "newest climate change articles",
28 | type="neural",
29 | use_autoprompt=True,
30 | start_published_date="2024-09-01",
31 | num_results=num_results,
32 | text=True,
33 | )
34 |     json_data = json.dumps([r.__dict__ for r in result.results])
35 | return json.loads(json_data)
36 |
37 |
38 | def RAG(num_results: int):
39 | search_results = exa_search(num_results)
40 | for i in range(num_results):
41 | result = search_results[i]
42 | review = generate_article_review(result["title"], result["text"])
43 | print(review)
44 |
45 | RAG(3)
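46 | 
47 | # Note: exa_search round-trips results through json.dumps/json.loads purely to
48 | # convert the SDK's Result objects into plain, serializable dicts.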
--------------------------------------------------------------------------------
/examples/future/catmeme.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MadcowD/ell/298dff343429f24118cb224c7cc0322d5b02bef7/examples/future/catmeme.jpg
--------------------------------------------------------------------------------
/examples/future/limbo.py:
--------------------------------------------------------------------------------
1 | from typing import List
2 | import ell
3 | from ell.types.message import Message
4 |
5 |
6 |
7 | ell.init(verbose=True, store='./logdir', autocommit=True)
8 |
9 |
10 | @ell.tool()
11 | def order_t_shirt(size : str, color : str, address : str):
12 |     """Order a t-shirt in the given size and color, shipped to the given address."""
13 |     # ... (stub)
14 |     pass
15 | 
16 |
17 | @ell.tool()
18 | def get_order_arrival_date(order_id: str):
19 | """Gets the arrival date of a t-shirt order"""
20 | # ...
21 |
22 |
23 |
24 | @ell.complex(model="gpt-4o", temperature=0.1, tools=[order_t_shirt, get_order_arrival_date])
25 | def limbo_chat_bot(message_history: List[Message]) -> List[Message]:
26 | return [
27 |         ell.system("You are a chatbot mimicking the pop star limbo. She is an alien cat girl from outer space who writes in all lower case, kawaii! You interact with all her fans and can help them do various things and are always game to hang out and just chat."),
28 | ] + message_history
29 |
30 |
31 | if __name__ == "__main__":
32 | message_history = []
33 |
34 | while True:
35 | user_message = input("You: ")
36 | message_history.append(ell.user(user_message))
37 | response = limbo_chat_bot(message_history)
38 |
39 | print(response)
40 | # print("Limbo: ", response[-1].content)
41 | message_history.append(response)
42 |
43 | if response.tool_calls:
44 | tool_results = response.call_tools_and_collect_as_message()
45 | print("Tool results: ", tool_results)
46 | message_history.append(tool_results)
47 |
48 | response = limbo_chat_bot(message_history)
49 | message_history.append(response)
50 |
--------------------------------------------------------------------------------
/examples/future/meme_maker.py:
--------------------------------------------------------------------------------
1 | from PIL import Image
2 | import numpy as np
3 | import cv2
4 | import os
5 |
6 | import ell
7 | from ell.util.plot_ascii import plot_ascii
8 |
9 |
10 | # Load the cat meme image using PIL
11 | cat_meme_pil = Image.open(os.path.join(os.path.dirname(__file__), "catmeme.jpg"))
12 |
13 | @ell.simple(model="gpt-4o", temperature=0.5)
14 | def make_a_joke_about_the_image(image: Image.Image):
15 | return [
16 | ell.system("You are a meme maker. You are given an image and you must make a joke about it."),
17 | ell.user(image)
18 | ]
19 |
20 |
21 | if __name__ == "__main__":
22 | ell.init(store='./logdir', autocommit=True, verbose=True)
23 | joke = make_a_joke_about_the_image(cat_meme_pil)
24 | print(joke)
--------------------------------------------------------------------------------
/examples/future/multimodal_tool_use.py:
--------------------------------------------------------------------------------
1 | import ell
2 |
3 | from ell import ContentBlock
4 | from PIL import Image
5 | import numpy as np
6 | from ell.types.message import to_content_blocks
7 |
8 | @ell.tool()
9 | def get_user_name():
10 | """
11 | Return the user's name.
12 | """
13 | return "Isac"
14 |
15 |
16 | def generate_strawberry_image():
17 | # Create a 200x200 white image
18 | img = Image.new('RGB', (200, 200), color='white')
19 | pixels = img.load()
20 |
21 | # Draw a red strawberry shape
22 | for x in range(200):
23 | for y in range(200):
24 | dx = x - 100
25 | dy = y - 100
26 | distance = np.sqrt(dx**2 + dy**2)
27 | if distance < 80:
28 | # Red color for the body
29 | pixels[x, y] = (255, 0, 0)
30 | elif distance < 90 and y < 100:
31 | # Green color for the leaves
32 | pixels[x, y] = (0, 128, 0)
33 |
34 | # Add some seeds
35 | for _ in range(50):
36 | seed_x = np.random.randint(40, 160)
37 | seed_y = np.random.randint(40, 160)
38 | if np.sqrt((seed_x-100)**2 + (seed_y-100)**2) < 80:
39 | pixels[seed_x, seed_y] = (255, 255, 0)
40 |
41 | return img
42 |
43 | @ell.tool()
44 | def get_ice_cream_flavors():
45 | """
46 | Return a list of ice cream flavors.
47 | """
48 | #XXX: Nice coercion function needed
49 |     return to_content_blocks(["1. Vanilla", "2.", generate_strawberry_image(), "3. Coconut"])
50 |
51 |
52 | @ell.complex(model="claude-3-5-sonnet-20241022", tools=[get_user_name, get_ice_cream_flavors], max_tokens=1000)
53 | def f(message_history: list[ell.Message]) -> list[ell.Message]:
54 | return [
55 | ell.system(
56 |             "You are a helpful assistant that greets the user and asks them what ice cream flavor they want. Call both tools immediately and then greet the user. Some options will be images; be sure to interpret them."
57 | ),
58 | ell.user("Do it"),
59 | ] + message_history
60 |
61 |
62 | if __name__ == "__main__":
63 | ell.init(verbose=True)
64 | messages = []
65 | while True:
66 | message = f(messages)
67 | messages.append(message)
68 |
69 | if message.tool_calls:
70 | tool_call_response = message.call_tools_and_collect_as_message(
71 | parallel=True, max_workers=2
72 | )
73 | messages.append(tool_call_response)
74 | else:
75 | break
76 |
77 | # print(messages)
--------------------------------------------------------------------------------
/examples/future/parallel_tool_calls.py:
--------------------------------------------------------------------------------
1 | import ell
2 |
3 |
4 | @ell.tool()
5 | def get_user_name():
6 |     """Return the user's name."""
7 |     return "Isac"
8 | 
9 | @ell.tool()
10 | def get_ice_cream_flavors():
11 |     """Return a list of ice cream flavors."""
12 |     return ["Vanilla", "Strawberry", "Coconut"]
13 |
14 | @ell.complex(model="gpt-4o", tools=[get_user_name, get_ice_cream_flavors])
15 | def f(message_history: list[ell.Message]) -> list[ell.Message]:
16 | return [
17 | ell.system(
18 | "You are a helpful assistant that greets the user and asks them what ice cream flavor they want. Call both tools immediately and then greet the user"
19 | )
20 | ] + message_history
21 |
22 |
23 | if __name__ == "__main__":
24 | ell.init(verbose=True)
25 | messages = []
26 | while True:
27 | message = f(messages)
28 | messages.append(message)
29 |
30 | if message.tool_calls:
31 | tool_call_response = message.call_tools_and_collect_as_message(
32 | parallel=True, max_workers=2
33 | )
34 | messages.append(tool_call_response)
35 | else:
36 | break
37 |
38 | print(messages)
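39 | 
40 | # Note: with parallel=True, the tool calls in the assistant message are executed
41 | # concurrently (up to max_workers threads) before being collected into a single
42 | # tool-result message.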
--------------------------------------------------------------------------------
/examples/future/realtimewebcam.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import time
3 | from PIL import Image
4 | import os
5 | from ell.util.plot_ascii import plot_ascii
6 |
7 |
8 | def clear_console():
9 | os.system('cls' if os.name == 'nt' else 'clear')
10 |
11 | def main():
12 | print("Press Ctrl+C to stop the program.")
13 |     cap = cv2.VideoCapture(0)  # 0 selects the default camera
14 |
15 | if not cap.isOpened():
16 | print("Error: Could not open camera.")
17 | return
18 |
19 | try:
20 | while True:
21 | ret, frame = cap.read()
22 | if not ret:
23 | print("Failed to capture image from webcam.")
24 | continue
25 |
26 | frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
27 | frame = Image.fromarray(frame)
28 |
29 | # Resize the frame
30 | # frame = frame.resize((40*4, 30*4), Image.LANCZOS)
31 |
32 | ascii_image = plot_ascii(frame, width=120, color=True)
33 | clear_console()
34 | print("\n".join(ascii_image))
35 |
36 | # Add a small delay to control frame rate
37 | time.sleep(0.05)
38 |
39 | except KeyboardInterrupt:
40 | print("Program stopped by user.")
41 | finally:
42 | cap.release()
43 |
44 | if __name__ == "__main__":
45 | main()
46 |
--------------------------------------------------------------------------------
/examples/future/structured.py:
--------------------------------------------------------------------------------
1 | from typing import List, Optional
2 | import ell
3 | from pydantic import BaseModel, Field
4 |
5 |
6 |
7 |
8 | class Test(BaseModel):
9 | name: str = Field(description="The name of the person")
10 | age: int = Field(description="The age of the person")
11 | height_precise: float = Field(description="The height of the person in meters")
12 | is_cool: bool
13 |
14 | @ell.complex(model='gpt-4o-2024-08-06', response_format=Test)
15 | def create_test(text: str):
16 | """You are a test model. You are given a text and you need to return a pydantic object."""
17 | return "do it!"
18 |
19 |
20 | ell.init(verbose=True, store='./logdir')
21 | import json
22 | if __name__ == "__main__":
23 | result = create_test("ads")
24 | print(result)
25 |
26 |
27 |
--------------------------------------------------------------------------------
/examples/future/tool_using_chatbot.py:
--------------------------------------------------------------------------------
1 | from typing import List
2 | from pydantic import BaseModel, Field
3 | import ell
4 | from ell.types import Message
5 | from ell.stores.sql import SQLiteStore
6 |
7 |
8 |
9 | ell.init(verbose=True, store='./logdir', autocommit=True)
10 |
11 |
12 | @ell.tool()
13 | def create_claim_draft(claim_details: str, claim_type: str, claim_amount: float,
14 | claim_date : str = Field(description="The date of the claim in the format YYYY-MM-DD.")):
15 | """Create a claim draft. Returns the claim id created."""
16 | print("Create claim draft", claim_details, claim_type, claim_amount, claim_date)
17 | return "claim_id-123234"
18 |
19 | @ell.tool()
20 | def approve_claim(claim_id : str):
21 | """Approve a claim"""
22 | return "approved"
23 |
24 | @ell.complex(model="claude-3-5-sonnet-20241022", tools=[create_claim_draft, approve_claim], temperature=0.1, max_tokens=400)
25 | def insurance_claim_chatbot(message_history: List[Message]) -> List[Message]:
26 | return [
27 |         ell.system("""You are an insurance adjuster AI. You are given a dialogue with a user and have access to various tools to effectuate the insurance claim adjustment process. Ask questions until you have enough information to create a claim draft. Then ask for approval."""),
28 | ] + message_history
29 |
30 |
31 |
32 | if __name__ == "__main__":
33 | message_history = []
34 |
35 | # Run through messages automatically!
36 | user_messages = [
37 | "Hello, I'm a customer",
38 | 'I broke my car',
39 | ' smashed by someone else, today, $5k',
40 | 'please file it.'
41 | ]
42 | for user_message in user_messages:
43 | message_history.append(ell.user(user_message))
44 |
45 | message_history.append(response_message := insurance_claim_chatbot(message_history))
46 |
47 | if response_message.tool_calls:
48 | print("Tool call made")
49 | next_message = response_message.call_tools_and_collect_as_message()
50 | print(repr(next_message))
51 | print(next_message.text)
52 | message_history.append(next_message)
53 | insurance_claim_chatbot(message_history)
54 |
--------------------------------------------------------------------------------
/examples/future/use_tool_once.py:
--------------------------------------------------------------------------------
1 | from typing import Optional
2 | from pydantic import BaseModel, Field
3 |
4 | from bs4 import BeautifulSoup
5 | import ell
6 | import requests
7 |
8 | import ell.lmp.tool
9 |
10 | ell.init(verbose=True, store="./logdir", autocommit=True)
11 |
12 |
13 | @ell.tool()
14 | def get_html_content(
15 |     url : str = Field(description="The URL to get the HTML content of. Never include the protocol (like http:// or https://)"),
16 | ):
17 | """Get the HTML content of a URL."""
18 | response = requests.get("https://" + url)
19 | soup = BeautifulSoup(response.text, 'html.parser')
20 |     # print(soup.get_text())
21 | return soup.get_text()[:100]
22 |
23 |
24 | @ell.complex(model="claude-3-5-sonnet-20241022", tools=[get_html_content], max_tokens=200)
25 | def summarize_website(website :str) -> str:
26 | """You are an agent that can summarize the contents of a website."""
27 |     return f"Tell me what's on {website}"
28 |
29 |
30 | if __name__ == "__main__":
31 | output = summarize_website("langchains website")
32 | print(output)
33 | if output.tool_calls:
34 | tool_results = output.call_tools_and_collect_as_message()
35 |
36 | # print(tool_results)
37 | # print(output)
38 |
39 |
40 |
41 |
42 |
--------------------------------------------------------------------------------
/examples/future/weather_example.py:
--------------------------------------------------------------------------------
1 | from pydantic import Field
2 | import ell
3 |
4 | ell.init()
5 |
6 | @ell.tool()
7 | def get_weather(location: str = Field(description="The full name of a city and country, e.g. San Francisco, CA, USA")):
8 | """Get the current weather for a given location."""
9 | # Simulated weather API call
10 | return f"The weather in {location} is sunny."
11 |
12 | @ell.complex(model="gpt-4-turbo", tools=[get_weather])
13 | def travel_planner(destination: str):
14 | """Plan a trip based on the destination and current weather."""
15 | return [
16 | ell.system("You are a travel planner. Use the weather tool to provide relevant advice."),
17 | ell.user(f"Plan a trip to {destination}")
18 | ]
19 |
20 | result = travel_planner("Paris")
21 | print(result.text) # Prints travel advice
22 | if result.tool_calls:
23 | # This is done so that we can pass the tool calls to the language model
24 | tool_results = result.call_tools_and_collect_as_message()
25 | print("Weather info:", (tool_results.text))
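26 | 
27 |     # To complete the loop, `result` and `tool_results` could be appended to a
28 |     # message history and passed back into a history-taking LMP, as in
29 |     # examples/future/tool_using_chatbot.py.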
--------------------------------------------------------------------------------
/examples/future/webcam_activity_describer.py:
--------------------------------------------------------------------------------
1 | from typing import List
2 | import cv2
3 | import time
4 | from PIL import Image
5 | import ell
6 | from ell.types.message import ImageContent
7 | from ell.util.plot_ascii import plot_ascii
8 |
9 | ell.init(verbose=True, store='./logdir', autocommit=True)
10 |
11 | @ell.simple(model="gpt-4o", temperature=0.1)
12 | def describe_activity(image: Image.Image):
13 | return [
14 | ell.system("You are VisionGPT. Answer <5 words all lower case."),
15 | ell.user(["Describe what the person in the image is doing:", ImageContent(image=image, detail="low")])
16 | ]
17 |
18 |
19 | def capture_webcam_image():
20 | cap = cv2.VideoCapture(0)
21 |     # Read several frames so the camera can warm up and auto-expose
22 |     for _ in range(10):
23 |         ret, frame = cap.read()
24 |
25 | cap.release()
26 | if ret:
27 | image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
28 | # Resize the image to a smaller 16:9 size, e.g., 160x90
29 | return image.resize((160, 90), Image.LANCZOS)
30 | return None
31 |
32 | if __name__ == "__main__":
33 |
34 | print("Press Ctrl+C to stop the program.")
35 | try:
36 | while True:
37 | image = capture_webcam_image()
38 | if image:
39 | description = describe_activity(image)
40 | print(f"Activity: {description}")
41 | else:
42 | print("Failed to capture image from webcam.")
43 | time.sleep(1)
44 | except KeyboardInterrupt:
45 | print("Program stopped by user.")
46 |
47 |
--------------------------------------------------------------------------------
/examples/hello_postgres.py:
--------------------------------------------------------------------------------
1 | import ell
2 | import numpy as np
3 |
4 | from ell.stores.sql import PostgresStore
5 |
6 | class MyPrompt:
7 | x : int
8 |
9 | def get_random_length():
10 | return int(np.random.beta(2, 6) * 1500)
11 |
12 | @ell.simple(model="gpt-4o-mini")
13 | def hello(world : str):
14 | """Your goal is to be really mean to the other guy while saying hello"""
15 | name = world.capitalize()
16 | number_of_chars_in_name = get_random_length()
17 |
18 | return f"Say hello to {name} in {number_of_chars_in_name} characters or more!"
19 |
20 |
21 | if __name__ == "__main__":
22 | ell.init(verbose=True, store=PostgresStore('postgresql://postgres:postgres@localhost:5432/ell'), autocommit=True)
23 |
24 | greeting = hello("sam altman") # > "hello sama! ... "
25 |
--------------------------------------------------------------------------------
/examples/hello_world.py:
--------------------------------------------------------------------------------
1 | import ell
2 | import random
3 |
4 | ell.init(store='./logdir', autocommit=True, verbose=True)
5 |
6 | def get_random_adjective():
7 | adjectives = ["enthusiastic", "cheerful", "warm", "friendly", "heartfelt", "sincere"]
8 | return random.choice(adjectives)
9 |
10 | def get_random_punctuation():
11 | return random.choice(["!", "!!", "!!!"])
12 |
13 | @ell.simple(model="gpt-4o")
14 | def hello(name: str):
15 | # """You are a helpful and expressive assistant."""
16 | adjective = get_random_adjective()
17 | punctuation = get_random_punctuation()
18 | return f"Say a {adjective} hello to {name}{punctuation}"
19 |
20 | greeting = hello("Sam Altman")
21 | print(greeting)
--------------------------------------------------------------------------------
/examples/joke.py:
--------------------------------------------------------------------------------
1 |
2 | import ell
3 |
4 | import random
5 | import numpy as np
6 |
7 | from ell.stores.sql import SQLiteStore
8 |
9 | @ell.simple(model="gpt-4o-mini")
10 | def come_up_with_a_premise_for_a_joke_about(topic : str):
11 | """You are an incredibly funny comedian. Come up with a premise for a joke about topic"""
12 | return f"come up with a premise for a joke about {topic}"
13 |
14 |
15 | def get_random_length():
16 | return int(np.random.beta(2, 5) * 300)
17 |
18 | @ell.simple(model="gpt-4o-mini")
19 | def joke(topic : str):
20 | """You are a funny comedian. You respond in scripts for a standup comedy skit."""
21 | return f"Act out a full joke. Make your script {get_random_length()} words long. Here's the premise: {come_up_with_a_premise_for_a_joke_about(topic)}"
22 |
23 |
24 | if __name__ == "__main__":
25 | ell.init(verbose=True, store='./logdir', autocommit=False)
26 |     # TODO: Figure out configuration for autocommitting.
27 | joke("minecraft") #
--------------------------------------------------------------------------------
/examples/multilmp.py:
--------------------------------------------------------------------------------
1 | from typing import List
2 | import ell
3 |
4 | from ell.stores.sql import SQLiteStore
5 |
6 |
7 |
8 | @ell.simple(model="gpt-4o-mini", temperature=1.0)
9 | def generate_story_ideas(about : str):
10 | """You are an expert story ideator. Only answer in a single sentence."""
11 | return f"Generate a story idea about {about}."
12 |
13 | @ell.simple(model="gpt-4o-mini", temperature=1.0)
14 | def write_a_draft_of_a_story(idea : str):
15 | """You are an adept story writer. The story should only be 3 paragraphs."""
16 | return f"Write a story about {idea}."
17 |
18 | @ell.simple(model="gpt-4o", temperature=0.1)
19 | def choose_the_best_draft(drafts : List[str]):
20 | """You are an expert fiction editor."""
21 |     return "Choose the best draft from the following list: " + "\n".join(drafts) + "."
22 |
23 | @ell.simple(model="gpt-4-turbo", temperature=0.2)
24 | def write_a_really_good_story(about : str):
25 |     """You are an expert novelist that writes in the style of Hemingway. You write in lowercase."""
26 |     ideas = generate_story_ideas(about, api_params=dict(n=4))
27 | 
28 |     drafts = [write_a_draft_of_a_story(idea) for idea in ideas]
29 | 
30 |     best_draft = choose_the_best_draft(drafts)
31 | 
32 |     return f"Make a final revision of this story in your voice: {best_draft}."
33 |
34 | if __name__ == "__main__":
35 | from ell.stores.sql import SQLiteStore
36 | ell.init(store='./logdir', autocommit=True, verbose=True)
37 |
38 | # with ell.cache(write_a_really_good_story):
39 | story = write_a_really_good_story("a dog")
40 |
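41 | # Note: api_params=dict(n=4) requests four completions from the model, so
42 | # generate_story_ideas returns a list of four idea strings here.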
--------------------------------------------------------------------------------
/examples/o1.py:
--------------------------------------------------------------------------------
1 | import ell
2 |
3 | @ell.simple(model="o1-preview")
4 | def solve_complex_math_problem(equation: str, variables: dict, constraints: list, optimization_goal: str):
5 | return f"""You are an expert mathematician and problem solver. Please solve the following complex mathematical problem:
6 |
7 | Equation: {equation}
8 | Variables: {variables}
9 | Constraints: {constraints}
10 | Optimization Goal: {optimization_goal}"""
11 |
12 | @ell.simple(model="o1-preview")
13 | def write_plot_code_for_problem_and_solution(solution :str):
14 | return f"""You are an expert programmer and problem solver.
15 | Please write code in python with matplotlib to plot the solution to the following problem: It should work in the terminal. Full script with imports.
16 | IMPORTANT: Do not include any other text only the code.
17 | Solution to plot: {solution}"""
18 |
19 | def solve_and_plot(**kwargs):
20 | solution = solve_complex_math_problem(**kwargs)
21 | plot_code = write_plot_code_for_problem_and_solution(solution)
22 | # remove backticks and ```python
23 | plot_code = plot_code.replace("```python", "").replace("```", "").strip()
24 | exec(plot_code)
25 | return solution
26 |
27 | if __name__ == "__main__":
28 |
29 | ell.init(store='./logdir', autocommit=True, verbose=True)
30 | result = solve_and_plot(
31 | equation="y = ax^2 + bx + c",
32 | variables={"a": 1, "b": -5, "c": 6},
33 | constraints=["x >= 0", "x <= 10"],
34 | optimization_goal="Find the minimum value of y within the given constraints"
35 | )
36 | print(result)
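37 | 
38 |     # Caution: exec() in solve_and_plot runs model-generated code unsandboxed.
39 |     # Fine for a local demo, but review generated code before running it elsewhere.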
--------------------------------------------------------------------------------
/examples/openai_audio.py:
--------------------------------------------------------------------------------
1 | # import ell
2 |
3 |
4 | # # ell.init(verbose=True)
5 |
6 | # # @ell.complex("gpt-4o-audio-preview")
7 | # # def test():
8 | # # return [ell.user("Hey! Could you talk to me in spanish? I'd like to hear how you say 'ell'.")]
9 |
10 | if __name__ == "__main__":
11 | # # response = test()
12 | # # print(response.audios[0])
13 | pass
14 |
15 |
--------------------------------------------------------------------------------
/examples/openai_prompt_caching.py:
--------------------------------------------------------------------------------
1 | from typing import List
2 | import ell
3 |
4 |
5 | @ell.simple(model="gpt-4o-2024-08-06", store=True)
6 | def cached_chat(history : List[str], new_message : str) -> str:
7 | """You are a helpful assistant who chats with the user.
 8 |     Your response should be < 2 sentences."""
 9 |     chat_history = "\n".join(history)
10 |     return f"""Here is the chat history: {chat_history}.
11 |     Please respond to this message:
12 |     {new_message}"""
13 | 
14 | 
15 | ell.init(verbose=True, store='./logdir')
16 | 
17 | 
18 | if __name__ == "__main__":
19 |     while True:
20 |         history = []
21 |         simulate_user_messages = [
22 |             "Hello, how are you?",
23 |             "What's the weather like today?",
24 |             "Can you recommend a good book?",
25 |             "Tell me a joke.",
26 |             "What's your favorite color?",
27 |             "How do you make pancakes?",
28 |         ]
29 | 
30 |         for message in simulate_user_messages:
31 |             response = cached_chat(history, message)
32 |             history.append("User: " + message + "\n")
33 |             history.append("Assistant: " + response + "\n")
34 | 
--------------------------------------------------------------------------------
/examples/providers/anthropic_ex.py:
--------------------------------------------------------------------------------
1 | """
2 | Anthropic example: pip install ell-ai[anthropic]
3 | """
4 | import ell
5 | import anthropic
6 |
7 | ell.init(verbose=True)
8 |
9 | # custom client
10 | client = anthropic.Anthropic()
11 |
12 | @ell.simple(model='claude-3-5-sonnet-20241022', client=client, max_tokens=10)
13 | def chat(prompt: str) -> str:
14 | return prompt
15 |
16 | print(chat("Hello, how are you?"))
17 |
18 | # Models are automatically registered!
19 | @ell.simple(model='claude-3-5-sonnet-20241022', max_tokens=10)
20 | def use_default_client(prompt: str) -> str:
21 | return prompt
22 |
23 | print(use_default_client("Hello, how are you?"))
24 |
25 |
26 |
27 |
--------------------------------------------------------------------------------
/examples/providers/azure_ex.py:
--------------------------------------------------------------------------------
1 | import ell
2 | import openai
3 | import os
4 | ell.init(verbose=True, store='./logdir')
5 |
6 | # your subscription key
7 | subscription_key = os.getenv("AZURE_OPENAI_API_KEY")
8 | # Your Azure OpenAI resource https://.openai.azure.com/
9 | azure_endpoint = "https://.openai.azure.com/"
10 | # Create an Azure OpenAI client
11 | azureClient = openai.AzureOpenAI(
12 | azure_endpoint = azure_endpoint,
13 | api_key = subscription_key,
14 | api_version = "2024-05-01-preview",
15 | )
16 | # (Recommended) Option 1: Register all the models on your Azure resource & use your models automatically
17 | ell.config.register_model("", azureClient)
18 |
19 | @ell.simple(model="")
20 | def write_a_story(about : str):
21 | return f"write me a story about {about}!"
22 |
23 | write_a_story("cats")
24 |
25 |
26 | # Option 2: Use a client directly
27 | azureClient = openai.AzureOpenAI(
28 | azure_endpoint = azure_endpoint,
29 | api_key = subscription_key,
30 | api_version = "2024-05-01-preview",
31 | )
32 |
33 | @ell.simple(model="", client=azureClient)
34 | def write_a_story(about : str):
35 | return f"write me a story about {about}"
36 |
37 | write_a_story("cats")
38 |
--------------------------------------------------------------------------------
/examples/providers/gemini_ex.py:
--------------------------------------------------------------------------------
1 | """
2 | Google Gemini example: pip install ell-ai[google]
3 | """
4 | import ell
5 | from google import genai
6 |
7 | ell.init(verbose=True)
8 |
9 | # Models are registered automatically; create a test image to send with the prompt.
10 |
11 | from PIL import Image, ImageDraw
12 |
13 | # Create a new image with white background
14 | img = Image.new('RGB', (512, 512), 'white')
15 |
16 | # Create a draw object
17 | draw = ImageDraw.Draw(img)
18 |
19 | # Draw a red dot in the middle (using a small filled circle)
20 | center = (256, 256) # Middle of 512x512
21 | radius = 5 # Size of the dot
22 | draw.ellipse([center[0]-radius, center[1]-radius,
23 | center[0]+radius, center[1]+radius],
24 | fill='red')
25 |
26 |
27 | @ell.simple(model='gemini-2.0-flash', max_tokens=10000)
28 | def chat(prompt: str):
29 | return [ell.user([prompt + " what is in this image", img])]
30 |
31 | print(chat("Write me a really long story about"))
--------------------------------------------------------------------------------
/examples/providers/groq_ex.py:
--------------------------------------------------------------------------------
1 | """
2 | Groq example: pip install ell-ai[groq]
3 | """
4 | import ell
5 | import groq
6 |
7 |
8 | ell.init(verbose=True, store='./logdir')
9 |
10 | # (Recommended) Option 1: Register all groq models.
11 | ell.models.groq.register()  # uses the GROQ_API_KEY env var
12 | # ell.models.groq.register(api_key="gsk-")
13 |
14 | @ell.simple(model="llama3-8b-8192", temperature=0.1)
15 | def write_a_story(about : str):
16 | """You are a helpful assistant."""
17 | return f"write me a story about {about}"
18 |
19 | write_a_story("cats")
20 |
21 | # Option 2: Use a client directly
22 | client = groq.Groq()
23 |
24 | @ell.simple(model="llama3-8b-8192", temperature=0.1, client=client)
25 | def write_a_story_with_client(about : str):
26 | """You are a helpful assistant."""
27 | return f"write me a story about {about}"
28 |
29 | write_a_story_with_client("cats")
30 |
31 |
--------------------------------------------------------------------------------
/examples/providers/instructor_ex.py:
--------------------------------------------------------------------------------
1 | """
2 | The following example shows how to implement your own provider to use ell with instructor.
3 | These types of changes won't be added to ell, but you can use this as a starting point to
4 | implement your own provider!
5 | """
6 |
7 | from typing import Any, Callable, Dict, Optional, Tuple, cast
8 | import instructor
9 | from openai import OpenAI
10 | from pydantic import BaseModel
11 |
12 | from ell.provider import EllCallParams, Metadata, Provider
13 | from ell.providers.openai import OpenAIProvider
14 | from ell.types.message import ContentBlock, Message
15 |
16 | import ell
17 |
18 | # Patch the OpenAI client with Instructor
19 | client = instructor.from_openai(OpenAI())
20 |
21 | class InstructorProvider(OpenAIProvider):
22 | def translate_to_provider(self, *args, **kwargs):
23 | """ This translates ell call parameters to the provider call parameters. In this case instructor is just an openai client,
24 | so we can use the openai provider to do the translation. We just need to modify a few parameters because instructor doesn't support streaming."""
25 | api_params= super().translate_to_provider(*args, **kwargs)
26 | # Streaming is not allowed by instructor.
27 | api_params.pop("stream", None)
28 | api_params.pop("stream_options", None)
29 | return api_params
30 |
31 | def translate_from_provider(self,provider_response,
32 | ell_call : EllCallParams,
33 | provider_call_params : Dict[str, Any],
34 | origin_id : str,
35 | logger : Optional[Callable] = None) -> Tuple[Message, Metadata]:
36 | """This translates the provider response (the result of calling client.chat.completions.create with the parameters from translate_to_provider)
37 |         to an ell message. In this case instructor just returns a pydantic type which we can use to create an ell response model. """
38 | instructor_response = cast(BaseModel, provider_response) # This just means that the type is a pydantic BaseModel.
39 | if logger: logger(instructor_response.model_dump_json()) # Don't forget to log for verbose mode!
40 | return Message(role="assistant", content=ContentBlock(parsed=instructor_response)), {}
41 |
42 | # We then register the provider with ell. We will use InstructorProvider any time an instructor.Instructor type client is used.
43 | ell.register_provider(InstructorProvider(), instructor.Instructor)
44 |
45 | class UserDetail(BaseModel):
46 | name: str
47 | age: int
48 |
49 |
50 | @ell.complex(model="gpt-4-turbo-preview", client=client, response_model=UserDetail)
51 | def extract_user(details : str):
52 | return f"Extract {details}"
53 |
54 | ell.init(verbose=True)
55 | print(extract_user("Jason is 25 years old"))
56 |
57 |
58 |
--------------------------------------------------------------------------------
/examples/providers/ollama_ex.py:
--------------------------------------------------------------------------------
1 | """
2 | Ollama example.
3 | """
4 | import ell
5 |
6 | ell.init(verbose=True, store='./logdir')
7 | # Use models automatically registered by asking ollama
8 | ell.models.ollama.register(base_url="http://localhost:11434/v1")
9 |
10 | # in terminal run ollama list to see available models
11 | @ell.simple(model="llama3.1:latest", temperature=0.1)
12 | def write_a_story():
13 | return "write me a story"
14 |
15 | # Or use the client directly
16 | import openai
17 | client = openai.Client(
18 | base_url="http://localhost:11434/v1", api_key="ollama" # required but not used
19 | )
20 | @ell.simple(model="llama3.1:latest", temperature=0.1, max_tokens=100, client=client)
21 | def write_a_story_with_client():
22 | return "write me a short story"
23 |
24 |
--------------------------------------------------------------------------------
/examples/providers/openai_ex.py:
--------------------------------------------------------------------------------
1 | import ell
2 | import openai
3 |
4 | ell.init(verbose=True)
5 |
6 | # custom client
7 | client = openai.Client()
8 |
9 | @ell.simple(model='gpt-4o', client=client)
10 | def chat(prompt: str) -> str:
11 | return prompt
12 |
13 | print(chat("Hello, how are you?"))
14 |
15 | # Models are automatically registered!
16 | @ell.simple(model='gpt-4o')
17 | def use_default_client(prompt: str) -> str:
18 | return prompt
19 |
20 | print(use_default_client("Hello, how are you?"))
21 |
22 |
23 |
24 |
--------------------------------------------------------------------------------
/examples/providers/openrouter_ex.py:
--------------------------------------------------------------------------------
1 | """
2 | OpenRouter example using OpenAI client.
3 | """
4 | from os import getenv
5 | from openai import OpenAI
6 | import ell
7 |
8 | # Initialize OpenAI client with OpenRouter's base URL and API key
9 | openrouter_client = OpenAI(
10 | api_key=getenv("OPENROUTER_API_KEY"),
11 | base_url="https://openrouter.ai/api/v1",
12 | )
13 |
14 | # OpenRouter-specific request parameters passed via `extra_body` (optional)
15 | # For detailed documentation, see "Using OpenAI SDK" at https://openrouter.ai/docs/frameworks
16 | extra_body = {
17 | "provider": {
18 | "allow_fallbacks": True,
19 | "data_collection": "deny",
20 | "order": ["Hyperbolic", "Together"],
21 | "ignore": ["Fireworks"],
22 | "quantizations": ["bf16", "fp8"]
23 | },
24 | # Additional OpenRouter parameters can be added here, e.g.:
25 | # "transforms": ["middle-out"]
26 | }
27 |
28 | @ell.simple(model="meta-llama/llama-3.1-8b-instruct", client=openrouter_client, extra_body=extra_body)
29 | def generate_greeting(name: str) -> str:
30 | """You are a friendly AI assistant."""
31 | return f"Generate a warm, concise greeting for {name}"
32 |
33 | print(f"OpenRouter Preferences Example: {generate_greeting('Mark Zuckerberg')}")
--------------------------------------------------------------------------------
/examples/providers/vllm_ex.py:
--------------------------------------------------------------------------------
1 | """
2 | vLLM example.
3 | """
4 | from openai import OpenAI
5 | import ell
6 | # vllm serve NousResearch/Meta-Llama-3-8B-Instruct --dtype auto --api-key token-abc123
7 |
8 | client = OpenAI(
9 | base_url="http://localhost:8000/v1",
10 | api_key="token-abc123",
11 | )
12 |
13 | @ell.simple(model="NousResearch/Meta-Llama-3-8B-Instruct", client=client, temperature=0.1)
14 | def write_a_story(about : str):
15 | return f"write me a story about {about}"
16 |
17 | # or register models
18 | ell.config.register_model("NousResearch/Meta-Llama-3-8B-Instruct", client)
19 |
20 | # no need to specify client!
21 | @ell.simple(model="NousResearch/Meta-Llama-3-8B-Instruct", temperature=0.1)
22 | def write_a_story_no_client(about : str):
23 | return f"write me a story about {about}"
24 |
25 | write_a_story_no_client("cats")
--------------------------------------------------------------------------------
/examples/providers/xai_ex.py:
--------------------------------------------------------------------------------
1 | import ell
2 | import openai
3 | import os
4 |
5 | ell.init(verbose=True)
6 |
7 | # Models are automatically registered, so we can use them without specifying the client
8 | # set XAI_API_KEY=your_api_key in your environment to run this example
9 | @ell.simple(model='grok-2-mini')
10 | def use_default_xai_client(prompt: str) -> str:
11 | return prompt
12 |
13 | print(use_default_xai_client("Tell me a joke, Grok!"))
14 |
15 |
16 | # If you want to use a custom client you can.
17 | # Custom client for X.AI
18 | xai_client = openai.Client(base_url="https://api.x.ai/v1", api_key=os.getenv("XAI_API_KEY"))
19 |
20 | @ell.simple(model='grok-2', client=xai_client)
21 | def chat_xai(prompt: str) -> str:
22 | return prompt
--------------------------------------------------------------------------------
/examples/quick_chat.py:
--------------------------------------------------------------------------------
1 | import random
2 | from typing import List, Tuple
3 | import ell
4 |
5 |
6 |
7 | names_list = [
8 | "Alice",
9 | "Bob",
10 | "Charlie",
11 | "Diana",
12 | "Eve",
13 | "George",
14 | "Grace",
15 | "Hank",
16 | "Ivy",
17 | "Jack",
18 | ]
19 |
20 |
21 |
22 | @ell.simple(model="gpt-4o-2024-08-06", temperature=1.0)
23 | def create_personality() -> str:
24 |     """You are backstoryGPT. You come up with a backstory for a character including a name. Choose a completely random name from the list. Format as follows.
25 | 
26 | Name: <name>
27 | Backstory: <3 sentence backstory>"""  # System prompt
28 |
29 | return "Come up with a backstory about " + random.choice(names_list) # User prompt
30 |
31 |
32 |
33 |
34 | def format_message_history(message_history : List[Tuple[str, str]]) -> str:
35 | return "\n".join([f"{name}: {message}" for name, message in message_history])
36 |
37 | @ell.simple(model="gpt-4o-2024-08-06", temperature=0.3, max_tokens=20)
38 | def chat(message_history : List[Tuple[str, str]], *, personality : str):
39 |
40 | return [
41 | ell.system(f"""Here is your description.
42 | {personality}.
43 |
44 | Your goal is to come up with a response to a chat. Only respond in one sentence (should be like a text message in informality.) Never use Emojis."""),
45 | ell.user(format_message_history(message_history)),
46 | ]
47 |
48 |
49 |
50 | if __name__ == "__main__":
51 | from ell.stores.sql import SQLiteStore
52 | ell.init(store='./logdir', autocommit=True, verbose=True)
53 |
54 | messages : List[Tuple[str, str]]= []
55 | personalities = [create_personality(), create_personality()]
56 |
57 |
58 |     # lstr (a str subclass) keeps track of its "originator"
59 | names = []
60 | backstories = []
61 | for personality in personalities:
62 | parts = list(filter(None, personality.split("\n")))
63 | names.append(parts[0].split(": ")[1])
64 | backstories.append(parts[1].split(": ")[1])
65 | print(names)
66 |
67 |
68 | whos_turn = 0
69 | for _ in range(10):
70 |
71 | personality_talking = personalities[whos_turn]
72 | messages.append(
73 | (names[whos_turn], chat(messages, personality=personality_talking)))
74 |
75 | whos_turn = (whos_turn + 1) % len(personalities)
76 | print(messages)
77 |
--------------------------------------------------------------------------------
/examples/rag/rag.py:
--------------------------------------------------------------------------------
1 | # you'll need to install scikit-learn as it's not a dependency of ell
2 | from sklearn.feature_extraction.text import TfidfVectorizer
3 | from sklearn.metrics.pairwise import cosine_similarity
4 | import numpy as np
5 | import ell
6 |
7 |
8 | class VectorStore:
9 | def __init__(self, vectorizer, tfidf_matrix, documents):
10 | self.vectorizer = vectorizer
11 | self.tfidf_matrix = tfidf_matrix
12 | self.documents = documents
13 |
14 | @classmethod
15 | def from_documents(cls, documents):
16 | vectorizer = TfidfVectorizer()
17 | tfidf_matrix = vectorizer.fit_transform(documents)
18 | return cls(vectorizer, tfidf_matrix, documents)
19 |
20 | def search(self, query: str, k: int = 2) -> list[dict]:
21 | query_vector = self.vectorizer.transform([query])
22 | similarities = cosine_similarity(query_vector, self.tfidf_matrix).flatten()
23 | top_k_indices = np.argsort(similarities)[-k:][::-1]
24 | return [
25 |             {"document": self.documents[i], "relevance": float(similarities[i])}
26 | for i in top_k_indices
27 | ]
28 |
29 |
30 | @ell.simple(model="gpt-4o-mini")
31 | def rag(query: str, context: str) -> str:
32 | """You are an AI assistant using Retrieval-Augmented Generation (RAG).
33 | RAG enhances your responses by retrieving relevant information from a knowledge base.
34 | You will be provided with a query and relevant context. Use this context to inform your response,
35 | but also draw upon your general knowledge when appropriate.
36 | Always strive to provide accurate, helpful, and context-aware answers."""
37 |
38 | return f"""
39 | Given the following query and relevant context, please provide a comprehensive and accurate response:
40 |
41 | Query: {query}
42 |
43 | Relevant context:
44 | {context}
45 |
46 | Response:
47 | """
48 |
49 |
50 | if __name__ == "__main__":
51 |
52 |
53 | documents = [
54 | "ell is a cool new framework written by will",
55 | "will writes a lot of the code while on x.com the everything app",
56 |         "ell will someday be the go-to tool for getting things done",
57 | "george washington is the current president of the United states of America",
58 | ]
59 |
60 | vector_store = VectorStore.from_documents(documents)
61 |
62 | query = "who created ell?"
63 | context = vector_store.search(query)
64 |
65 | question1 = rag(query, context)
66 |
67 | query = "who is the president of america?"
68 | context = vector_store.search(query)
69 |
70 | question2 = rag(query, context)
71 |
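72 |     # Print the generated answers; @ell.simple LMPs return plain strings
73 |     print(question1)
74 |     print(question2)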
--------------------------------------------------------------------------------
/examples/server_example.py:
--------------------------------------------------------------------------------
1 | from flask import Flask
2 |
3 | import ell
4 |
5 | @ell.simple(model="gpt-4o-mini")
6 | def hello(name: str):
7 | """You are a helpful assistant"""
8 | return f"Write a welcome message for {name}."
9 |
10 | app = Flask(__name__)
11 |
12 |
13 | @app.route('/')
14 | def home():
15 | return hello("world")
16 |
17 | if __name__ == '__main__':
18 | app.run(debug=True)
19 |
--------------------------------------------------------------------------------
/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MadcowD/ell/298dff343429f24118cb224c7cc0322d5b02bef7/logo.png
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.poetry]
2 | name = "ell-ai"
3 | version = "0.0.17"
4 | description = "ell - the language model programming library"
5 | authors = ["William Guss "]
6 | license = "MIT"
7 | readme = "README.md"
8 | repository = "https://github.com/MadcowD/ell"
9 | packages = [
10 | { include = "ell", from = "src" }
11 | ]
12 | homepage = "https://docs.ell.so"
13 | classifiers = [
14 | "Development Status :: 3 - Alpha",
15 | "Intended Audience :: Developers",
16 | "License :: OSI Approved :: MIT License",
17 | "Programming Language :: Python :: 3",
18 |     "Programming Language :: Python :: 3.9",
19 |     "Programming Language :: Python :: 3.10",
20 |     "Programming Language :: Python :: 3.11"
21 | ]
22 | include = [
23 | { path = "src/ell/studio/static", format = ["sdist", "wheel"] },
24 | { path = "src/ell/studio/static/**/*", format = ["sdist", "wheel"] },
25 | { path = "src/ell/util/char_bitmaps.npy", format = ["sdist", "wheel"] }
26 | ]
27 |
28 | [tool.poetry.dependencies]
29 | python = ">=3.9"
30 | numpy = "*"
31 | dill = "*"
32 | colorama = "*"
33 | cattrs = "*"
34 | openai = ">=1.51.0"
35 | requests = "*"
36 | typing-extensions = "*"
37 | black = "*"
38 | pillow = ">=10.4.0"
39 | psutil = "*"
40 |
41 | # Providers
42 | anthropic = { version = ">=0.34.2", optional = true }
43 | groq = { version = ">=0.11.0", optional = true }
44 |
45 | # Storage
46 | psycopg2 = { version = ">=2.7", optional = true }
47 | sqlmodel = { version = ">=0.0.21, <0.1.0", optional = true }
48 |
49 | # Studio
50 | fastapi = { version = "^0.111.1", optional = true }
51 | uvicorn = { version = "^0.30.3", optional = true }
52 | alembic = { version = "^1.14.0", optional = true }
53 | google-genai = { version = "^1.2.0", optional = true }
54 |
55 | [tool.poetry.group.dev.dependencies]
56 | pytest = "*"
57 | sphinx = "*"
58 | sphinx-rtd-theme = "*"
59 |
60 | [tool.poetry.extras]
61 | # N.B. The `openai` dep is always required, but explicitly providing it via an e.g. "openai" extra
62 | # causes poetry to mark it as optional = true (even if explicitly specified optional = false).
63 | anthropic = ["anthropic"]
64 | google = ["google-genai"]
65 | groq = ["groq"]
66 | sqlite = [ 'sqlmodel', 'alembic' ]
67 | postgres = ['sqlmodel', 'psycopg2', 'alembic']
68 | studio = ['fastapi', 'uvicorn', 'sqlmodel', 'alembic']
69 | all = [
70 | "anthropic",
71 | "groq",
72 | "google-genai",
73 | # default storage dependencies
74 | 'sqlmodel',
75 |     # allow running studio by default
76 | 'fastapi',
77 | 'uvicorn',
78 | 'alembic',
79 | ]
80 |
81 | [build-system]
82 | requires = ["poetry-core>=1.0.0"]
83 | build-backend = "poetry.core.masonry.api"
84 |
85 | [tool.poetry.scripts]
86 | ell-studio = "ell.studio.__main__:main"
87 |
88 | # [tool.poetry.build]
89 | # # script = "build.py"
90 |
--------------------------------------------------------------------------------
/src/ell/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | ell is a Python library for language model programming (LMP). It provides a simple
3 | and intuitive interface for working with large language models.
4 | """
5 |
6 | from ell.lmp import simple, tool, complex
7 | from ell.types import system, user, assistant, Message, ContentBlock
8 | from ell.__version__ import __version__
9 | from ell.evaluation import Evaluation
10 |
11 | # Import all providers
12 | from ell import providers
13 |
14 | # Import all models
15 | from ell import models
16 |
17 |
18 | # Import from configurator
19 | from ell.configurator import (
20 | Config,
21 | config,
22 | init,
23 | get_store,
24 | register_provider,
25 | set_store,
26 | )
27 |
28 | __all__ = [
29 | "simple",
30 | "tool",
31 | "complex",
32 | "system",
33 | "user",
34 | "assistant",
35 | "Message",
36 | "ContentBlock",
37 | "__version__",
38 | "providers",
39 | "models",
40 | "Config",
41 | "config",
42 | "init",
43 | "get_store",
44 | "register_provider",
45 | "set_store",
46 | ]
47 |
--------------------------------------------------------------------------------
/src/ell/__version__.py:
--------------------------------------------------------------------------------
1 | try:
2 | from importlib.metadata import version
3 | except ImportError:
4 | from importlib_metadata import version
5 |
6 | __version__ = version("ell-ai")
7 |
--------------------------------------------------------------------------------
/src/ell/contrib/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MadcowD/ell/298dff343429f24118cb224c7cc0322d5b02bef7/src/ell/contrib/__init__.py
--------------------------------------------------------------------------------
/src/ell/evaluation/__init__.py:
--------------------------------------------------------------------------------
1 | from ell.evaluation.evaluation import Evaluation
--------------------------------------------------------------------------------
/src/ell/evaluation/util.py:
--------------------------------------------------------------------------------
1 | from functools import wraps
2 | from ell.evaluation.results import Any, Callable, Datapoint, Dict, List
3 | from ell.configurator import config
4 |
5 | from typing import Any, Dict, List, Union
6 |
7 | from ell.types.message import LMP
8 |
9 | def get_lmp_output(
10 | data_point: Datapoint,
11 | lmp: LMP,
12 | lmp_params: Dict[str, Any],
13 | required_params: bool,
14 | ) -> Union[List[Any], Any]:
15 | if not required_params:
16 | return lmp(**lmp_params)
17 |
18 | inp = data_point.get("input", None)
19 | if isinstance(inp, list):
20 | return lmp(*inp, **lmp_params)
21 | elif isinstance(inp, dict):
22 | return lmp(**inp, **lmp_params)
23 | elif inp is None:
24 | return lmp(**lmp_params)
25 | else:
26 | raise ValueError(f"Invalid input type: {type(inp)}")
27 |
28 |
29 | def validate_callable_dict(
30 | items: Union[Dict[str, Callable], List[Callable]], item_type: str
31 | ) -> Dict[str, Callable]:
32 | if isinstance(items, list):
33 | items_dict = {}
34 | for item in items:
35 | if not callable(item):
36 | raise ValueError(
37 | f"Each {item_type} must be a callable, got {type(item)}"
38 | )
39 |             if not hasattr(item, "__name__") or item.__name__ == "<lambda>":
40 | raise ValueError(
41 | f"Each {item_type} in a list must have a name (not a lambda)"
42 | )
43 | items_dict[item.__name__] = item
44 | return items_dict
45 | elif isinstance(items, dict):
46 | for name, item in items.items():
47 | if not callable(item):
48 | raise ValueError(
49 | f"{item_type.capitalize()} '{name}' must be a callable, got {type(item)}"
50 | )
51 | return items
52 | else:
53 | raise ValueError(
54 | f"{item_type}s must be either a list of callables or a dictionary, got {type(items)}"
55 | )
56 |
57 |
58 | def needs_store(f):
59 | @wraps(f)
60 | def wrapper(*args, **kwargs):
61 | if not config.store:
62 | return
63 | return f(*args, **kwargs)
64 | return wrapper
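65 | 
66 | # Note: needs_store turns the wrapped function into a silent no-op when no store
67 | # is configured, so evaluation persistence is simply skipped rather than erroring.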
--------------------------------------------------------------------------------
/src/ell/lmp/__init__.py:
--------------------------------------------------------------------------------
1 | from ell.lmp.simple import simple
2 | from ell.lmp.complex import complex
3 | from ell.lmp.tool import tool
4 |
5 | __all__ = ["simple", "complex", "tool"]
6 |
--------------------------------------------------------------------------------
/src/ell/lmp/function.py:
--------------------------------------------------------------------------------
1 | from functools import wraps
2 | from typing import Any, Callable
3 | from ell.configurator import config
4 | from ell.lmp._track import _track
5 | from ell.stores.models import LMPType
6 | from ell.util.verbosity import model_usage_logger_pre
7 |
8 | def function(*, exempt_from_tracking: bool = False, _exempt_from_logging: bool = False, type = LMPType.FUNCTION, **function_kwargs):
9 | def function_decorator(fn: Callable[..., Any]):
10 |
11 | @wraps(fn)
12 | def wrapper(*args, _invocation_origin: str = None, **kwargs):
13 | should_log = not exempt_from_tracking and config.verbose and not _exempt_from_logging
14 | if should_log:
15 | model_usage_logger_pre(fn, args, kwargs, "[]", [])
16 |
17 | result = fn(*args, **kwargs)
18 |
19 | return result, {}, {}
20 |
21 | wrapper.__ell_func__ = fn
22 | wrapper.__ell_type__ = type
23 | wrapper.__ell_exempt_from_tracking = exempt_from_tracking
24 |
25 | if exempt_from_tracking:
26 | return wrapper
27 | else:
28 | return _track(wrapper)
29 |
30 | return function_decorator
31 |
32 | # XXX: Fix wrapping of the wrong functional decorator.
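33 | 
34 | # Note: the (result, {}, {}) return value mirrors the tuple shape that _track
35 | # consumes from LMP wrappers, (result, api_params, metadata); both dicts are
36 | # empty here since a plain function has neither.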
--------------------------------------------------------------------------------
/src/ell/models/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Attempts to register model names with their respective API client bindings. This allows for a unified interface for interacting with different LLM providers.
3 |
4 | For example, to register an OpenAI model:
5 | @ell.simple(model='gpt-4o-mini') -> @ell.simple(model='gpt-4o-mini', client=openai.OpenAI())
6 |
7 | """
8 |
9 | from ell.models import openai, anthropic, ollama, groq, bedrock, xai, google
10 |
11 | __all__ = ["openai", "anthropic", "ollama", "groq", "bedrock", "xai", "google"]
12 |
13 |
--------------------------------------------------------------------------------
/src/ell/models/anthropic.py:
--------------------------------------------------------------------------------
1 | from ell.configurator import config
2 | import logging
3 |
4 | logger = logging.getLogger(__name__)
5 |
6 |
7 | try:
8 | import anthropic
9 |
10 | def register(client: anthropic.Anthropic):
11 | """
12 | Register Anthropic models with the provided client.
13 |
14 | This function takes an Anthropic client and registers various Anthropic models
15 | with the global configuration. It allows the system to use these models
16 | for different AI tasks.
17 |
18 | Args:
19 | client (anthropic.Anthropic): An instance of the Anthropic client to be used
20 | for model registration.
21 |
22 | Note:
23 | The function doesn't return anything but updates the global
24 | configuration with the registered models.
25 | """
26 | model_data = [
27 | ('claude-3-opus-20240229', 'anthropic'),
28 | ('claude-3-sonnet-20240229', 'anthropic'),
29 | ('claude-3-haiku-20240307', 'anthropic'),
30 | ('claude-3-5-sonnet-20240620', 'anthropic'),
31 | ('claude-3-5-sonnet-20241022', 'anthropic'),
32 | ('claude-3-5-sonnet-latest', 'anthropic'),
33 | ]
34 | for model_id, owned_by in model_data:
35 | config.register_model(model_id, client)
36 |
37 | try:
38 | default_client = anthropic.Anthropic()
39 | register(default_client)
40 | except Exception as e:
41 | # logger.warning(f"Failed to create default Anthropic client: {e}")
42 | pass
43 |
44 |
45 | except ImportError:
46 | pass
--------------------------------------------------------------------------------
/src/ell/models/bedrock.py:
--------------------------------------------------------------------------------
1 | from typing import Any
2 | from ell.configurator import config
3 | import logging
4 |
5 | logger = logging.getLogger(__name__)
6 |
7 |
8 | def register(client: Any):
9 | """
10 | Register Bedrock models with the provided client.
11 |
12 | This function takes an boto3 client and registers various Bedrock models
13 | with the global configuration. It allows the system to use these models
14 | for different AI tasks.
15 |
16 | Args:
17 | client (boto3.client): An instance of the bedrock client to be used
18 | for model registration.
19 |
20 | Note:
21 | The function doesn't return anything but updates the global
22 | configuration with the registered models.
23 | """
24 | model_data = [
25 | ('anthropic.claude-3-opus-20240229-v1:0', 'bedrock'),
26 | ('anthropic.claude-3-sonnet-20240229-v1:0', 'bedrock'),
27 | ('anthropic.claude-3-haiku-20240307-v1:0', 'bedrock'),
28 | ('anthropic.claude-3-5-sonnet-20240620-v1:0', 'bedrock'),
29 |
30 | ('mistral.mistral-7b-instruct-v0:2', 'bedrock'),
31 | ('mistral.mixtral-8x7b-instruct-v0:1', 'bedrock'),
32 | ('mistral.mistral-large-2402-v1:0', 'bedrock'),
33 | ('mistral.mistral-small-2402-v1:0', 'bedrock'),
34 |
35 |
36 | ('ai21.jamba-instruct-v1:0','bedrock'),
37 | ('ai21.j2-ultra-v1', 'bedrock'),
38 | ('ai21.j2-mid-v1', 'bedrock'),
39 |
40 | ('amazon.titan-embed-text-v1', 'bedrock'),
41 | ('amazon.titan-text-lite-v1', 'bedrock'),
42 | ('amazon.titan-text-express-v1', 'bedrock'),
43 | ('amazon.titan-image-generator-v2:0', 'bedrock'),
44 | ('amazon.titan-image-generator-v1', 'bedrock'),
45 |
46 | ('cohere.command-r-plus-v1:0', 'bedrock'),
47 | ('cohere.command-r-v1:0', 'bedrock'),
48 | ('cohere.embed-english-v3', 'bedrock'),
49 | ('cohere.embed-multilingual-v3', 'bedrock'),
50 | ('cohere.command-text-v14', 'bedrock'),
51 |
52 | ('meta.llama3-8b-instruct-v1:0', 'bedrock'),
53 | ('meta.llama3-70b-instruct-v1:0', 'bedrock'),
54 | ('meta.llama2-13b-chat-v1', 'bedrock'),
55 | ('meta.llama2-70b-chat-v1', 'bedrock'),
56 | ('meta.llama2-13b-v1', 'bedrock'),
57 |
58 | ]
59 |
60 | for model_id, owned_by in model_data:
61 | config.register_model(name=model_id, default_client=client, supports_streaming=True)
62 |
63 | default_client = None
64 | try:
65 |     import boto3
66 |     default_client = boto3.client('bedrock-runtime')
67 | except Exception:
68 |     # boto3 missing or unconfigured; model names are still registered so a
69 |     # client can be attached later via register().
70 |     pass
71 | 
72 | register(default_client)
73 | 
--------------------------------------------------------------------------------
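Unlike the Anthropic module, `register` here is defined unconditionally, so it is also safe to call yourself, for example to pin a region or credentials. A hedged sketch (the region is illustrative):

```python
# Sketch: registering Bedrock models against an explicitly configured boto3 client.
import boto3
import ell.models.bedrock

bedrock_client = boto3.client("bedrock-runtime", region_name="us-east-1")
ell.models.bedrock.register(bedrock_client)
```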
/src/ell/models/groq.py:
--------------------------------------------------------------------------------
1 | from typing import Optional
2 | from ell.configurator import config
3 |
4 | try:
5 | from groq import Groq
6 | def register(client: Optional[Groq] = None, **client_kwargs):
7 | if client is None:
8 | client = Groq(**client_kwargs)
9 | for model in client.models.list().data:
10 | config.register_model(model.id, default_client=client, supports_streaming=True)
11 | except ImportError:
12 | pass
--------------------------------------------------------------------------------
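Note that this module, unlike the others, discovers models dynamically via `client.models.list()` and never auto-registers a default client; you call `register` yourself. A sketch (the key is a placeholder):

```python
# Sketch: registering Groq-hosted models. register() builds a client from
# kwargs when none is passed; the API key below is a placeholder.
import ell.models.groq

ell.models.groq.register(api_key="gsk_...")  # lists and registers all available models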
/src/ell/models/ollama.py:
--------------------------------------------------------------------------------
1 | from ell.configurator import config
2 | import openai
3 | import requests
4 | import logging
5 |
6 | #XXX: May be deprecated soon because of the new provider framework.
7 | logger = logging.getLogger(__name__)
8 | client = None
9 |
10 | def register(base_url):
11 | """
12 | Registers Ollama models with the provided base URL.
13 |
14 | This function sets up the Ollama client with the given base URL and
15 | fetches available models from the Ollama API. It then registers these
16 | models with the global configuration, allowing them to be used within
17 | the ell framework.
18 |
19 | Args:
20 | base_url (str): The base URL of the Ollama API endpoint.
21 |
22 | Note:
23 | This function updates the global client and configuration.
24 | It logs any errors encountered during the process.
25 | """
26 | global client
27 | client = openai.Client(base_url=base_url, api_key="ollama")
28 |
29 | try:
30 |         response = requests.get(f"{base_url}/../api/tags")  # the native Ollama API (model tags) sits one level above the /v1 base
31 | response.raise_for_status()
32 | models = response.json().get("models", [])
33 |
34 | for model in models:
35 | config.register_model(model["name"], client)
36 | except requests.RequestException as e:
37 | logger.error(f"Failed to fetch models from {base_url}: {e}")
38 | except Exception as e:
39 | logger.error(f"An error occurred: {e}")
40 |
41 |
--------------------------------------------------------------------------------
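The `/../` in the tags URL works because `base_url` is expected to be Ollama's OpenAI-compatible `/v1` endpoint, while the native model-listing API sits one level up. A usage sketch, assuming a local Ollama on its default port:

```python
# Sketch: registering locally served Ollama models (default port assumed).
import ell.models.ollama

ell.models.ollama.register("http://localhost:11434/v1")
```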
/src/ell/models/xai.py:
--------------------------------------------------------------------------------
1 | """
2 | This module handles the registration of OpenAI models within the ell framework.
3 |
4 | It provides functionality to register various OpenAI models with a given OpenAI client,
5 | making them available for use throughout the system. The module also sets up a default
6 | client behavior for unregistered models.
7 |
8 | Key features:
9 | 1. Registration of specific OpenAI models with their respective types (system, openai, openai-internal).
10 | 2. Utilization of a default OpenAI client for any unregistered models,
11 |
12 | The default client behavior ensures that even if a specific model is not explicitly
13 | registered, the system can still attempt to use it with the default OpenAI client.
14 | This fallback mechanism provides flexibility in model usage while maintaining a
15 | structured approach to model registration.
16 |
17 | Note: The actual model availability may depend on your OpenAI account's access and the
18 | current offerings from OpenAI.
19 |
20 | Additionally, due to the registration of default mdoels, the OpenAI client may be used for
21 | anthropic, cohere, groq, etc. models if their clients are not registered or fail
22 | to register due to an error (lack of API keys, rate limits, etc.)
23 | """
24 |
25 | import os
26 | from ell.configurator import config
27 | import openai
28 |
29 | import logging
30 | 
31 |
32 | logger = logging.getLogger(__name__)
33 |
34 | def register(client: openai.Client):
35 | """
36 |     Register xAI (Grok) models with the provided client.
37 | 
38 |     This function takes an OpenAI-compatible client and registers the available
39 |     Grok models with the global configuration. It allows the system to use
40 |     these models for different AI tasks.
41 | 
42 |     Args:
43 |         client (openai.Client): An OpenAI-compatible client pointed at the xAI
44 |             API, to be used for model registration.
45 |
46 | Note:
47 | The function doesn't return anything but updates the global
48 | configuration with the registered models.
49 | """
50 | standard_models = [
51 | 'grok-2-mini',
52 | 'grok-2',
53 | 'grok-2-mini-public',
54 | 'grok-2-public',
55 | ]
56 | for model_id in standard_models:
57 | config.register_model(model_id, client)
58 |
59 |
60 | default_client = None
61 | try:
62 |     xai_api_key = os.environ.get("XAI_API_KEY")
63 |     if not xai_api_key:
64 |         raise openai.OpenAIError("XAI_API_KEY not found in environment variables")
65 |     default_client = openai.Client(base_url="https://api.x.ai/v1", api_key=xai_api_key)
66 | except openai.OpenAIError:
67 |     pass
68 | 
69 | register(default_client)
70 | # Only take over the global default client when an xAI client was actually
71 | # created; otherwise this would clobber it with None.
72 | if default_client is not None:
73 |     config.default_client = default_client
--------------------------------------------------------------------------------
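As with Bedrock, you can bypass the environment-variable lookup and register an explicitly constructed client. A sketch (key handling is illustrative):

```python
# Sketch: registering Grok models with an explicitly constructed xAI client.
import os
import openai
import ell.models.xai

client = openai.Client(base_url="https://api.x.ai/v1",
                       api_key=os.environ["XAI_API_KEY"])
ell.models.xai.register(client)
```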
/src/ell/providers/__init__.py:
--------------------------------------------------------------------------------
1 | from ell.providers import anthropic, bedrock, groq, openai, google
2 |
3 | # Disabled providers
4 | # from ell.providers import mistral, cohere, gemini, elevenlabs, replicate, huggingface
5 |
6 | __all__ = ["openai", "groq", "anthropic", "bedrock", "google"]
7 |
--------------------------------------------------------------------------------
/src/ell/providers/groq.py:
--------------------------------------------------------------------------------
1 | """
2 | Groq provider.
3 | """
4 |
5 | from ell.providers.openai import OpenAIProvider
6 | from ell.configurator import register_provider
7 |
8 |
9 | try:
10 | import groq
11 | class GroqProvider(OpenAIProvider):
12 | dangerous_disable_validation = True
13 | def translate_to_provider(self, *args, **kwargs):
14 | params = super().translate_to_provider(*args, **kwargs)
15 | params.pop('stream_options', None)
16 | assert 'response_format' not in params, 'Groq does not support response_format.'
17 | params['messages'] = messages_to_groq_message_format(params['messages'])
18 | return params
19 |
20 | def translate_from_provider(self, *args, **kwargs):
21 | res, meta = super().translate_from_provider(*args, **kwargs)
22 | if not meta['usage']:
23 | meta['usage'] = meta['x_groq']['usage']
24 | return res, meta
25 | register_provider(GroqProvider(), groq.Client)
26 | except ImportError:
27 | pass
28 |
29 | def messages_to_groq_message_format(messages):
30 | """Assistant messages to Groq must take the format: {'role': 'assistant', 'content': }"""
31 | # XXX: Issue #289: groq.BadRequestError: Error code: 400 - {'error': {'message': "'messages.1' : for 'role:assistant' the following must be satisfied[('messages.1.content' : value must be a string)]", 'type': 'invalid_request_error'}}
32 | new_messages = []
33 | for message in messages:
34 | if message['role'] == 'assistant':
35 | # Assistant messages must be strings
36 | # If content is a list, only one string element is allowed
37 | if isinstance(message['content'], str):
38 | new_messages.append({'role': 'assistant', 'content': message['content']})
39 | elif isinstance(message['content'], list) and len(message['content']) == 1 and message['content'][0]['type'] == 'text':
40 | new_messages.append({'role': 'assistant', 'content': message['content'][0]['text']})
41 | else:
42 | raise ValueError('Groq assistant messages must contain exactly one string content.')
43 | else:
44 | new_messages.append(message)
45 |
46 | return new_messages
47 |
48 |
--------------------------------------------------------------------------------
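The effect of the message rewrite is easiest to see on a concrete conversation: multi-part assistant content is flattened to a bare string, which is what the Groq 400 error in issue #289 demands:

```python
from ell.providers.groq import messages_to_groq_message_format

messages = [
    {"role": "user", "content": "Hi"},
    {"role": "assistant", "content": [{"type": "text", "text": "Hello!"}]},
]
print(messages_to_groq_message_format(messages))
# [{'role': 'user', 'content': 'Hi'},
#  {'role': 'assistant', 'content': 'Hello!'}]
```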
/src/ell/py.typed:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MadcowD/ell/298dff343429f24118cb224c7cc0322d5b02bef7/src/ell/py.typed
--------------------------------------------------------------------------------
/src/ell/stores/__init__.py:
--------------------------------------------------------------------------------
1 | try:
2 | import sqlmodel
3 | except ImportError:
4 | raise ImportError(
5 | 'ell.stores has missing dependencies. Install them with `pip install -U "ell-ai[sqlite]"` or `pip install -U "ell-ai[postgres]"`. More info: https://docs.ell.so/installation#custom-installation')
6 |
--------------------------------------------------------------------------------
/src/ell/stores/migrations/README.md:
--------------------------------------------------------------------------------
1 | # ell Database Migrations
2 |
3 | This directory contains the database migration utilities for ell's SQL storage backend. The migration system uses Alembic to handle schema changes and version control for the database.
4 |
5 | ## Overview
6 |
7 | The migration system handles:
8 | - Initial schema creation
9 | - Schema updates and changes
10 | - Version tracking of database changes
11 | - Automatic migration detection and application
12 |
13 | ## Key Components
14 |
15 | - `versions/`: Contains individual migration scripts
16 | - `env.py`: Alembic environment configuration
17 | - `script.py.mako`: Template for generating new migrations
18 | - `make.py`: Utility script for creating new migrations
19 |
20 | ## Usage
21 |
22 | ### Creating a New Migration
23 | ```bash
24 | python -m ell.stores.migrations.make "your migration message"
25 | ```
26 |
27 | This will:
28 | 1. Create a temporary SQLite database
29 | 2. Detect schema changes
30 | 3. Generate a new migration file in the versions directory
31 |
32 | ### Applying Migrations
33 |
34 | Migrations are automatically handled by the `init_or_migrate_database()` function in `ell.stores.sql`. When initializing an ell store, it will:
35 |
36 | 1. Check for existing tables
37 | 2. Apply any pending migrations
38 | 3. Initialize new databases with the latest schema
39 |
40 | ## Migration Files
41 |
42 | Each migration file contains:
43 | - Unique revision ID
44 | - Dependencies on other migrations
45 | - `upgrade()` function for applying changes
46 | - `downgrade()` function for reverting changes
47 |
48 | For examples, see the existing migrations in the `versions/` directory.
--------------------------------------------------------------------------------
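For completeness, a sketch of the "automatic" path the README describes; the store class name is assumed from ell's SQL backend layout and may differ:

```python
# Sketch: opening a SQL-backed store runs init_or_migrate_database() under the
# hood, creating new databases at the latest schema or applying pending
# migrations. SQLiteStore is the assumed class name in ell.stores.sql.
from ell.stores.sql import SQLiteStore

store = SQLiteStore("./ell_storage")
```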
/src/ell/stores/migrations/make.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | from sqlalchemy import create_engine
3 | from ell.stores.migrations import get_alembic_config
4 | from alembic import command
5 |
6 | def main():
7 | parser = argparse.ArgumentParser(description='Create a new database migration')
8 | parser.add_argument('message', help='Migration message/description')
9 |
10 | args = parser.parse_args()
11 |
12 | # Create temporary directory for SQLite database
13 | import tempfile
14 | from pathlib import Path
15 |
16 | with tempfile.TemporaryDirectory() as tmpdir:
17 | db_path = Path(tmpdir) / "temp.db"
18 | engine = create_engine(f"sqlite:///{db_path}")
19 | alembic_cfg = get_alembic_config(str(engine.url))
20 |
21 | # First, upgrade to head to get to latest migration state
22 | command.upgrade(alembic_cfg, "head")
23 |
24 | # Now generate new migration
25 | command.revision(alembic_cfg,
26 | message=args.message,
27 | autogenerate=True)
28 |
29 | print(f"✨ Created new migration with message: {args.message}")
30 |
31 | if __name__ == '__main__':
32 | main()
--------------------------------------------------------------------------------
/src/ell/stores/migrations/script.py.mako:
--------------------------------------------------------------------------------
1 | """${message}
2 |
3 | Revision ID: ${up_revision}
4 | Revises: ${down_revision | comma,n}
5 | Create Date: ${create_date}
6 |
7 | """
8 | from typing import Sequence, Union
9 |
10 | from alembic import op
11 | import sqlalchemy as sa
12 | import sqlmodel
13 | import ell.stores.models.core
14 | ${imports if imports else ""}
15 |
16 | # revision identifiers, used by Alembic.
17 | revision: str = ${repr(up_revision)}
18 | down_revision: Union[str, None] = ${repr(down_revision)}
19 | branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)}
20 | depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)}
21 |
22 |
23 | def upgrade() -> None:
24 | ${upgrades if upgrades else "pass"}
25 |
26 |
27 | def downgrade() -> None:
28 | ${downgrades if downgrades else "pass"}
29 |
--------------------------------------------------------------------------------
/src/ell/stores/models/__init__.py:
--------------------------------------------------------------------------------
1 | from .core import *
2 | from .evaluations import *
3 |
--------------------------------------------------------------------------------
/src/ell/studio/__init__.py:
--------------------------------------------------------------------------------
1 | try:
2 | import fastapi
3 | import ell.stores
4 | except ImportError:
5 | raise ImportError(
6 | 'ell.studio is missing dependencies. Install them with `pip install -U "ell-ai[studio]"`. More info: https://docs.ell.so/installation#custom-installation')
7 |
--------------------------------------------------------------------------------
/src/ell/studio/config.py:
--------------------------------------------------------------------------------
1 | from functools import lru_cache
2 | import os
3 | from typing import Optional
4 | from pydantic import BaseModel
5 |
6 | import logging
7 |
8 | logger = logging.getLogger(__name__)
9 |
10 |
11 | # TODO: consider defaulting storage_dir and other settings to a well-known
12 | # location such as ~/.ell in the future.
13 | @lru_cache
14 | def ell_home() -> str:
15 | return os.path.join(os.path.expanduser("~"), ".ell")
16 |
17 |
18 | class Config(BaseModel):
19 | pg_connection_string: Optional[str] = None
20 | storage_dir: Optional[str] = None
21 |
22 | @classmethod
23 | def create(
24 | cls,
25 | storage_dir: Optional[str] = None,
26 | pg_connection_string: Optional[str] = None,
27 | ) -> 'Config':
28 | pg_connection_string = pg_connection_string or os.getenv("ELL_PG_CONNECTION_STRING")
29 | storage_dir = storage_dir or os.getenv("ELL_STORAGE_DIR")
30 |
31 | # Enforce that we use either sqlite or postgres, but not both
32 | if pg_connection_string is not None and storage_dir is not None:
33 | raise ValueError("Cannot use both sqlite and postgres")
34 |
35 | # For now, fall back to sqlite if no PostgreSQL connection string is provided
36 | if pg_connection_string is None and storage_dir is None:
37 | # This intends to honor the default we had set in the CLI
38 | storage_dir = os.getcwd()
39 |
40 | return cls(pg_connection_string=pg_connection_string, storage_dir=storage_dir)
41 |
--------------------------------------------------------------------------------
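`Config.create` enforces exactly one storage backend. A quick sketch of the accepted combinations:

```python
from ell.studio.config import Config

Config.create(storage_dir="./ell_storage")  # sqlite-backed
Config.create(pg_connection_string="postgresql://user:pass@localhost/ell")  # postgres
Config.create()  # neither given: falls back to sqlite in the current working directory
# Passing both storage_dir and pg_connection_string raises ValueError.
```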
/src/ell/studio/connection_manager.py:
--------------------------------------------------------------------------------
1 | from fastapi import WebSocket
2 |
3 |
4 | class ConnectionManager:
5 | def __init__(self):
6 | self.active_connections = []
7 |
8 | async def connect(self, websocket: WebSocket):
9 | await websocket.accept()
10 | self.active_connections.append(websocket)
11 |
12 | def disconnect(self, websocket: WebSocket):
13 | self.active_connections.remove(websocket)
14 |
15 | async def broadcast(self, message: str):
16 | for connection in self.active_connections:
17 |             # Fan the message out to every active websocket connection.
18 |             await connection.send_text(message)
--------------------------------------------------------------------------------
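A sketch of how this manager is typically wired into a FastAPI websocket route (the route path and app wiring are illustrative, not ell.studio's actual endpoints):

```python
from fastapi import FastAPI, WebSocket, WebSocketDisconnect
from ell.studio.connection_manager import ConnectionManager

app = FastAPI()
manager = ConnectionManager()

@app.websocket("/ws")
async def websocket_endpoint(websocket: WebSocket):
    await manager.connect(websocket)
    try:
        while True:
            # Echo every received message to all connected clients.
            await manager.broadcast(await websocket.receive_text())
    except WebSocketDisconnect:
        manager.disconnect(websocket)
```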
/src/ell/types/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | The primary types used in ell
3 | """
4 |
5 | from ell.types.message import (
6 | InvocableTool,
7 | AnyContent,
8 | ToolResult,
9 | ToolCall,
10 | ImageContent,
11 | ContentBlock,
12 | to_content_blocks,
13 | Message,
14 | system,
15 | user,
16 | assistant,
17 | _content_to_text_only,
18 | _content_to_text,
19 | )
20 | from ell.types._lstr import _lstr
21 |
22 | __all__ = [
23 | "InvocableTool",
24 | "AnyContent",
25 | "ToolResult",
26 | "ToolCall",
27 | "ImageContent",
28 | "ContentBlock",
29 | "to_content_blocks",
30 | "Message",
31 | "system",
32 | "user",
33 | "assistant",
34 | "_content_to_text_only",
35 | "_content_to_text",
36 | "_lstr",
37 | ]
38 |
--------------------------------------------------------------------------------
/src/ell/types/lmp.py:
--------------------------------------------------------------------------------
1 | import enum
2 |
3 |
4 | class LMPType(str, enum.Enum):
5 | LM = "LM"
6 | TOOL = "TOOL"
7 | LABELER = "LABELER"
8 | FUNCTION = "FUNCTION"
9 | OTHER = "OTHER"
--------------------------------------------------------------------------------
/src/ell/util/WARNING:
--------------------------------------------------------------------------------
1 | THIS MODULE WILL BE DEPRECATED. WE ARE MOVING TO LOCAL UTIL MODULES:
2 | ```
3 | project/
4 | │
5 | ├── module1/
6 | │ ├── __init__.py
7 | │ ├── core.py
8 | │ └── util.py
9 | │
10 | ├── module2/
11 | │ ├── __init__.py
12 | │ ├── main.py
13 | │ └── util.py
14 | │
15 | ├── module3/
16 | │ ├── __init__.py
17 | │ ├── handler.py
18 | │ └── util.py
19 | │
20 | └── common/
21 | ├── __init__.py
22 | └── util.py
23 | ```
--------------------------------------------------------------------------------
/src/ell/util/__init__.py:
--------------------------------------------------------------------------------
1 | from .closure import lexically_closured_source
2 |
3 | __all__ = ["lexically_closured_source"]
4 |
--------------------------------------------------------------------------------
/src/ell/util/char_bitmaps.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MadcowD/ell/298dff343429f24118cb224c7cc0322d5b02bef7/src/ell/util/char_bitmaps.npy
--------------------------------------------------------------------------------
/src/ell/util/should_import.py:
--------------------------------------------------------------------------------
1 | import importlib.util
2 | import os
3 | import site
4 | import sys
5 | import sysconfig
6 | from pathlib import Path
7 |
8 |
9 | def should_import(module_name: str, raise_on_error: bool = False) -> bool:
10 | """
11 | Determines whether a module should be imported based on its origin.
12 | Excludes local modules and standard library modules.
13 |
14 |     Args:
15 |         module_name (str): The name of the module to check.
16 |         raise_on_error (bool): If True, re-raise unexpected errors instead of assuming importability.
17 | Returns:
18 | bool: True if the module should be imported (i.e., it's a third-party module), False otherwise.
19 | """
20 | if module_name.startswith("ell"):
21 | return True
22 | try:
23 | try:
24 | spec = importlib.util.find_spec(module_name)
25 | except ValueError:
26 | return False
27 | if spec is None:
28 | return False
29 |
30 | origin = spec.origin
31 | if origin is None:
32 | return False
33 | if spec.has_location:
34 | origin_path = Path(origin).resolve()
35 |
36 | site_packages = list(site.getsitepackages()) + (list(site.getusersitepackages()) if isinstance(site.getusersitepackages(), list) else [site.getusersitepackages()])
37 |
38 | additional_paths = [Path(p).resolve() for p in sys.path if Path(p).resolve() not in map(Path, site_packages)]
39 |
40 | project_root = Path(os.environ.get("ELL_PROJECT_ROOT", os.getcwd())).resolve()
41 |
42 | site_packages_paths = [Path(p).resolve() for p in site_packages]
43 | stdlib_path = sysconfig.get_paths().get("stdlib")
44 | if stdlib_path:
45 | site_packages_paths.append(Path(stdlib_path).resolve())
46 |
47 | additional_paths = [Path(p).resolve() for p in additional_paths]
48 | local_paths = [project_root]
49 |
50 | cwd = Path.cwd().resolve()
51 | additional_paths = [path for path in additional_paths if path != cwd]
52 |
53 | for pkg in site_packages_paths:
54 | if origin_path.is_relative_to(pkg):
55 | return True
56 |
57 | for path in additional_paths:
58 | if origin_path.is_relative_to(path):
59 | return False
60 |
61 | for local in local_paths:
62 | if origin_path.is_relative_to(local):
63 | return False
64 |
65 | return True
66 |
67 | except Exception as e:
68 | if raise_on_error:
69 | raise e
70 | return True
--------------------------------------------------------------------------------
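In practice the function answers "is this importable as a third-party dependency, or is it local code that ell's closure capture should inline instead?":

```python
from ell.util.should_import import should_import

print(should_import("numpy"))           # True (if installed): resolves into site-packages
print(should_import("ell"))             # True: ell modules are always importable
print(should_import("no_such_module"))  # False: no module spec can be found
```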
/src/ell/util/tqdm.py:
--------------------------------------------------------------------------------
1 | """Copyright (c) 2024, the tiny corp"""
2 | import math
3 | import shutil
4 | import sys
5 | import time
6 | from typing import Optional
7 |
8 |
9 | class tqdm:
10 | def __init__(self, iterable=None, desc:str='', disable:bool=False, unit:str='it', unit_scale=False, total:Optional[int]=None, rate:int=100):
11 | self.iterable, self.disable, self.unit, self.unit_scale, self.rate = iterable, disable, unit, unit_scale, rate
12 | self.st, self.i, self.n, self.skip, self.t = time.perf_counter(), -1, 0, 1, getattr(iterable, "__len__", lambda:0)() if total is None else total
13 | self.set_description(desc)
14 | self.update(0)
15 | def __iter__(self):
16 | for item in self.iterable:
17 | yield item
18 | self.update(1)
19 | self.update(close=True)
20 | def set_description(self, desc:str): self.desc = f"{desc}: " if desc else ""
21 | def update(self, n:int=0, close:bool=False):
22 | self.n, self.i = self.n+n, self.i+1
23 | if self.disable or (not close and self.i % self.skip != 0): return
24 | prog, elapsed, ncols = self.n/self.t if self.t else 0, time.perf_counter()-self.st, shutil.get_terminal_size().columns
25 | if self.i/elapsed > self.rate and self.i: self.skip = max(int(self.i/elapsed)//self.rate,1)
26 | def HMS(t): return ':'.join(f'{x:02d}' if i else str(x) for i,x in enumerate([int(t)//3600,int(t)%3600//60,int(t)%60]) if i or x)
27 | def SI(x): return (f"{x/1000**int(g:=math.log(x,1000)):.{int(3-3*math.fmod(g,1))}f}"[:4].rstrip('.')+' kMGTPEZY'[int(g)].strip()) if x else '0.00'
28 | prog_text = f'{SI(self.n)}{f"/{SI(self.t)}" if self.t else self.unit}' if self.unit_scale else f'{self.n}{f"/{self.t}" if self.t else self.unit}'
29 | elapsed_text = HMS(elapsed) + (f'<{HMS(elapsed/prog-elapsed) if self.n else "?"}' if self.t else '')
30 | it_text = (SI(self.n/elapsed) if self.unit_scale else f"{self.n/elapsed:5.2f}") if self.n else "?"
31 | suf = f'{prog_text} [{elapsed_text}, {it_text}{self.unit}/s]'
32 | sz = max(ncols-len(self.desc)-3-2-2-len(suf), 1)
33 | bar = '\r' + self.desc + (f'{100*prog:3.0f}%|{("█"*int(num:=sz*prog)+" ▏▎▍▌▋▊▉"[int(8*num)%8].strip()).ljust(sz," ")}| ' if self.t else '') + suf
34 | print(bar[:ncols+1], flush=True, end='\n'*close, file=sys.stderr)
35 |
36 | class trange(tqdm):
37 | def __init__(self, n:int, **kwargs): super().__init__(iterable=range(n), total=n, **kwargs)
--------------------------------------------------------------------------------
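A minimal usage sketch of this vendored progress bar; it mirrors the common tqdm API surface (`desc`, `unit`, `unit_scale`, `total`):

```python
import time
from ell.util.tqdm import tqdm, trange

for _ in trange(50, desc="demo"):
    time.sleep(0.01)

for _ in tqdm(range(10_000), desc="throughput", unit="B", unit_scale=True):
    pass
```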
/tailwind.config.js:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | darkMode: ["class"],
3 | content: [
4 | "./src/**/*.{js,jsx,ts,tsx}",
5 | ],
6 | theme: {
7 | extend: {
8 | colors: {
9 | border: "hsl(var(--border))",
10 | input: "hsl(var(--input))",
11 | ring: "hsl(var(--ring))",
12 | background: "hsl(var(--background))",
13 | foreground: "hsl(var(--foreground))",
14 | primary: {
15 | DEFAULT: "hsl(var(--primary))",
16 | foreground: "hsl(var(--primary-foreground))",
17 | },
18 | secondary: {
19 | DEFAULT: "hsl(var(--secondary))",
20 | foreground: "hsl(var(--secondary-foreground))",
21 | },
22 | destructive: {
23 | DEFAULT: "hsl(var(--destructive))",
24 | foreground: "hsl(var(--destructive-foreground))",
25 | },
26 | muted: {
27 | DEFAULT: "hsl(var(--muted))",
28 | foreground: "hsl(var(--muted-foreground))",
29 | },
30 | accent: {
31 | DEFAULT: "hsl(var(--accent))",
32 | foreground: "hsl(var(--accent-foreground))",
33 | },
34 | popover: {
35 | DEFAULT: "hsl(var(--popover))",
36 | foreground: "hsl(var(--popover-foreground))",
37 | },
38 | card: {
39 | DEFAULT: "hsl(var(--card))",
40 | foreground: "hsl(var(--card-foreground))",
41 | },
42 | },
43 | keyframes: {
44 | highlight: {
45 | '0%': { backgroundColor: 'rgba(59, 130, 246, 0.5)' },
46 | '100%': { backgroundColor: 'rgba(59, 130, 246, 0)' },
47 | }
48 | },
49 | animation: {
50 | highlight: 'highlight 1s ease-in-out',
51 | }
52 | },
53 | },
54 | plugins: [
55 | function ({ addUtilities }) {
56 | const newUtilities = {
57 | '.text-shadow': {
58 | textShadow: '0 1px 2px rgba(0, 0, 0, 0.2)',
59 | },
60 | };
61 | addUtilities(newUtilities, ['responsive', 'hover']);
62 | },
63 | ],
64 | }
--------------------------------------------------------------------------------
/tests/.exampleignore:
--------------------------------------------------------------------------------
1 | # .exampleignore
2 | webcam*.py
3 | o1_graph.py
4 | realtimewebcam.py
5 | webcam_activity_describer.py
6 | openaigym.py
7 | o1.py
8 | chord*
9 | server_example.py
10 | limbo.py
11 | providers/bedrock_ex.py
12 | azure_ex.py
13 | openrouter_ex.py
14 | vllm_ex.py
15 | *_ex.py
16 | bedrock_hello.py
17 | hello_postgres.py
18 | exa/exa.py
19 | exa.py
20 | wikipedia_mini_rag.py
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MadcowD/ell/298dff343429f24118cb224c7cc0322d5b02bef7/tests/__init__.py
--------------------------------------------------------------------------------
/tests/conftest.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | 
3 | @pytest.fixture(autouse=True)
4 | def setup_test_env():
5 |     # Placeholder for shared per-test environment setup/teardown.
6 |     yield
7 | 
--------------------------------------------------------------------------------
/tests/test_results.py:
--------------------------------------------------------------------------------
1 | from ell.evaluation.results import _ResultDatapoint, EvaluationResults, Label
2 | from ell.stores.models.evaluations import EvaluationLabelerType
3 | import numpy as np
4 |
5 | def test_evaluation_results_from_rowar_results():
6 | # Test that from_rowar_results correctly converts rowar_results to EvaluationResults
7 | rowar_results = [
8 | _ResultDatapoint(
9 | output=("output1", "id1"),
10 | labels=[
11 | Label(name="metric1", type=EvaluationLabelerType.METRIC, label=(0.95, "id1")),
12 | Label(name="annotation1", type=EvaluationLabelerType.ANNOTATION, label=("anno1", "id1")),
13 | Label(name="criterion", type=EvaluationLabelerType.CRITERION, label=(True, "id1"))
14 | ]
15 | ),
16 | _ResultDatapoint(
17 | output=("output2", "id2"),
18 | labels=[
19 | Label(name="metric1", type=EvaluationLabelerType.METRIC, label=(0.85, "id2")),
20 | Label(name="annotation1", type=EvaluationLabelerType.ANNOTATION, label=("anno2", "id2")),
21 | Label(name="criterion", type=EvaluationLabelerType.CRITERION, label=(False, "id2"))
22 | ]
23 | ),
24 | ]
25 | results = EvaluationResults.from_rowar_results(rowar_results)
26 |
27 | assert results.outputs == ["output1", "output2"]
28 | assert (results.metrics["metric1"] == np.array([0.95, 0.85])).all()
29 | assert (results.annotations["annotation1"] == np.array(["anno1", "anno2"])).all()
30 | assert (results.criterion == np.array([True, False])).all()
31 |
32 | # Check invocation_ids
33 | assert results.invocation_ids is not None
34 | assert results.invocation_ids.outputs == ["id1", "id2"]
35 | assert (results.invocation_ids.metrics["metric1"] == np.array(["id1", "id2"])).all()
36 | assert (results.invocation_ids.annotations["annotation1"] == np.array(["id1", "id2"])).all()
37 | assert (results.invocation_ids.criterion == np.array(["id1", "id2"])).all()
38 |
--------------------------------------------------------------------------------
/tests/test_tools.py:
--------------------------------------------------------------------------------
1 |
2 | from typing import Dict
3 |
4 | from ell.types.message import ContentBlock, ToolResult
5 |
6 |
7 | def test_tool_json_dumping_behavior():
8 | from ell import tool
9 | import json
10 |
11 | # Create a mock tool function
12 | @tool(exempt_from_tracking=False)
13 | def mock_tool_function(data : Dict[str, str]):
14 | return data
15 |
16 | # Test case where result is a string and _invocation_origin is provided
17 | # with patch('json.dumps') as mock_json_dumps:
18 | result= mock_tool_function(
19 | # _invocation_origin="test_origin",
20 | _tool_call_id="tool_123",
21 | data={"key": "value"}
22 | )
23 | # Expect json.dumps to be called since result is a string and _invocation_origin is provided
24 | # mock_json_dumps.assert_called_once_with({"key": "value"})
25 | assert isinstance(result, ToolResult)
26 | assert result.tool_call_id == "tool_123"
27 | assert result.result == [ContentBlock(text=json.dumps({"key": "value"}))]
28 | # Test case where _invocation_origin is not provided
29 | @tool(exempt_from_tracking=False)
30 | def mock_tool_no_origin():
31 | return "Simple string result"
32 |
33 | result = mock_tool_no_origin(
34 | _tool_call_id="tool_789",
35 | )
36 | assert isinstance(result, ToolResult)
37 | assert result.tool_call_id == "tool_789"
38 | # XXX: We will json dump for now.
39 | assert result.result == [ContentBlock(text=json.dumps("Simple string result"))]
40 |
41 | # Test case where result is a list of ContentBlocks
42 | @tool(exempt_from_tracking=False)
43 | def mock_tool_content_blocks():
44 | return [ContentBlock(text="Block 1"), ContentBlock(text="Block 2")]
45 |
46 | result = mock_tool_content_blocks(
47 | _tool_call_id="tool_101",
48 | )
49 | # Expect json.dumps not to be called since result is already a list of ContentBlocks
50 |
51 | assert isinstance(result, ToolResult)
52 | assert result.tool_call_id == "tool_101"
53 | assert result.result == [ContentBlock(text="Block 1"), ContentBlock(text="Block 2")]
--------------------------------------------------------------------------------
/x/README.md:
--------------------------------------------------------------------------------
1 | This folder contains experimental clients for various LLM providers that may at some point be integrated into ell.
--------------------------------------------------------------------------------
/x/openai_realtime/.gitignore:
--------------------------------------------------------------------------------
1 | reference/
2 |
--------------------------------------------------------------------------------
/x/openai_realtime/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 OpenAI
4 | Copyright (c) 2024 William Guss
5 |
6 | Permission is hereby granted, free of charge, to any person obtaining a copy
7 | of this software and associated documentation files (the "Software"), to deal
8 | in the Software without restriction, including without limitation the rights
9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 | copies of the Software, and to permit persons to whom the Software is
11 | furnished to do so, subject to the following conditions:
12 |
13 | The above copyright notice and this permission notice shall be included in all
14 | copies or substantial portions of the Software.
15 |
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 | SOFTWARE.
23 |
--------------------------------------------------------------------------------
/x/openai_realtime/examples/run_bot.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Configure logging
4 | log() {
5 | echo "$(date '+%Y-%m-%d %H:%M:%S') - $1"
6 | }
7 |
8 | run_discord_gpt4o() {
9 | while true; do
10 | log "INFO - Starting discord_gpt4o script..."
11 | python3 discord_gpt4o.py 2>&1 | while IFS= read -r line; do
12 | echo "$line"
13 | if echo "$line" | grep -qE "Unknown ssrc|error|Traceback \(most recent call last\):"; then
14 | log "WARNING - Detected 'Unknown ssrc', error, or error trace in output. Restarting..."
15 | pkill -f "python3 discord_gpt4o.py"
16 | break
17 | fi
18 | done
19 |
20 | if [ $? -ne 0 ]; then
21 | log "ERROR - discord_gpt4o encountered an error or needs restart. Restarting..."
22 | else
23 | log "INFO - discord_gpt4o finished successfully. Restarting..."
24 | fi
25 |
26 | log "INFO - Waiting for 5 seconds before restarting..."
27 | sleep 5
28 | done
29 | }
30 |
31 | log "INFO - Starting run_discord_gpt4o script"
32 | run_discord_gpt4o
33 |
--------------------------------------------------------------------------------
/x/openai_realtime/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.poetry]
2 | name = "openai-realtime"
3 | version = "0.0.1"
4 | description = "A real-time API client for OpenAI using websockets"
5 | authors = ["William Guss"]
6 | license = "MIT"
7 | readme = "README.md"
8 | packages = [{include = "openai_realtime", from = "src"}]
9 |
10 | [tool.poetry.dependencies]
11 | python = "^3.12" # Updated to match your Python version
12 | websockets = "^10.4"
13 | aiohttp = "^3.8.4"
14 | asyncio = "^3.4.3"
15 | pydub = "^0.25.1" # Note: This package has a deprecation warning for Python 3.13+
16 |
17 | [tool.poetry.dev-dependencies]
18 | pytest = "^7.3.0"
19 | pytest-asyncio = "^0.21.0" # Added pytest-asyncio
20 | black = "^23.3.0"
21 | isort = "^5.12.0"
22 |
23 | [build-system]
24 | requires = ["poetry-core>=1.0.0"]
25 | build-backend = "poetry.core.masonry.api"
26 |
27 | [tool.black]
28 | line-length = 88
29 | target-version = ['py312']
30 |
31 | [tool.isort]
32 | profile = "black"
33 | line_length = 88
34 |
35 | [tool.pytest.ini_options]
36 | asyncio_mode = "auto" # Added configuration for pytest-asyncio
37 |
--------------------------------------------------------------------------------
/x/openai_realtime/src/openai_realtime/__init__.py:
--------------------------------------------------------------------------------
1 | from .client import RealtimeClient
2 | from .api import RealtimeAPI
3 | from .conversation import RealtimeConversation
4 | from .event_handler import RealtimeEventHandler
5 | from .utils import RealtimeUtils
6 |
7 | __all__ = [
8 | "RealtimeClient",
9 | "RealtimeAPI",
10 | "RealtimeConversation",
11 | "RealtimeEventHandler",
12 | "RealtimeUtils"
13 | ]
14 |
--------------------------------------------------------------------------------
/x/openai_realtime/src/openai_realtime/event_handler.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | from typing import Callable, Dict, List, Any
3 |
4 | class RealtimeEventHandler:
5 | def __init__(self):
6 | self.event_handlers: Dict[str, List[Callable]] = {}
7 | self.next_event_handlers: Dict[str, List[Callable]] = {}
8 |
9 | def clear_event_handlers(self):
10 | self.event_handlers.clear()
11 | self.next_event_handlers.clear()
12 | return True
13 |
14 | def on(self, event_name: str, callback: Callable = None):
15 | def decorator(func):
16 | if event_name not in self.event_handlers:
17 | self.event_handlers[event_name] = []
18 | self.event_handlers[event_name].append(func)
19 | return func
20 |
21 | if callback is None:
22 | return decorator
23 | else:
24 | return decorator(callback)
25 |
26 | def on_next(self, event_name: str, callback: Callable):
27 | if event_name not in self.next_event_handlers:
28 | self.next_event_handlers[event_name] = []
29 | self.next_event_handlers[event_name].append(callback)
30 |
31 | def off(self, event_name: str, callback: Callable = None):
32 | if event_name in self.event_handlers:
33 | if callback:
34 | self.event_handlers[event_name].remove(callback)
35 | else:
36 | del self.event_handlers[event_name]
37 | return True
38 |
39 | def off_next(self, event_name: str, callback: Callable = None):
40 | if event_name in self.next_event_handlers:
41 | if callback:
42 | self.next_event_handlers[event_name].remove(callback)
43 | else:
44 | del self.next_event_handlers[event_name]
45 | return True
46 |
47 | async def wait_for_next(self, event_name: str, timeout: float = None):
48 | next_event = None
49 | def set_next_event(event):
50 | nonlocal next_event
51 | next_event = event
52 |
53 | self.on_next(event_name, set_next_event)
54 |
55 | start_time = asyncio.get_event_loop().time()
56 | while not next_event:
57 | if timeout and asyncio.get_event_loop().time() - start_time > timeout:
58 | return None
59 | await asyncio.sleep(0.001)
60 |
61 | return next_event
62 |
63 | def dispatch(self, event_name: str, event: Any):
64 | handlers = self.event_handlers.get(event_name, []).copy()
65 | for handler in handlers:
66 | handler(event)
67 |
68 | next_handlers = self.next_event_handlers.pop(event_name, [])
69 | for next_handler in next_handlers:
70 | next_handler(event)
71 |
72 | return True
--------------------------------------------------------------------------------
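`on` doubles as a decorator when no callback is passed; `dispatch` fires persistent handlers first and then consumes any one-shot `on_next` handlers. A usage sketch:

```python
from openai_realtime.event_handler import RealtimeEventHandler

handler = RealtimeEventHandler()

@handler.on("message")  # decorator form: callback defaults to None
def log_event(event):
    print("persistent:", event)

handler.on_next("message", lambda e: print("one-shot:", e))

handler.dispatch("message", {"text": "hi"})     # fires both handlers
handler.dispatch("message", {"text": "again"})  # only the persistent handler remains
```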
/x/openai_realtime/src/openai_realtime/utils.py:
--------------------------------------------------------------------------------
1 | import base64
2 | import numpy as np
3 |
4 | class RealtimeUtils:
5 | @staticmethod
6 | def float_to_16bit_pcm(float32_array):
7 | int16_array = (np.clip(float32_array, -1, 1) * 32767).astype(np.int16)
8 | return int16_array.tobytes()
9 |
10 | @staticmethod
11 | def base64_to_array_buffer(base64_string):
12 | return base64.b64decode(base64_string)
13 |
14 | @staticmethod
15 | def array_buffer_to_base64(array_buffer):
16 | if isinstance(array_buffer, np.ndarray):
17 | if array_buffer.dtype == np.float32:
18 | array_buffer = RealtimeUtils.float_to_16bit_pcm(array_buffer)
19 | elif array_buffer.dtype == np.int16:
20 | array_buffer = array_buffer.tobytes()
21 | return base64.b64encode(array_buffer).decode('utf-8')
22 |
23 | @staticmethod
24 | def merge_int16_arrays(left, right):
25 | if isinstance(left, bytes):
26 | left = np.frombuffer(left, dtype=np.int16)
27 | if isinstance(right, bytes):
28 | right = np.frombuffer(right, dtype=np.int16)
29 | if not isinstance(left, np.ndarray) or not isinstance(right, np.ndarray):
30 | raise ValueError("Both items must be numpy arrays or bytes objects")
31 | return np.concatenate((left, right))
32 |
33 | @staticmethod
34 | def generate_id(prefix, length=21):
35 | import random
36 | chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
37 | return prefix + ''.join(random.choice(chars) for _ in range(length - len(prefix)))
--------------------------------------------------------------------------------
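A sketch of the round-trip these helpers are built for: float32 audio in, base64-encoded 16-bit PCM out, plus buffer merging for accumulating stream chunks:

```python
import numpy as np
from openai_realtime.utils import RealtimeUtils

samples = np.sin(np.linspace(0, 2 * np.pi, 480)).astype(np.float32)

pcm = RealtimeUtils.float_to_16bit_pcm(samples)      # bytes of int16 PCM
b64 = RealtimeUtils.array_buffer_to_base64(samples)  # same conversion, base64-encoded
merged = RealtimeUtils.merge_int16_arrays(pcm, pcm)  # np.int16 array, 960 samples
event_id = RealtimeUtils.generate_id("evt_")         # 'evt_' plus 17 random characters
```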
/x/openai_realtime/tests/samples/toronto.mp3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MadcowD/ell/298dff343429f24118cb224c7cc0322d5b02bef7/x/openai_realtime/tests/samples/toronto.mp3
--------------------------------------------------------------------------------