├── .flake8
├── .github
│   ├── _wf
│   │   └── lint.yml
│   ├── dependabot.yml
│   └── workflows
│       ├── release.yml
│       ├── security.yml
│       └── test.yml
├── .gitignore
├── .pre-commit-config.yaml
├── LICENSE
├── Readme.md
├── examples
│   ├── __init__.py
│   ├── app.py
│   ├── ex1.py
│   ├── ex10_cc.py
│   ├── ex11_create_extraction_chain.py
│   ├── ex12.py
│   ├── ex13.py
│   ├── ex14.py
│   ├── ex2.py
│   ├── ex3.py
│   ├── ex4.py
│   ├── ex5.py
│   ├── ex6.py
│   ├── ex7_agent.py
│   ├── ex8.py
│   ├── ex9_double_chain.py
│   └── vercel.py
├── langcorn
│   ├── __init__.py
│   ├── __main__.py
│   └── server
│       ├── __init__.py
│       ├── api.py
│       └── test_api.py
├── poetry.lock
├── pyproject.toml
├── requirements.txt
├── tests.http
└── vercel.json
/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | max-line-length = 160
3 | per-file-ignores =
4 |     # Ignore docstring lints for all files
5 | *: D100, D101, D102, D103, D104, D107
6 |
--------------------------------------------------------------------------------
/.github/_wf/lint.yml:
--------------------------------------------------------------------------------
1 | name: lint
2 |
3 | on:
4 | push:
5 | branches: [main]
6 | pull_request:
7 |
8 | env:
9 | POETRY_VERSION: "1.4.0"
10 | OPENAI_API_KEY: "sk-fake"
11 |
12 | jobs:
13 | build:
14 | runs-on: ubuntu-latest
15 | strategy:
16 | matrix:
17 | python-version:
18 | - "3.10"
19 | - "3.11"
20 | steps:
21 | - uses: actions/checkout@v3
22 | - name: Install poetry
23 | run: |
24 | pipx install poetry==$POETRY_VERSION
25 | - name: Set up Python ${{ matrix.python-version }}
26 | uses: actions/setup-python@v4
27 | with:
28 | python-version: ${{ matrix.python-version }}
29 | cache: poetry
30 | - name: Install dependencies
31 | run: |
32 | poetry install
33 |       - name: Analysing the code with pre-commit
34 | run: |
35 | poetry run pre-commit run --all-files
36 |
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | # To get started with Dependabot version updates, you'll need to specify which
2 | # package ecosystems to update and where the package manifests are located.
3 | # Please see the documentation for all configuration options:
4 | # https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file
5 |
6 | version: 2
7 | updates:
8 | - package-ecosystem: "pip" # See documentation for possible values
9 | directory: "/" # Location of package manifests
10 | schedule:
11 | interval: "weekly"
12 |
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
1 | name: release
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | # Sequence of patterns matched against refs/tags
8 | tags:
9 | - 0.*
10 |
11 | env:
12 | POETRY_VERSION: "1.4.0"
13 |
14 | jobs:
15 | if_release:
16 | if: |
17 | true
18 | runs-on: ubuntu-latest
19 | steps:
20 | - uses: actions/checkout@v3
21 | - name: Install poetry
22 | run: pipx install poetry==$POETRY_VERSION
23 | - name: Set up Python 3.10
24 | uses: actions/setup-python@v4
25 | with:
26 | python-version: "3.10"
27 | cache: "poetry"
28 | - name: Build project for distribution
29 | run: poetry build --format sdist
30 | - name: Check Version
31 | id: check-version
32 | run: |
33 | echo version=$(poetry version --short) >> $GITHUB_OUTPUT
34 | - name: Publish to PyPI
35 | env:
36 | POETRY_PYPI_TOKEN_PYPI: ${{ secrets.PYPI_API_TOKEN }}
37 | run: |
38 | poetry publish --skip-existing
39 |
--------------------------------------------------------------------------------
/.github/workflows/security.yml:
--------------------------------------------------------------------------------
1 | name: PyCharm Python Security Scanner
2 |
3 | on:
4 | schedule:
5 | - cron: "0 0 * * *"
6 |
7 | jobs:
8 | security_checks:
9 | runs-on: ubuntu-latest
10 | name: Execute the pycharm-security action
11 | steps:
12 | - uses: actions/checkout@v1
13 | - name: PyCharm Python Security Scanner
14 | uses: tonybaloney/pycharm-security@1.19.0
15 |
--------------------------------------------------------------------------------
/.github/workflows/test.yml:
--------------------------------------------------------------------------------
1 | name: test
2 |
3 | on:
4 | push:
5 | branches: [main]
6 | pull_request:
7 | branches: [main]
8 |
9 | env:
10 | POETRY_VERSION: "1.4.0"
11 | OPENAI_API_KEY: "sk-fake"
12 |
13 | jobs:
14 | build:
15 | runs-on: ubuntu-latest
16 | strategy:
17 | matrix:
18 | python-version:
19 | - "3.9"
20 | - "3.10"
21 | - "3.11"
22 | steps:
23 | - uses: actions/checkout@v3
24 | - name: Install poetry
25 | run: pipx install poetry==$POETRY_VERSION
26 | - name: Set up Python ${{ matrix.python-version }}
27 | uses: actions/setup-python@v4
28 | with:
29 | python-version: ${{ matrix.python-version }}
30 | cache: "poetry"
31 | - name: Install dependencies
32 | run: poetry install
33 | - name: Run unit tests
34 | run: |
35 | poetry run pytest .
36 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | node_modules/*
2 | */ponicode/*
3 | *.png
4 | *.jpg
5 | *.onnx
6 | *.sqlite
7 | *.csv
8 | *.json
9 | # worker/
10 | .pytest_cache
11 |
12 | # Logs
13 | logs
14 | *.log
15 | npm-debug.log*
16 | yarn-debug.log*
17 | yarn-error.log*
18 | lerna-debug.log*
19 |
20 | # Mac
21 | .DS_Store
22 |
23 | # VSCode
24 | .vscode
25 | .chroma
26 | .ruff_cache
27 |
28 | # Diagnostic reports (https://nodejs.org/api/report.html)
29 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
30 |
31 | # Runtime data
32 | pids
33 | *.pid
34 | *.seed
35 | *.pid.lock
36 |
37 | # Directory for instrumented libs generated by jscoverage/JSCover
38 | lib-cov
39 |
40 | # Coverage directory used by tools like istanbul
41 | coverage
42 | *.lcov
43 |
44 | # nyc test coverage
45 | .nyc_output
46 |
47 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
48 | .grunt
49 |
50 | # Bower dependency directory (https://bower.io/)
51 | bower_components
52 |
53 | # node-waf configuration
54 | .lock-wscript
55 |
56 | # Compiled binary addons (https://nodejs.org/api/addons.html)
57 | build/Release
58 |
59 | # Dependency directories
60 | node_modules/
61 | jspm_packages/
62 |
63 | # TypeScript v1 declaration files
64 | typings/
65 |
66 | # TypeScript cache
67 | *.tsbuildinfo
68 |
69 | # Optional npm cache directory
70 | .npm
71 |
72 | # Optional eslint cache
73 | .eslintcache
74 |
75 | # Microbundle cache
76 | .rpt2_cache/
77 | .rts2_cache_cjs/
78 | .rts2_cache_es/
79 | .rts2_cache_umd/
80 |
81 | # Optional REPL history
82 | .node_repl_history
83 |
84 | # Output of 'npm pack'
85 | *.tgz
86 |
87 | # Yarn Integrity file
88 | .yarn-integrity
89 |
90 | # dotenv environment variables file
91 | .env
92 | .env.test
93 |
94 | # parcel-bundler cache (https://parceljs.org/)
95 | .cache
96 |
97 | # Next.js build output
98 | .next
99 |
100 | # Nuxt.js build / generate output
101 | .nuxt
102 | dist
103 |
104 | # Gatsby files
105 | .cache/
106 | # Comment in the public line in if your project uses Gatsby and *not* Next.js
107 | # https://nextjs.org/blog/next-9-1#public-directory-support
108 | # public
109 |
110 | # vuepress build output
111 | .vuepress/dist
112 |
113 | # Serverless directories
114 | .serverless/
115 |
116 | # FuseBox cache
117 | .fusebox/
118 |
119 | # DynamoDB Local files
120 | .dynamodb/
121 |
122 | # TernJS port file
123 | .tern-port
124 | # Byte-compiled / optimized / DLL files
125 | __pycache__/
126 | *.py[cod]
127 | *$py.class
128 | notebooks
129 |
130 | # C extensions
131 | *.so
132 |
133 | # Distribution / packaging
134 | .Python
135 | build/
136 | develop-eggs/
137 | dist/
138 | downloads/
139 | eggs/
140 | .eggs/
141 | lib/
142 | lib64/
143 | parts/
144 | sdist/
145 | var/
146 | wheels/
147 | pip-wheel-metadata/
148 | share/python-wheels/
149 | *.egg-info/
150 | .installed.cfg
151 | *.egg
152 | MANIFEST
153 |
154 | # PyInstaller
155 | # Usually these files are written by a python script from a template
156 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
157 | *.manifest
158 | *.spec
159 |
160 | # Installer logs
161 | pip-log.txt
162 | pip-delete-this-directory.txt
163 |
164 | # Unit test / coverage reports
165 | htmlcov/
166 | .tox/
167 | .nox/
168 | .coverage
169 | .coverage.*
170 | .cache
171 | nosetests.xml
172 | coverage.xml
173 | *.cover
174 | *.py,cover
175 | .hypothesis/
176 | .pytest_cache/
177 |
178 | # Translations
179 | *.mo
180 | *.pot
181 |
182 | # Django stuff:
183 | *.log
184 | local_settings.py
185 | db.sqlite3
186 | db.sqlite3-journal
187 |
188 | # Flask stuff:
189 | instance/
190 | .webassets-cache
191 |
192 | # Scrapy stuff:
193 | .scrapy
194 |
195 | # Sphinx documentation
196 | docs/_build/
197 |
198 | # PyBuilder
199 | target/
200 |
201 | # Jupyter Notebook
202 | .ipynb_checkpoints
203 |
204 | # IPython
205 | profile_default/
206 | ipython_config.py
207 |
208 | # pyenv
209 | .python-version
210 |
211 | # pipenv
212 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
213 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
214 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
215 | # install all needed dependencies.
216 | #Pipfile.lock
217 |
218 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
219 | __pypackages__/
220 |
221 | # Celery stuff
222 | celerybeat-schedule
223 | celerybeat.pid
224 |
225 | # SageMath parsed files
226 | *.sage.py
227 |
228 | # Environments
229 | .env
230 | .venv
231 | env/
232 | venv/
233 | ENV/
234 | env.bak/
235 | venv.bak/
236 |
237 | # Spyder project settings
238 | .spyderproject
239 | .spyproject
240 |
241 | # Rope project settings
242 | .ropeproject
243 |
244 | # mkdocs documentation
245 | /site
246 |
247 | # mypy
248 | .mypy_cache/
249 | .dmypy.json
250 | dmypy.json
251 |
252 | # Poetry
253 | .testenv/*
254 | .vercel
255 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 |
2 | default_language_version:
3 | python: python3
4 |
5 | repos:
6 |
7 | - repo: https://github.com/asottile/pyupgrade
8 | rev: v2.31.1
9 | hooks:
10 | - id: pyupgrade
11 | args: [--py39-plus]
12 |
13 | - repo: https://github.com/psf/black
14 | rev: 22.8.0
15 | hooks:
16 | - id: black
17 | language_version: python3.9
18 |
19 | - repo: https://github.com/pycqa/flake8
20 | rev: 5.0.4
21 | hooks:
22 | - id: flake8
23 | language_version: python3
24 | additional_dependencies: [flake8-docstrings]
25 |
26 | - repo: https://github.com/PyCQA/isort
27 | rev: 5.12.0
28 | hooks:
29 | - id: isort
30 | args: [--profile, black]
31 |
32 | - repo: https://github.com/pre-commit/pre-commit-hooks
33 | rev: v4.3.0
34 | hooks:
35 | - id: check-ast
36 | exclude: '^(third_party)/'
37 | - id: check-json
38 | exclude: '.devcontainer/devcontainer.json' # this supports JSON with comments
39 | - id: check-toml
40 | - id: check-xml
41 | - id: check-yaml
42 | - id: check-merge-conflict
43 | - id: check-symlinks
44 | - id: check-executables-have-shebangs
45 | - id: check-shebang-scripts-are-executable
46 | - id: check-added-large-files
47 | args: ['--maxkb=100']
48 |
49 | - repo: https://github.com/executablebooks/mdformat
50 | rev: 0.7.14
51 | hooks:
52 | - id: mdformat
53 | name: mdformat
54 | entry: mdformat .
55 | language_version: python3
56 |
57 |
58 | - repo: https://github.com/myint/docformatter
59 | rev: v1.4
60 | hooks:
61 | - id: docformatter
62 | args: [--in-place]
63 |
64 | - repo: https://github.com/hadialqattan/pycln
65 | rev: v2.1.1 # Possible releases: https://github.com/hadialqattan/pycln/releases
66 | hooks:
67 | - id: pycln
68 |
69 | - repo: https://github.com/isidentical/teyit
70 | rev: 0.4.3
71 | hooks:
72 | - id: teyit
73 |
74 |
75 | - repo: https://github.com/python-poetry/poetry
76 | rev: '1.6.0'
77 | hooks:
78 | - id: poetry-check
79 | - id: poetry-lock
80 | name: validate poetry lock
81 | args:
82 | - --check
83 |
84 |
85 | - repo: https://github.com/codespell-project/codespell
86 | rev: v2.2.5
87 | hooks:
88 | - id: codespell
89 | exclude: '^(third_party/)|(poetry.lock)'
90 | args:
91 | # if you've got a short variable name that's getting flagged, add it here
92 | - -L bu,ro,te,ue,alo,hda,ois,nam,nams,ned,som,parm,setts,inout,warmup,bumb,nd,sie
93 |           - --builtin clear,rare,informal,usage,code,names,en-GB_to_en-US
94 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License
2 |
3 | Copyright (c) Alexander Miasoiedov / Myasoedov
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in
13 | all copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
21 | THE SOFTWARE.
22 |
--------------------------------------------------------------------------------
/Readme.md:
--------------------------------------------------------------------------------
1 | # LangCorn
2 |
3 | LangCorn is an API server that enables you to serve LangChain models and pipelines with ease, leveraging the power of FastAPI for a robust and efficient experience.
4 |
5 |
15 | ## Features
16 |
17 | - Easy deployment of LangChain models and pipelines
18 | - Ready-to-use auth functionality
19 | - High-performance FastAPI framework for serving requests
20 | - Scalable and robust solution for language processing applications
21 | - Supports custom pipelines and processing
22 | - Well-documented RESTful API endpoints
23 | - Asynchronous processing for faster response times
24 |
25 | ## 📦 Installation
26 |
27 | To get started with LangCorn, simply install the package using pip:
28 |
29 | ```shell
30 |
31 | pip install langcorn
32 | ```
33 |
34 | ## ⛓️ Quick Start
35 |
36 | Example LLM chain (`examples/ex1.py`):
37 |
38 | ```python
39 | import os
40 |
41 | from langchain.chains import LLMChain
42 | from langchain.llms import OpenAI
43 | from langchain.prompts import PromptTemplate
44 |
45 | os.environ["OPENAI_API_KEY"] = os.environ.get("OPENAI_API_KEY", "sk-********")
46 | llm = OpenAI(temperature=0.9)
47 | prompt = PromptTemplate(input_variables=["product"], template="What is a good name for a company that makes {product}?")
48 | chain = LLMChain(llm=llm, prompt=prompt)
49 | ```
50 | Run your LangCorn FastAPI server:
51 |
52 | ```shell
53 | langcorn server examples.ex1:chain
54 |
55 |
56 | [INFO] 2023-04-18 14:34:56.32 | api:create_service:75 | Creating service
57 | [INFO] 2023-04-18 14:34:57.51 | api:create_service:85 | lang_app='examples.ex1:chain':LLMChain(['product'])
58 | [INFO] 2023-04-18 14:34:57.51 | api:create_service:104 | Serving
59 | [INFO] 2023-04-18 14:34:57.51 | api:create_service:106 | Endpoint: /docs
60 | [INFO] 2023-04-18 14:34:57.51 | api:create_service:106 | Endpoint: /examples.ex1/run
61 | INFO: Started server process [27843]
62 | INFO: Waiting for application startup.
63 | INFO: Application startup complete.
64 | INFO: Uvicorn running on http://127.0.0.1:8718 (Press CTRL+C to quit)
65 | ```
66 |
67 | or, as an alternative:
68 |
69 | ```shell
70 | python -m langcorn server examples.ex1:chain
71 |
72 | ```
73 |
74 | Run multiple chains
75 |
76 | ```shell
77 | python -m langcorn server examples.ex1:chain examples.ex2:chain
78 |
79 |
80 | [INFO] 2023-04-18 14:35:21.11 | api:create_service:75 | Creating service
81 | [INFO] 2023-04-18 14:35:21.82 | api:create_service:85 | lang_app='examples.ex1:chain':LLMChain(['product'])
82 | [INFO] 2023-04-18 14:35:21.82 | api:create_service:85 | lang_app='examples.ex2:chain':SimpleSequentialChain(['input'])
83 | [INFO] 2023-04-18 14:35:21.82 | api:create_service:104 | Serving
84 | [INFO] 2023-04-18 14:35:21.82 | api:create_service:106 | Endpoint: /docs
85 | [INFO] 2023-04-18 14:35:21.82 | api:create_service:106 | Endpoint: /examples.ex1/run
86 | [INFO] 2023-04-18 14:35:21.82 | api:create_service:106 | Endpoint: /examples.ex2/run
87 | INFO: Started server process [27863]
88 | INFO: Waiting for application startup.
89 | INFO: Application startup complete.
90 | INFO: Uvicorn running on http://127.0.0.1:8718 (Press CTRL+C to quit)
91 | ```
92 |
93 | Import the necessary packages and create your FastAPI app:
94 |
95 | ```python
96 |
97 | from fastapi import FastAPI
98 | from langcorn import create_service
99 |
100 | app: FastAPI = create_service("examples.ex1:chain")
101 | ```
102 |
103 | Multiple chains
104 |
105 | ```python
106 |
107 | from fastapi import FastAPI
108 | from langcorn import create_service
109 |
110 | app: FastAPI = create_service("examples.ex2:chain", "examples.ex1:chain")
111 | ```
112 |
113 | or
114 |
115 | ```python
116 | from fastapi import FastAPI
117 | from langcorn import create_service
118 |
119 | app: FastAPI = create_service(
120 | "examples.ex1:chain",
121 | "examples.ex2:chain",
122 | "examples.ex3:chain",
123 | "examples.ex4:sequential_chain",
124 | "examples.ex5:conversation",
125 | "examples.ex6:conversation_with_summary",
126 | "examples.ex7_agent:agent",
127 | )
128 |
129 | ```
130 |
131 | Run your LangCorn FastAPI server:
132 |
133 | ```shell
134 |
135 | uvicorn main:app --host 0.0.0.0 --port 8000
136 | ```
137 |
138 | Now, your LangChain models and pipelines are accessible via the LangCorn API server.
139 |
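Each chain is exposed as a POST endpoint whose path mirrors the import string (with the current `langcorn/server/api.py`, `examples.ex1:chain` maps to `/examples.ex1.chain/run`) and whose request fields mirror the chain's input variables. A minimal client sketch, assuming the `requests` package and the uvicorn command above:

```python
import requests

# The ex1 chain takes a single "product" input variable
resp = requests.post(
    "http://0.0.0.0:8000/examples.ex1.chain/run",
    json={"product": "colorful socks"},
)
resp.raise_for_status()
print(resp.json()["output"])  # the response also carries "error" and "memory"
```
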
140 | ## Docs
141 |
142 | The FastAPI docs are served automatically at `/docs`.
143 | [Live example](https://langcorn-ift9ub8zg-msoedov.vercel.app/docs#/) hosted on Vercel.
144 |
145 | 
146 |
147 | ## Auth
148 |
149 | It is possible to add static API token auth by specifying `auth_token`:
150 |
151 | ```shell
152 | python -m langcorn server examples.ex1:chain examples.ex2:chain --auth_token=api-secret-value
153 | ```
154 |
155 | or
156 |
157 | ```python
158 | app: FastAPI = create_service("examples.ex1:chain", auth_token="api-secret-value")
159 | ```
160 |
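The token is read from the `Authorization` header and parsed with FastAPI's `get_authorization_scheme_param`, so a standard `Bearer` scheme works. A sketch of an authenticated call, assuming the server started above:

```python
import requests

resp = requests.post(
    "http://0.0.0.0:8718/examples.ex1.chain/run",
    headers={"Authorization": "Bearer api-secret-value"},  # must match auth_token
    json={"product": "colorful socks"},
)
print(resp.status_code)  # 401 if the token does not match
```
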
161 | ## Custom API keys
162 |
163 | ```shell
164 | POST http://0.0.0.0:3000/examples.ex6/run
165 | X-LLM-API-KEY: sk-******
166 | Content-Type: application/json
167 | ```
168 |
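The `X-LLM-API-KEY` header swaps in the given OpenAI key for the duration of the request and restores the previous key afterwards (see `set_openai_key` in `langcorn/server/api.py`), so each caller can bring their own key. The same request from Python, as a sketch:

```python
import requests

resp = requests.post(
    "http://0.0.0.0:3000/examples.ex6/run",
    headers={"X-LLM-API-KEY": "sk-******"},  # per-request OpenAI key
    json={"history": "", "input": "What is brain?", "memory": []},
)
```
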
169 | ## Handling memory
170 |
171 | ```json
172 | {
173 | "history": "string",
174 | "input": "What is brain?",
175 | "memory": [
176 | {
177 | "type": "human",
178 | "data": {
179 | "content": "What is memory?",
180 | "additional_kwargs": {}
181 | }
182 | },
183 | {
184 | "type": "ai",
185 | "data": {
186 | "content": " Memory is the ability of the brain to store, retain, and recall information. It is the capacity to remember past experiences, facts, and events. It is also the ability to learn and remember new information.",
187 | "additional_kwargs": {}
188 | }
189 | }
190 | ]
191 | }
192 |
193 | ```
194 |
195 | Response:
196 |
197 | ```json
198 | {
199 | "output": " The brain is an organ in the human body that is responsible for controlling thought, memory, emotion, and behavior. It is composed of billions of neurons that communicate with each other through electrical and chemical signals. It is the most complex organ in the body and is responsible for all of our conscious and unconscious actions.",
200 | "error": "",
201 | "memory": [
202 | {
203 | "type": "human",
204 | "data": {
205 | "content": "What is memory?",
206 | "additional_kwargs": {}
207 | }
208 | },
209 | {
210 | "type": "ai",
211 | "data": {
212 | "content": " Memory is the ability of the brain to store, retain, and recall information. It is the capacity to remember past experiences, facts, and events. It is also the ability to learn and remember new information.",
213 | "additional_kwargs": {}
214 | }
215 | },
216 | {
217 | "type": "human",
218 | "data": {
219 | "content": "What is brain?",
220 | "additional_kwargs": {}
221 | }
222 | },
223 | {
224 | "type": "ai",
225 | "data": {
226 | "content": " The brain is an organ in the human body that is responsible for controlling thought, memory, emotion, and behavior. It is composed of billions of neurons that communicate with each other through electrical and chemical signals. It is the most complex organ in the body and is responsible for all of our conscious and unconscious actions.",
227 | "additional_kwargs": {}
228 | }
229 | }
230 | ]
231 | }
232 | ```
233 |
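Because the response returns the updated `memory`, a client can keep a conversation going by echoing it back in the next request. A minimal round-trip sketch against the same endpoint:

```python
import requests

url = "http://0.0.0.0:3000/examples.ex6/run"
memory = []  # start with an empty conversation

for question in ["What is memory?", "What is brain?"]:
    resp = requests.post(url, json={"history": "", "input": question, "memory": memory})
    body = resp.json()
    memory = body["memory"]  # feed the returned messages back into the next turn
    print(body["output"])
```
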
234 | ## LLM kwargs
235 |
236 | To override the default LLM params per request:
237 |
238 | ```shell
239 | POST http://0.0.0.0:3000/examples.ex1/run
240 | X-LLM-API-KEY: sk-******
241 | X-LLM-TEMPERATURE: 0.7
242 | X-MAX-TOKENS: 256
243 | X-MODEL-NAME: gpt5
244 | Content-Type: application/json
245 | ```
246 |
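These headers are read by `extract_llm_kwargs` in `langcorn/server/api.py` and applied to the chain's LLM before the run: `X-LLM-TEMPERATURE` sets `temperature` (float), `X-MAX-TOKENS` sets `max_tokens` (int), and `X-MODEL-NAME` sets `model_name`. A sketch in Python:

```python
import requests

resp = requests.post(
    "http://0.0.0.0:3000/examples.ex1/run",
    headers={
        "X-LLM-API-KEY": "sk-******",
        "X-LLM-TEMPERATURE": "0.7",  # parsed as float
        "X-MAX-TOKENS": "256",  # parsed as int
    },
    json={"product": "colorful socks"},
)
```
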
247 | ## Custom run function
248 |
249 | See `examples/ex12.py`:
250 |
251 | ```python
252 |
253 | chain = LLMChain(llm=llm, prompt=prompt, verbose=True)
254 |
255 |
256 | # Run the chain only specifying the input variable.
257 |
258 |
259 | def run(query: str) -> Joke:
260 | output = chain.run(query)
261 | return parser.parse(output)
262 |
263 | app: FastAPI = create_service("examples.ex12:run")
264 |
265 | ```
266 |
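Plain functions are wrapped in `FnWrapper` (see `langcorn/server/api.py`), which exposes a single `query` input and serializes the returned Pydantic model as the output. Calling the generated endpoint might look like this sketch:

```python
import requests

resp = requests.post(
    "http://0.0.0.0:8718/examples.ex12.run/run",  # path derived from "examples.ex12:run"
    json={"query": "Tell me a joke."},
)
print(resp.json()["output"])  # the parsed Joke fields
```
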
267 | ## Documentation
268 |
269 | For more detailed information on how to use LangCorn, including advanced features and customization options, please refer to the official documentation.
270 |
271 | ## 👋 Contributing
272 |
273 | Contributions to LangCorn are welcome! If you'd like to contribute, please follow these steps:
274 |
275 | - Fork the repository on GitHub
276 | - Create a new branch for your changes
277 | - Commit your changes to the new branch
278 | - Push your changes to the forked repository
279 | - Open a pull request to the main LangCorn repository
280 |
281 | Before contributing, please read the contributing guidelines.
282 |
283 | ## License
284 |
285 | LangCorn is released under the MIT License.
286 |
--------------------------------------------------------------------------------
/examples/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/msoedov/langcorn/f7fde47ee609c0623e7cc629eb01348bde3cf9c1/examples/__init__.py
--------------------------------------------------------------------------------
/examples/app.py:
--------------------------------------------------------------------------------
1 | from fastapi import FastAPI
2 |
3 | from langcorn import create_service
4 |
5 | app: FastAPI = create_service(
6 | "examples.ex1:chain",
7 | "examples.ex2:chain",
8 | "examples.ex3:chain",
9 | "examples.ex4:sequential_chain",
10 | "examples.ex5:conversation",
11 | "examples.ex6:conversation_with_summary",
12 | "examples.ex7_agent:agent",
13 | "examples.ex8:qa",
14 | "examples.ex9_double_chain:chain1",
15 | "examples.ex9_double_chain:chain2",
16 | "examples.ex10_cc:chain",
17 | "examples.ex12:run",
18 | # "examples.ex13:chain",
19 | "examples.ex14:chain",
20 | )
21 |
--------------------------------------------------------------------------------
/examples/ex1.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from langchain.chains import LLMChain
4 | from langchain.llms import OpenAI
5 | from langchain.prompts import PromptTemplate
6 |
7 | os.environ["OPENAI_API_KEY"] = os.environ.get("OPENAI_API_KEY", "sk-********")
8 |
9 | llm = OpenAI(temperature=0.9)
10 | prompt = PromptTemplate(
11 | input_variables=["product"],
12 | template="What is a good name for a company that makes {product}?",
13 | )
14 |
15 |
16 | chain = LLMChain(llm=llm, prompt=prompt)
17 |
18 | if __name__ == "__main__":
19 | # Run the chain only specifying the input variable.
20 | print(chain.run("colorful socks"))
21 |
--------------------------------------------------------------------------------
/examples/ex10_cc.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Optional
2 |
3 | from langchain.callbacks.manager import CallbackManagerForChainRun
4 | from langchain.chains.base import Chain
5 |
6 |
7 | class CustomChain(Chain):
8 | input_key: str = "input"
9 | output_key: str = "output"
10 |
11 | def _call(
12 | self,
13 | inputs: dict[str, Any],
14 | run_manager: Optional[CallbackManagerForChainRun] = None,
15 | ) -> dict[str, str]:
16 | print("input:", inputs)
17 | return {self.output_key: "Hello", "other": "test"}
18 |
19 | @property
20 | def input_keys(self) -> list[str]:
21 | return [self.input_key]
22 |
23 | @property
24 | def output_keys(self) -> list[str]:
25 | """Meta private."""
26 | return [self.output_key, "other"]
27 |
28 |
29 | chain = CustomChain()
30 |
--------------------------------------------------------------------------------
/examples/ex11_create_extraction_chain.py:
--------------------------------------------------------------------------------
1 | from langchain.chains import create_extraction_chain
2 | from langchain.llms import OpenAI
3 |
4 | schema = {
5 | "properties": {
6 | "name": {"type": "string"},
7 | "height": {"type": "integer"},
8 | "hair_color": {"type": "string"},
9 | },
10 | "required": ["name", "height"],
11 | }
12 |
13 | # Input
14 | inp = """Alex is 5 feet tall. Claudia is 1 feet taller Alex and jumps higher than him. Claudia is a brunette and Alex is blonde."""
15 |
16 | # Run chain
17 |
18 | llm = OpenAI(temperature=0)
19 |
20 | chain = create_extraction_chain(schema, llm, verbose=True)
21 |
22 | if __name__ == "__main__":
23 | chain.run(input=inp)
24 |
--------------------------------------------------------------------------------
/examples/ex12.py:
--------------------------------------------------------------------------------
1 | from langchain.chains import LLMChain
2 | from langchain.llms import OpenAI
3 | from langchain.output_parsers import PydanticOutputParser
4 | from langchain.prompts import PromptTemplate
5 | from pydantic import BaseModel, Field, validator
6 |
7 |
8 | class Joke(BaseModel):
9 | setup: str = Field(description="question to set up a joke")
10 | punchline: str = Field(description="answer to resolve the joke")
11 |
12 | # You can add custom validation logic easily with Pydantic.
13 | @validator("setup")
14 | def question_ends_with_question_mark(cls, field):
15 | if field[-1] != "?":
16 | raise ValueError("Badly formed question!")
17 | return field
18 |
19 |
20 | llm = OpenAI(temperature=0)
21 | joke_query = "Tell me a joke."
22 |
23 | parser = PydanticOutputParser(pydantic_object=Joke)
24 |
25 | prompt = PromptTemplate(
26 | template="Answer the user query.\n{format_instructions}\n{query}\n",
27 | input_variables=["query"],
28 | partial_variables={"format_instructions": parser.get_format_instructions()},
29 | )
30 |
31 | _input = prompt.format_prompt(query=joke_query)
32 |
33 | chain = LLMChain(llm=llm, prompt=prompt, verbose=True)
34 |
35 |
36 | # Run the chain only specifying the input variable.
37 |
38 |
39 | def run(query: str) -> Joke:
40 | output = chain.run(query)
41 | return parser.parse(output)
42 |
43 |
44 | if __name__ == "__main__":
45 | print(run(joke_query))
46 |
--------------------------------------------------------------------------------
/examples/ex13.py:
--------------------------------------------------------------------------------
1 | import langchain
2 | from langchain.cache import InMemoryCache
3 | from langchain.chains import LLMChain
4 | from langchain.llms import OpenAI
5 | from langchain.prompts import PromptTemplate
6 |
7 | langchain.llm_cache = InMemoryCache()
8 |
9 |
10 | llm = OpenAI(temperature=0.9)
11 | prompt = PromptTemplate(
12 | input_variables=["product"],
13 | template="What is a good name for a company that makes {product}?",
14 | )
15 |
16 |
17 | chain = LLMChain(llm=llm, prompt=prompt)
18 |
19 | if __name__ == "__main__":
20 | # Run the chain only specifying the input variable.
21 | print(chain.run("colorful socks"))
22 |
--------------------------------------------------------------------------------
/examples/ex14.py:
--------------------------------------------------------------------------------
1 | from langchain.chains import ConversationalRetrievalChain
2 | from langchain.chat_models import ChatOpenAI
3 | from langchain.document_loaders import TextLoader
4 | from langchain.embeddings.fake import FakeEmbeddings
5 | from langchain.text_splitter import CharacterTextSplitter
6 | from langchain.vectorstores import Chroma
7 |
8 | loader = TextLoader("Readme.md")
9 | documents = loader.load()
10 | text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
11 | texts = text_splitter.split_documents(documents)
12 |
13 | embeddings = FakeEmbeddings(size=1504)
14 | docsearch = Chroma.from_documents(texts, embeddings)
15 |
16 | chain = ConversationalRetrievalChain.from_llm(
17 | llm=ChatOpenAI(model="gpt-3.5-turbo"),
18 | retriever=docsearch.as_retriever(search_kwargs={"k": 1}),
19 | )
20 |
21 |
22 | if __name__ == "__main__":
23 | # Run the chain only specifying the input variable.
24 | print(chain.run(question="colorful socks", chat_history=[]))
25 |
--------------------------------------------------------------------------------
/examples/ex2.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from langchain.chains import LLMChain, SimpleSequentialChain
4 | from langchain.llms import OpenAI
5 | from langchain.prompts import PromptTemplate
6 |
7 | os.environ["OPENAI_API_KEY"] = os.environ.get("OPENAI_API_KEY", "sk-********")
8 |
9 |
10 | # This is an LLMChain to write a synopsis given a title of a play.
11 | llm = OpenAI(temperature=0.7)
12 | synopsis_template = """You are a playwright. Given the title of play, it is your job to write a synopsis for that title.
13 |
14 | Title: {title}
15 | Playwright: This is a synopsis for the above play:"""
16 | synopsis_prompt_template = PromptTemplate(
17 | input_variables=["title"], template=synopsis_template
18 | )
19 | synopsis_chain = LLMChain(llm=llm, prompt=synopsis_prompt_template)
20 |
21 | # This is an LLMChain to write a review of a play given a synopsis.
22 | review_template = """You are a play critic from the New York Times. Given the synopsis of play, it is your job to write a review for that play.
23 |
24 | Play Synopsis:
25 | {synopsis}
26 | Review from a New York Times play critic of the above play:"""
27 | review_prompt_template = PromptTemplate(
28 | input_variables=["synopsis"], template=review_template
29 | )
30 | review_chain = LLMChain(llm=llm, prompt=review_prompt_template)
31 |
32 |
33 | chain = SimpleSequentialChain(chains=[synopsis_chain, review_chain], verbose=True)
34 |
35 | if __name__ == "__main__":
36 | review = chain.run("Tragedy at sunset on the beach")
37 |
--------------------------------------------------------------------------------
/examples/ex3.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from langchain.chains import LLMMathChain
4 | from langchain.llms import OpenAI
5 |
6 | os.environ["OPENAI_API_KEY"] = os.environ.get("OPENAI_API_KEY", "sk-********")
7 |
8 | llm = OpenAI(temperature=0)
9 | chain = LLMMathChain.from_llm(llm=llm, verbose=True)
10 |
11 | if __name__ == "__main__":
12 | chain.run("What is 13 raised to the .3432 power?")
13 |
--------------------------------------------------------------------------------
/examples/ex4.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from langchain.chains import LLMChain, LLMRequestsChain, SequentialChain
4 | from langchain.llms import OpenAI
5 | from langchain.prompts import PromptTemplate
6 |
7 | os.environ["OPENAI_API_KEY"] = os.environ.get("OPENAI_API_KEY", "sk-********")
8 |
9 |
10 | class RequestsChain(LLMRequestsChain):
11 | """Chain that hits a URL and then uses an LLM to parse results."""
12 |
13 | llm_chain: LLMChain = None
14 | requests_key: str = None
15 |
16 | def _call(self, inputs: dict[str, str]) -> dict[str, str]:
17 | from bs4 import BeautifulSoup
18 |
19 | url = inputs[self.input_key]
20 | res = self.requests_wrapper.get(url)
21 | # extract the text from the html
22 | soup = BeautifulSoup(res, "html.parser")
23 | return {self.output_key: soup.get_text()[: self.text_length]}
24 |
25 |
26 | requests_chain = RequestsChain(
27 | input_key="url",
28 | output_key="output",
29 | )
30 |
31 | search_template = """Between >>> and <<< are the raw search result text from google search html page.
32 | Extract the answer to the question '{query}'. Please cleanup the answer to remove any extra text unrelated to the answer.
33 |
34 | Use the format
35 | Extracted: answer
36 | >>> {output} <<<
37 | Extracted:"""
38 |
39 | llm = OpenAI()
40 | PROMPT = PromptTemplate(
41 | input_variables=["query", "output"],
42 | template=search_template,
43 | )
44 |
45 | llm_chain = LLMChain(
46 | llm=llm,
47 | prompt=PROMPT,
48 | output_key="text",
49 | )
50 |
51 |
52 | sequential_chain = SequentialChain(
53 | chains=[requests_chain, llm_chain],
54 | input_variables=["query", "url"],
55 | output_variables=["text"],
56 | verbose=True,
57 | )
58 | question = "IPL matches scheduled for Royal Challengers Bangalore in April"
59 |
60 | if __name__ == "__main__":
61 | sequential_chain.run(
62 | {
63 | "query": question,
64 | "url": "https://www.google.com/search?q=" + question.replace(" ", "+"),
65 | }
66 | )
67 |
--------------------------------------------------------------------------------
/examples/ex5.py:
--------------------------------------------------------------------------------
1 | from langchain.chains import ConversationChain
2 | from langchain.llms import OpenAI
3 | from langchain.memory import ConversationBufferMemory
4 | from langchain.prompts import (
5 | ChatPromptTemplate,
6 | HumanMessagePromptTemplate,
7 | MessagesPlaceholder,
8 | SystemMessagePromptTemplate,
9 | )
10 |
11 | prompt = ChatPromptTemplate.from_messages(
12 | [
13 | SystemMessagePromptTemplate.from_template(
14 | "The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know." # noqa
15 | ),
16 | MessagesPlaceholder(variable_name="history"),
17 | HumanMessagePromptTemplate.from_template("{input}"),
18 | ]
19 | )
20 |
21 | llm = OpenAI(temperature=0)
22 | memory = ConversationBufferMemory(return_messages=True)
23 | conversation = ConversationChain(memory=memory, prompt=prompt, llm=llm)
24 |
25 | if __name__ == "__main__":
26 | print(conversation.run(input="Hi there!"))
27 |
--------------------------------------------------------------------------------
/examples/ex6.py:
--------------------------------------------------------------------------------
1 | from langchain.chains import ConversationChain
2 | from langchain.llms import OpenAI
3 | from langchain.memory import ConversationSummaryMemory
4 |
5 | llm = OpenAI(temperature=0)
6 | conversation_with_summary = ConversationChain(
7 | llm=llm, memory=ConversationSummaryMemory(llm=OpenAI()), verbose=True
8 | )
9 |
10 | if __name__ == "__main__":
11 | conversation_with_summary.predict(input="Hi, what's up?")
12 |
--------------------------------------------------------------------------------
/examples/ex7_agent.py:
--------------------------------------------------------------------------------
1 | from langchain.agents.tools import Tool
2 | from langchain.chains import LLMMathChain
3 | from langchain.chat_models import ChatOpenAI
4 | from langchain.llms import OpenAI
5 | from langchain_experimental.plan_and_execute import (
6 | PlanAndExecute,
7 | load_agent_executor,
8 | load_chat_planner,
9 | )
10 |
11 | llm = OpenAI(temperature=0)
12 | llm_math_chain = LLMMathChain.from_llm(llm=llm, verbose=True)
13 | tools = [
14 | Tool(
15 | name="Calculator",
16 | func=llm_math_chain.run,
17 | description="useful for when you need to answer questions about math",
18 | ),
19 | ]
20 |
21 | model = ChatOpenAI(temperature=0)
22 |
23 | planner = load_chat_planner(model)
24 | executor = load_agent_executor(model, tools, verbose=True)
25 | agent = PlanAndExecute(planner=planner, executor=executor, verbose=True)
26 |
27 |
28 | if __name__ == "__main__":
29 | agent.run(
30 | "Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?"
31 | )
32 |
--------------------------------------------------------------------------------
/examples/ex8.py:
--------------------------------------------------------------------------------
1 | from langchain.chains import RetrievalQA
2 | from langchain.document_loaders import TextLoader
3 | from langchain.embeddings.fake import FakeEmbeddings
4 | from langchain.llms import OpenAI
5 | from langchain.text_splitter import CharacterTextSplitter
6 | from langchain.vectorstores import Chroma
7 |
8 | loader = TextLoader("Readme.md")
9 | documents = loader.load()
10 | text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
11 | texts = text_splitter.split_documents(documents)
12 |
13 | embeddings = FakeEmbeddings(size=1504)
14 | docsearch = Chroma.from_documents(texts, embeddings)
15 |
16 |
17 | qa = RetrievalQA.from_chain_type(
18 | llm=OpenAI(),
19 | chain_type="stuff",
20 | retriever=docsearch.as_retriever(),
21 | return_source_documents=True,
22 | )
23 |
--------------------------------------------------------------------------------
/examples/ex9_double_chain.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from langchain.chains import LLMMathChain
4 | from langchain.llms import OpenAI
5 |
6 | os.environ["OPENAI_API_KEY"] = os.environ.get("OPENAI_API_KEY", "sk-********")
7 |
8 | llm = OpenAI(temperature=0)
9 | chain1 = LLMMathChain.from_llm(llm=llm, verbose=True)
10 | chain2 = LLMMathChain.from_llm(llm=llm, verbose=True)
11 |
--------------------------------------------------------------------------------
/examples/vercel.py:
--------------------------------------------------------------------------------
1 | from fastapi import FastAPI
2 |
3 | from langcorn import create_service
4 |
5 | app: FastAPI = create_service(
6 | "examples.ex1:chain",
7 | "examples.ex2:chain",
8 | "examples.ex3:chain",
9 | "examples.ex4:sequential_chain",
10 | "examples.ex5:conversation",
11 | "examples.ex6:conversation_with_summary",
12 | "examples.ex7_agent:agent",
13 | "examples.ex9_double_chain:chain1",
14 | "examples.ex9_double_chain:chain2",
15 | "examples.ex10_cc:chain",
16 | )
17 |
--------------------------------------------------------------------------------
/langcorn/__init__.py:
--------------------------------------------------------------------------------
1 | from langcorn.server.api import create_service
2 |
3 | __all__ = ["create_service"]
4 |
--------------------------------------------------------------------------------
/langcorn/__main__.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 |
4 | import fire
5 | import uvicorn
6 |
7 | from langcorn.server import api
8 |
9 |
10 | class T:
11 | def server(self, *lc: str, port=8718, auth_token=""):
12 | sys.path.append(os.path.dirname("."))
13 | app = api.create_service(*lc, auth_token=auth_token)
14 | config = uvicorn.Config(app, port=port, log_level="info")
15 | server = uvicorn.Server(config)
16 | server.run()
17 | return
18 |
19 |
20 | def entrypoint():
21 | fire.Fire(T())
22 |
23 |
24 | if __name__ == "__main__":
25 | entrypoint()
26 |
--------------------------------------------------------------------------------
/langcorn/server/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/msoedov/langcorn/f7fde47ee609c0623e7cc629eb01348bde3cf9c1/langcorn/server/__init__.py
--------------------------------------------------------------------------------
/langcorn/server/api.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import types
4 | from typing import Any, Union
5 |
6 | from fastapi import Depends, FastAPI, Header, HTTPException, Request
7 | from fastapi.security.utils import get_authorization_scheme_param
8 | from langchain.callbacks import get_openai_callback
9 | from langchain.chains.base import Chain
10 | from langchain.schema import messages_from_dict, messages_to_dict
11 | from loguru import logger
12 | from pydantic import BaseModel
13 | from uvicorn.importer import import_from_string
14 |
15 | TRACK_USAGE = True
16 |
17 | # TODO: improve logging
18 | logger.remove(0)
19 | logger.add(
20 | sys.stderr,
21 | format="[{level}] {time:YYYY-MM-DD HH:mm:ss.SS} | {module}:{function}:{line} | {message}",
22 | colorize=True,
23 | level="INFO",
24 | )
25 |
26 |
27 | class LangRequest(BaseModel):
28 | prompt: str
29 |
30 |
31 | class MemoryData(BaseModel):
32 | content: str
33 | additional_kwargs: dict[str, Any]
34 |
35 |
36 | class Memory(BaseModel):
37 | type: str
38 | data: MemoryData
39 |
40 |
41 | class LangResponse(BaseModel):
42 | output: Union[str, dict[str, str]]
43 | error: str
44 | memory: list[Memory]
45 |
46 |
47 | class LangResponseDocuments(LangResponse):
48 | source_documents: list[str]
49 |
50 |
51 | def authenticate_or_401(auth_token):
52 | if not auth_token:
53 | # Auth is not enabled.
54 | def dummy():
55 | return
56 |
57 | return dummy
58 |
59 | def verify_auth(authorization: str = Header(...)):
60 | scheme, credentials = get_authorization_scheme_param(authorization)
61 |         if auth_token != credentials:
62 |             raise HTTPException(status_code=401, detail="Token verification failed")
63 |         logger.info("Authorized using integration token")
64 |         return
65 |
66 | return verify_auth
67 |
68 |
69 | class FnWrapper:
70 | memory = []
71 | output_key = ["output"]
72 | input_variables = ["query"]
73 | output_variables = "output"
74 | output_keys = ["output"]
75 |
76 | def __init__(self, fn):
77 | self.fn = fn
78 |
79 | def run(self, prompt: str):
80 | r = self.fn(query=prompt)
81 | return r.dict()
82 |
83 |
84 | def _derive_output(language_app: Chain) -> list[str]:
85 | if hasattr(language_app, "output_variables"):
86 | return language_app.output_variables
87 | elif hasattr(language_app, "output_keys"):
88 | return language_app.output_keys
89 | elif hasattr(language_app, "output_key"):
90 |         return [language_app.output_key]
91 | return ["output"]
92 |
93 |
94 | def derive_fields(language_app: Chain) -> tuple[list[str], list[str]]:
95 | if hasattr(language_app, "input_variables"):
96 | return language_app.input_variables, _derive_output(language_app)
97 | elif hasattr(language_app, "prompt"):
98 | return language_app.prompt.input_variables, [language_app.output_key]
99 | elif hasattr(language_app, "input_keys"):
100 | return language_app.input_keys, _derive_output(language_app)
101 | return [language_app.input_key], _derive_output(language_app)
102 |
103 |
104 | def derive_class(name, fields, add_memory=False) -> type[BaseModel]:
105 | annotations = {f: str for f in fields}
106 | if add_memory:
107 | annotations["memory"] = list[dict]
108 | if "chat_history" in annotations:
109 | annotations["chat_history"] = list[list[str]]
110 | return type(f"Lang{name}", (BaseModel,), {"__annotations__": annotations})
111 |
112 |
113 | def set_openai_key(new_key: str) -> str:
114 | if not new_key:
115 | return
116 | import openai
117 |
118 | prev = openai.api_key
119 | openai.api_key = new_key
120 | return prev
121 |
122 |
123 | def extract_llm_kwargs(http_headers: dict[str, str]) -> dict[str, Any]:
124 | llm_kwargs = {}
125 | if "x-llm-temperature" in http_headers:
126 | llm_kwargs["temperature"] = float(http_headers["x-llm-temperature"])
127 | if "x-max-tokens" in http_headers:
128 | llm_kwargs["max_tokens"] = int(http_headers["x-max-tokens"])
129 | if "x-model-name" in http_headers:
130 | llm_kwargs["model_name"] = http_headers["x-model-name"]
131 | return llm_kwargs
132 |
133 |
134 | def configure_llm(chain, http_headers: dict[str, str]):
135 | llm_kwargs = extract_llm_kwargs(http_headers)
136 | if not llm_kwargs:
137 | return
138 | # TODO: refactor to type switch
139 | if hasattr(chain, "llm_chain"):
140 | return configure_llm(chain.llm_chain, http_headers)
141 | elif hasattr(chain, "llm_kwargs"):
142 | chain.llm_kwargs = llm_kwargs
143 | return True
144 | elif hasattr(chain, "llm"):
145 | for k, v in llm_kwargs.items():
146 | setattr(chain.llm, k, v)
147 | return True
148 | return False
149 |
150 |
151 | def add_chat_history(run_params):
152 | if "chat_history" in run_params:
153 | run_params["chat_history"] = [tuple(t) for t in run_params["chat_history"]]
154 |
155 |
156 | def make_handler(request_cls, chain):
157 | async def handler(request: request_cls, http_request: Request):
158 | llm_api_key = http_request.headers.get("x-llm-api-key")
159 | retrieval_chain = len(chain.output_keys) > 1
160 | try:
161 | api_key = set_openai_key(llm_api_key)
162 | configure_llm(chain, http_request.headers)
163 | run_params = request.dict()
164 | memory = run_params.pop("memory", [])
165 | if chain.memory and memory and memory[0]:
166 | chain.memory.chat_memory.messages = messages_from_dict(memory)
167 | add_chat_history(run_params)
168 | with get_openai_callback() as cb:
169 | if not retrieval_chain:
170 | output = chain.run(run_params)
171 | else:
172 | output = chain(run_params)
173 | if TRACK_USAGE:
174 | print(cb)
175 |
176 | # add error handling
177 | memory = (
178 | []
179 | if not chain.memory
180 | else messages_to_dict(chain.memory.chat_memory.messages)
181 | )
182 | except Exception as e:
183 | raise HTTPException(status_code=500, detail=dict(error=str(e)))
184 | finally:
185 | set_openai_key(api_key)
186 | if retrieval_chain:
187 | return LangResponseDocuments(
188 | output=output.get("result", output),
189 | error="",
190 | memory=memory,
191 | source_documents=[str(t) for t in output.get("source_documents", [])],
192 | )
193 | return LangResponse(output=output, error="", memory=memory)
194 |
195 | return handler
196 |
197 |
198 | def create_service(*lc_apps, auth_token: str = "", app: FastAPI = None):
199 | # Make local modules discoverable
200 | sys.path.append(os.path.dirname("."))
201 | logger.info("Creating service")
202 | app = app or FastAPI()
203 | endpoints = ["/docs"]
204 |
205 | _authenticate_or_401 = Depends(authenticate_or_401(auth_token=auth_token))
206 | if lc_apps and isinstance(import_from_string(lc_apps[0]), FastAPI):
207 | raise RuntimeError(
208 | "Improperly configured: FastAPI instance passed instead of LangChain interface"
209 | )
210 | for lang_app in lc_apps:
211 | chain = import_from_string(lang_app)
212 | if isinstance(chain, types.FunctionType):
213 | chain = FnWrapper(chain)
214 | inn, out = derive_fields(chain)
215 | logger.debug(f"inputs:{inn=}")
216 | logger.info(f"{lang_app=}:{chain.__class__.__name__}({inn})")
217 | endpoint_prefix = lang_app.replace(":", ".")
218 | cls_name = "".join([c.capitalize() for c in endpoint_prefix.split(".")])
219 | request_cls = derive_class(cls_name, inn, add_memory=chain.memory)
220 | logger.debug(f"{request_cls=}")
221 |
222 | endpoints.append(f"/{endpoint_prefix}/run")
223 | # avoid hoisting issues with handler(request)
224 | app.post(
225 | f"/{endpoint_prefix}/run",
226 | response_model=LangResponse,
227 | dependencies=[_authenticate_or_401],
228 | name=lang_app,
229 | )(make_handler(request_cls, chain))
230 |
231 | @app.get("/ht")
232 | async def health_check():
233 | return dict(functions=[*lc_apps])
234 |
235 | logger.info("Serving")
236 | for endpoint in endpoints:
237 | logger.info(f"Endpoint: {endpoint}")
238 | return app
239 |
--------------------------------------------------------------------------------
/langcorn/server/test_api.py:
--------------------------------------------------------------------------------
1 | from unittest.mock import patch
2 |
3 | import pytest
4 | from fastapi.testclient import TestClient
5 | from langchain.llms.fake import FakeListLLM
6 |
7 | from examples import app
8 |
9 | from .api import create_service
10 |
11 | client = TestClient(create_service("examples.ex1:chain"))
12 |
13 |
14 | @pytest.fixture(autouse=True)
15 | def suppress_openai():
16 | llm = FakeListLLM(responses=["FakeListLLM" for i in range(100)])
17 | with patch("langchain.llms.OpenAI._generate", new=llm._generate), patch(
18 | "langchain.llms.OpenAI._agenerate", new=llm._agenerate
19 | ):
20 | yield
21 |
22 |
23 | @pytest.fixture(autouse=True)
24 | def suppress_openai_math():
25 | llm = FakeListLLM(responses=["Answer: 1" for i in range(100)])
26 | with patch("langchain.llms.OpenAI._generate", new=llm._generate), patch(
27 | "langchain.llms.OpenAI._agenerate", new=llm._agenerate
28 | ):
29 | yield
30 |
31 |
32 | @pytest.fixture(autouse=True)
33 | def example_app():
34 | yield TestClient(
35 | app.app,
36 | headers={"x-llm-temperature": "0.7", "x-max-tokens": "100", "x-random": "1"},
37 | )
38 |
39 |
40 | @pytest.fixture(
41 | scope="session",
42 | )
43 | def fn_executor():
44 | yield None
45 |
46 |
47 | class TestRoutes:
48 | def test_examples(self, example_app):
49 | response = example_app.get("/")
50 | assert response.status_code == 404
51 |
52 | def test_read_main(self):
53 | response = client.get("/")
54 | assert response.status_code == 404
55 |
56 | def test_state(self):
57 | response = client.get("/ht")
58 | assert response.status_code == 200
59 | assert response.json()
60 |
61 | # TODO: add error handling
62 | @pytest.mark.parametrize(
63 | "apps",
64 | [("examples.ex1:chain",), ("examples.ex2:chain", "examples.ex1:chain")],
65 | )
66 | def test_create_service(self, apps):
67 | client = TestClient(create_service(*apps))
68 | response = client.get("/")
69 | assert response.status_code == 404
70 |
71 | def test_chain_x(self, suppress_openai, example_app):
72 | response = example_app.post("/examples.ex8.qa/run", json=dict(query="query"))
73 | assert response.status_code == 200, response.text
74 | assert response.json()
75 |
76 | @pytest.mark.parametrize(
77 | "endpoint, query",
78 | [
79 | ("/examples.ex1.chain/run", dict(product="QUERY")),
80 | (
81 | "/examples.ex2.chain/run",
82 | dict(
83 | input="QUERY",
84 | url="https://github.com/msoedov/langcorn/blob/main/examples/ex7_agent.py",
85 | ),
86 | ),
87 | # ("/examples.ex3.chain/run", dict(question="QUERY")), # requires llm response format
88 | (
89 | "/examples.ex4.sequential_chain/run",
90 | dict(
91 | query="QUERY",
92 | url="https://github.com/msoedov/langcorn/blob/main/examples/ex7_agent.py",
93 | ),
94 | ),
95 | (
96 | "/examples.ex5.conversation/run",
97 | dict(input="QUERY", history="", memory=[]),
98 | ),
99 | (
100 | "/examples.ex6.conversation_with_summary/run",
101 | dict(input="QUERY", history="", memory=[]),
102 | ),
103 | # ("/examples.ex7_agent.agent/run", dict(input="QUERY")), # requires llm response format
104 | ("/examples.ex8.qa/run", dict(query="QUERY")),
105 | ],
106 | )
107 | def test_chain_e2e(self, suppress_openai, example_app, endpoint, query):
108 | response = example_app.post(endpoint, json=dict(**query))
109 | assert response.status_code == 200, response.text
110 | assert response.json()
111 |
112 | def test_double_chain(self, suppress_openai_math, example_app):
113 | client = TestClient(
114 | create_service(
115 | "examples.ex9_double_chain:chain1", "examples.ex9_double_chain:chain2"
116 | )
117 | )
118 | response = client.post(
119 | "/examples.ex9_double_chain.chain1/run", json=dict(question="QUERY")
120 | )
121 | assert response.status_code == 200, response.text
122 | assert response.json()
123 | response = client.post(
124 | "/examples.ex9_double_chain.chain2/run", json=dict(question="QUERY")
125 | )
126 | assert response.status_code == 200, response.text
127 | assert response.json()
128 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.poetry]
2 | name = "langcorn"
3 | version = "0.0.22"
4 | description = "A Python package creating a REST API interface for LangChain"
5 | authors = ["Alexander Miasoiedov "]
6 | maintainers = ["Alexander Miasoiedov "]
7 | repository = "https://github.com/msoedov/langcorn"
8 | license = "MIT"
9 | readme = "Readme.md"
10 | keywords = ["nlp", "langchain", "openai", "gpt", "fastapi", "llm", "llmops"]
11 | packages = [{ include = "langcorn", from = "." }]
12 |
13 |
14 | [tool.poetry.scripts]
15 | langcorn = "langcorn.__main__:entrypoint"
16 |
17 | [tool.poetry.dependencies]
18 | python = "^3.9"
19 | fastapi = ">=0.104.1,<0.110.0"
20 | uvicorn = "^0.23.2"
21 | langchain = "^0.0.331"
22 | openai = "^0.28.1"
23 | fire = "^0.5.0"
24 | loguru = "^0.7.2"
25 | bs4 = "0.0.1" # required for ex4.py
26 | langchain-experimental = "^0.0.37"
27 | certifi = "^2023.7.22"
28 | numexpr = "^2.8.7"
29 | pydantic = "^1.10.13"
30 |
31 | [tool.poetry.group.dev.dependencies]
32 | black = ">=23.10.1,<25.0.0"
33 | mypy = "^1.6.1"
34 | httpx = "^0.25.1"
35 | pytest = "^7.4.3"
36 | pre-commit = "^3.5.0"
37 | chromadb = "^0.3.2.6" # required for ex8.py
38 |
39 | [tool.ruff]
40 | line-length = 120
41 |
42 | [build-system]
43 | requires = ["poetry-core"]
44 | build-backend = "poetry.core.masonry.api"
45 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | langcorn
2 |
--------------------------------------------------------------------------------
/tests.http:
--------------------------------------------------------------------------------
1 | ###
2 |
3 | POST http://0.0.0.0:3000/examples.ex6/run
4 | Content-Type: application/json
5 |
6 | {
7 | "history": "string",
8 | "input": "string",
9 | "memory": [
10 | {}
11 | ]
12 | }
13 |
14 |
15 | ###
16 |
17 | POST http://0.0.0.0:3000/examples.ex6/run
18 | X-LLM-API-KEY: sk-invalid-key
19 | Content-Type: application/json
20 |
21 | {
22 | "history": "string",
23 | "input": "string",
24 | "memory": [
25 | {}
26 | ]
27 | }
28 |
29 | ###
30 |
31 | POST http://0.0.0.0:8718/examples.ex14.chain/run
32 | X-LLM-API-KEY: sk-invalid-key
33 | Content-Type: application/json
34 |
35 | {
36 | "chat_history": [[
37 | "string",
38 | "string"
39 | ]],
40 | "question": "string",
41 | "memory": [
42 | {}
43 | ]
44 | }
45 |
--------------------------------------------------------------------------------
/vercel.json:
--------------------------------------------------------------------------------
1 | {
2 | "devCommand": "uvicorn vercel:app --host 0.0.0.0 --port 3000",
3 | "builds": [
4 | {
5 | "src": "examples/vercel.py",
6 | "use": "@vercel/python"
7 | }
8 | ],
9 | "routes": [
10 | {
11 | "src": "/(.*)",
12 | "dest": "examples/vercel.py"
13 | }
14 | ]
15 | }
16 |
--------------------------------------------------------------------------------