├── .flake8
├── .github
│   └── workflows
│       ├── lint.yml
│       └── test.yml
├── .gitignore
├── LICENSE
├── Makefile
├── README.md
├── langchain_visualizer
│   ├── __init__.py
│   ├── agents
│   │   ├── __init__.py
│   │   └── tools.py
│   ├── chains
│   │   ├── __init__.py
│   │   └── base.py
│   ├── embeddings
│   │   └── __init__.py
│   ├── hijacking.py
│   ├── ice.py
│   ├── jupyter.py
│   ├── llms
│   │   ├── __init__.py
│   │   └── base.py
│   ├── prompts
│   │   ├── __init__.py
│   │   ├── few_shot.py
│   │   └── prompt.py
│   ├── py.typed
│   └── visualize.py
├── poetry.lock
├── pyproject.toml
├── screenshots
│   └── serp_screenshot.png
└── tests
    ├── __init__.py
    ├── agents
    │   ├── __init__.py
    │   ├── mrkl_chat_demo.yaml
    │   ├── openai_functions_demo.yaml
    │   ├── openai_multifunctions_demo.yaml
    │   ├── search_agent_demo.yaml
    │   ├── structured_tool_chat_demo.yaml
    │   ├── test_langchain_getting_started.py
    │   ├── test_langchain_without_vcr.py
    │   ├── test_mrkl_chat.py
    │   ├── test_openai_functions.py
    │   ├── test_openai_multifunctions.py
    │   └── test_structured_tool_chat_agent.py
    ├── chains
    │   ├── __init__.py
    │   ├── bash_chain_demo.yaml
    │   ├── foundational
    │   │   ├── __init__.py
    │   │   ├── router_demo.yaml
    │   │   ├── router_embedding_demo.yaml
    │   │   ├── test_router.py
    │   │   └── test_router_embedding.py
    │   ├── langchain_getting_started
    │   │   ├── __init__.py
    │   │   ├── custom_chain_demo.yaml
    │   │   ├── llm_chain_demo.yaml
    │   │   ├── sequential_chain_demo.yaml
    │   │   ├── test_custom_chain.py
    │   │   ├── test_llm_chain.py
    │   │   └── test_sequential_chain.py
    │   ├── langchain_how_to
    │   │   ├── __init__.py
    │   │   ├── combine_documents_chains
    │   │   │   ├── __init__.py
    │   │   │   ├── map_rerank_demo.yaml
    │   │   │   ├── mapreduce_demo.yaml
    │   │   │   ├── quickstart_demo.yaml
    │   │   │   ├── refine_demo.yaml
    │   │   │   ├── test_map_rerank.py
    │   │   │   ├── test_mapreduce.py
    │   │   │   ├── test_quickstart.py
    │   │   │   └── test_refine.py
    │   │   ├── sequential_chain_demo.yaml
    │   │   ├── simple_sequential_chain_demo.yaml
    │   │   ├── test_async.py
    │   │   ├── test_async_api_demo.yaml
    │   │   ├── test_sequential_chain.py
    │   │   ├── test_simple_sequential_chain.py
    │   │   └── utility_chains
    │   │       ├── __init__.py
    │   │       └── llm_checker_chain.py
    │   └── test_bash_chain.py
    ├── demo.ipynb
    ├── dummy_viz.py
    ├── llms
    │   ├── __init__.py
    │   ├── chatgpt_demo.yaml
    │   ├── getting_started_demo.yaml
    │   ├── test_chatgpt.py
    │   └── test_langchain_getting_started.py
    ├── memory
    │   ├── __init__.py
    │   └── langchain_getting_started
    │       ├── __init__.py
    │       ├── conversation_summary_memory_demo.yaml
    │       └── test_conversation_summary_memory.py
    ├── prompts
    │   ├── __init__.py
    │   ├── langchain_getting_started
    │   │   ├── __init__.py
    │   │   ├── dynamic_prompt_demo.yaml
    │   │   ├── few_shot_prompt_demo.yaml
    │   │   ├── multiple_inputs_prompt_demo.yaml
    │   │   ├── no_inputs_prompt_demo.yaml
    │   │   ├── one_input_prompt_demo.yaml
    │   │   ├── test_dynamic_prompt.py
    │   │   ├── test_few_shot.py
    │   │   ├── test_multiple_inputs.py
    │   │   ├── test_no_inputs.py
    │   │   └── test_one_input.py
    │   ├── partial
    │   │   ├── __init__.py
    │   │   ├── test_partial_with_strings.yaml
    │   │   └── test_with_strings.py
    │   ├── test_few_shot_prompt_template_f.py
    │   └── test_prompt_template_f.py
    ├── resources
    │   ├── Chinook.db
    │   ├── sotu_faiss.pkl
    │   └── state_of_the_union.txt
    ├── sotu.py
    └── test_cli_args.py

--------------------------------------------------------------------------------
/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | exclude =
3 |     venv
4 |     .venv
5 |     __pycache__
6 |     notebooks
7 | # Recommend matching the black line length (default 88),
8 | # rather than using the flake8 default of 79:
9 | max-line-length = 88
10 | extend-ignore =
11 |     # See https://github.com/PyCQA/pycodestyle/issues/373
12 |     E203,
13 | 
--------------------------------------------------------------------------------
/.github/workflows/lint.yml:
--------------------------------------------------------------------------------
1 | name: lint
2 | 
3 | on:
4 |   push:
5 |     branches: [main]
6 |   pull_request:
7 | 
8 | env:
9 |   POETRY_VERSION: "1.3.1"
10 | 
11 | jobs:
12 |   build:
13 |     name: Lint
14 |     runs-on: ubuntu-latest
15 |     strategy:
16 |       matrix:
17 |         python-version:
18 |           - "3.9"
19 |           - "3.10"
20 |           - "3.11"
21 |     steps:
22 |       - uses: actions/checkout@v3
23 |       - name: Install poetry
24 |         run: |
25 |           pipx install poetry==$POETRY_VERSION
26 |       - name: Set up Python ${{ matrix.python-version }}
27 |         uses: actions/setup-python@v4
28 |         with:
29 |           python-version: ${{ matrix.python-version }}
30 |           cache: poetry
31 |       - name: Install dependencies
32 |         run: |
33 |           poetry install
34 |       - name: Analyse the code with our linters
35 |         run: |
36 |           make lint
37 | 
--------------------------------------------------------------------------------
/.github/workflows/test.yml:
--------------------------------------------------------------------------------
1 | name: test
2 | 
3 | on:
4 |   push:
5 |     branches: [main]
6 |   pull_request:
7 | 
8 | env:
9 |   POETRY_VERSION: "1.3.1"
10 |   OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
11 |   SERPAPI_API_KEY: ${{ secrets.SERPAPI_API_KEY }}
12 | 
13 | jobs:
14 |   build:
15 |     name: Test
16 |     runs-on: ubuntu-latest
17 |     strategy:
18 |       matrix:
19 |         python-version:
20 |           - "3.9"
21 |           - "3.10"
22 |           - "3.11"
23 |     steps:
24 |       - uses: actions/checkout@v3
25 |       - name: Install poetry
26 |         run: pipx install poetry==$POETRY_VERSION
27 |       - name: Set up Python ${{ matrix.python-version }}
28 |         uses: actions/setup-python@v4
29 |         with:
30 |           python-version: ${{ matrix.python-version }}
31 |           cache: "poetry"
32 |       - name: Install dependencies
33 |         run: poetry install
34 |       - name: Download playwright browsers
35 |         run: poetry run playwright install
36 |       - name: Run unit tests
37 |         run: |
38 |           make tests-ci
39 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 | 
6 | # C extensions
7 | *.so
8 | 
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 | 
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 | 
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 | 
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 | 
54 | # Translations
55 | *.mo
56 | *.pot
57 | 
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 | 
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 | 
68 | # Scrapy stuff:
69 | .scrapy
70 | 
71 | # Sphinx documentation
72 | docs/_build/
73 | 
74 | # PyBuilder
75 | target/
76 | 
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 | 
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 | 
84 | # pyenv
85 | .python-version
86 | 
87 | # pipenv
88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
91 | # install all needed dependencies.
92 | #Pipfile.lock
93 | 
94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95 | __pypackages__/
96 | 
97 | # Celery stuff
98 | celerybeat-schedule
99 | celerybeat.pid
100 | 
101 | # SageMath parsed files
102 | *.sage.py
103 | 
104 | # Environments
105 | .env
106 | .venv
107 | env/
108 | venv/
109 | ENV/
110 | env.bak/
111 | venv.bak/
112 | 
113 | # Spyder project settings
114 | .spyderproject
115 | .spyproject
116 | 
117 | # Rope project settings
118 | .ropeproject
119 | 
120 | # mkdocs documentation
121 | /site
122 | 
123 | # mypy
124 | .mypy_cache/
125 | .dmypy.json
126 | dmypy.json
127 | 
128 | # Pyre type checker
129 | .pyre/
130 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 | 
3 | Copyright (c) 2023 Amos Ng
4 | 
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | .PHONY: format lint tests
2 | 
3 | all: format lint test
4 | 
5 | format:
6 | 	poetry run autoflake .
7 | 	poetry run black .
8 | 	poetry run isort .
9 | 
10 | lint:
11 | 	poetry run mypy .
12 | 	poetry run black . --check
13 | 	poetry run isort . --check
14 | 	poetry run flake8 .
15 | 
16 | test: tests
17 | tests:
18 | 	poetry run pytest -v -k 'not network'
19 | 
20 | tests-ci:
21 | 	poetry run pytest -v
22 | 
23 | clean:
24 | 	find . -type f -name '*.py[co]' -delete -o -type d -name __pycache__ -delete
25 | 
26 | clean-tests:
27 | 	find . -name "*.yaml" -type f | xargs rm -f
28 | 
29 | release:
30 | 	test -z "$$(git status --porcelain)"
31 | 	git checkout main
32 | 	git pull
33 | 	poetry version patch
34 | 	git checkout -b "release/v$$(poetry version -s)"
35 | 	git commit -am "Releasing version v$$(poetry version -s)"
36 | 	git tag -a -m "Releasing version v$$(poetry version -s)" "v$$(poetry version -s)"
37 | 	poetry publish --build --username $$PYPI_USERNAME --password $$PYPI_PASSWORD
38 | 	# git push at the very end to get Github PR link
39 | 	git push --set-upstream origin "release/v$$(poetry version -s)"
40 | 	# --follow-tags seems to suppress Github message output
41 | 	git push --follow-tags
42 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # LangChain Visualizer
2 | 
3 | Adapts [Ought's ICE visualizer](https://github.com/oughtinc/ice) for use with [LangChain](https://github.com/hwchase17/langchain) so that you can view LangChain interactions with a beautiful UI.
4 | 
5 | ![Screenshot of an execution run](screenshots/serp_screenshot.png "SERP agent demonstration")
6 | 
7 | You can now:
8 | 
9 | - See the full prompt text being sent with every interaction with the LLM
10 | - Tell from the coloring which parts of the prompt are hardcoded and which parts are templated substitutions
11 | - Inspect the execution flow and observe when each function call goes up and down the stack
12 | - See the costs of each LLM call, and of the entire run, if you are using OpenAI's `text-davinci-003` model
13 | 
14 | ## Quickstart
15 | 
16 | Install this library:
17 | 
18 | ```bash
19 | pip install langchain-visualizer
20 | ```
21 | 
22 | Note that if you're on a Linux distribution, you may need to install libyaml first:
23 | 
24 | ```bash
25 | apt install -y libyaml-dev
26 | ```
27 | 
28 | Then:
29 | 
30 | 1. Add `import langchain_visualizer` as **the first import** in your Python entrypoint file
31 | 2. Write an async function to visualize whichever workflow you're running
32 | 3. Call `langchain_visualizer.visualize` on that function
33 | 
34 | For an example, see the instructions below on reproducing the screenshot.
35 | 
36 | 
37 | ### Running the example screenshot
38 | 
39 | To run the example you see in the screenshot, first install this library and its optional dependencies:
40 | 
41 | ```bash
42 | pip install langchain-visualizer google-search-results openai
43 | ```
44 | 
45 | If you haven't yet set up your [OpenAI API keys](https://openai.com/api/) or SERP API keys, you can [replay the recorded interactions](https://github.com/amosjyng/vcr-langchain) by cloning this repository and running:
46 | 
47 | ```bash
48 | $ pip install vcr-langchain
49 | $ OPENAI_API_KEY=dummy python tests/agents/test_langchain_getting_started.py
50 | ```
51 | 
52 | If you have set them up, you can run the following script (adapted from the [LangChain docs](https://langchain.readthedocs.io/en/latest/modules/agents/getting_started.html)):
53 | 
54 | ```python
55 | import langchain_visualizer
56 | import asyncio
57 | from langchain.agents import initialize_agent, load_tools
58 | from langchain.llms import OpenAI
59 | 
60 | llm = OpenAI(temperature=0.7)
61 | tools = load_tools(["serpapi", "llm-math"], llm=llm)
62 | agent = initialize_agent(tools, llm, agent="zero-shot-react-description", verbose=True)
63 | async def search_agent_demo():
64 |     return agent.run(
65 |         "Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 "
66 |         "power?"
67 |     )
68 | 
69 | langchain_visualizer.visualize(search_agent_demo)
70 | ```
71 | 
72 | A browser window will open up, and you can watch the agent execution happen in real time!
73 | 
74 | ### Jupyter notebook support
75 | 
76 | Jupyter notebooks are now supported! To use this inside a Jupyter notebook, **make sure to import the `visualize` function from `langchain_visualizer.jupyter` instead.**
77 | 
78 | Please look at [the demo notebook](/tests/demo.ipynb) to see an example of how it can be used in Jupyter; a minimal sketch is also included at the end of this README.
79 | 
80 | ### Visualizing embeddings
81 | 
82 | If you also want to visualize documents being chunked up for embeddings, you can now do so by calling the `visualize_embeddings` function before you visualize the main chain:
83 | 
84 | ```python
85 | from langchain_visualizer import visualize, visualize_embeddings
86 | 
87 | async def run_chain():
88 |     ...
89 | 
90 | visualize_embeddings()
91 | visualize(run_chain)
92 | ```
93 | 
94 | ## Why not just use LangChain's built-in tracer?
95 | 
96 | For me personally:
97 | 
98 | - I prefer the ICE UI. In particular:
99 |   - I like the colored highlighting of parts of the prompt that are filled-in template variables
100 |   - I like the ability to quickly inspect different LLM calls without leaving the trace page
101 | - I prefer the visualization of my agent logic to remain static when LLM calls are cached
102 | - I prefer seeing when the tool (e.g. `PythonREPL`) actually gets called, rather than just the high-level execution of the chain (e.g. `LLMMathChain`)
103 | 
104 | That being said, LangChain's tracer is definitely better supported. **Please note that there is a lot of LangChain functionality that I haven't gotten around to hijacking for visualization.** If there's anything you need to show up in the execution trace, please open a PR or issue.
105 | 
106 | ## My other projects
107 | 
108 | Please check out [VCR LangChain](https://github.com/amosjyng/vcr-langchain), a library that lets you record LLM interactions for your tests and demos!
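
### Appendix: minimal Jupyter sketch

As referenced in the Jupyter section above, here is a minimal sketch of notebook usage. The async `run_chain` function is a hypothetical placeholder for whatever workflow you want to trace; `width` and `height` are the optional IFrame dimensions accepted by `langchain_visualizer.jupyter.visualize`:

```python
from langchain_visualizer.jupyter import visualize

async def run_chain():
    ...  # your LangChain calls go here (hypothetical placeholder)

# Returns an IPython IFrame that renders the ICE trace inline in the notebook
visualize(run_chain, width=1000, height=500)
```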
109 | -------------------------------------------------------------------------------- /langchain_visualizer/__init__.py: -------------------------------------------------------------------------------- 1 | # override ICE to_json_value before anything else starts importing other ICE stuff 2 | # isort: off 3 | 4 | from .ice import to_json_value # noqa 5 | 6 | # isort: on 7 | 8 | from .agents.tools import SerpAPIWrapper # noqa 9 | from .chains.base import Chain # noqa 10 | from .embeddings import visualize_embeddings # noqa 11 | from .llms.base import BaseLLM # noqa 12 | from .prompts.few_shot import FewShotPromptTemplate # noqa 13 | from .prompts.prompt import new_format # noqa 14 | from .visualize import visualize 15 | 16 | __all__ = [ 17 | "visualize", 18 | ] 19 | -------------------------------------------------------------------------------- /langchain_visualizer/agents/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amosjyng/langchain-visualizer/e51a63e5842a02548927532e1926c2b409ba43d1/langchain_visualizer/agents/__init__.py -------------------------------------------------------------------------------- /langchain_visualizer/agents/tools.py: -------------------------------------------------------------------------------- 1 | from langchain.python import PythonREPL 2 | from langchain.serpapi import SerpAPIWrapper 3 | from langchain.sql_database import SQLDatabase 4 | from langchain.tools.playwright.click import ClickTool 5 | from langchain.tools.playwright.current_page import CurrentWebPageTool 6 | from langchain.tools.playwright.extract_hyperlinks import ExtractHyperlinksTool 7 | from langchain.tools.playwright.extract_text import ExtractTextTool 8 | from langchain.tools.playwright.get_elements import GetElementsTool 9 | from langchain.tools.playwright.navigate import NavigateTool 10 | from langchain.tools.playwright.navigate_back import NavigateBackTool 11 | 12 | from langchain_visualizer.hijacking import ice_hijack 13 | 14 | ice_hijack(SerpAPIWrapper, "run") 15 | ice_hijack(PythonREPL, "run") 16 | ice_hijack(SQLDatabase, "run") 17 | 18 | try: 19 | from langchain_experimental.llm_bash.base import BashProcess 20 | 21 | ice_hijack(BashProcess, "run") 22 | except ImportError: 23 | pass 24 | 25 | ice_hijack(ClickTool, "arun") 26 | ice_hijack(CurrentWebPageTool, "arun") 27 | ice_hijack(ExtractHyperlinksTool, "arun") 28 | ice_hijack(ExtractTextTool, "arun") 29 | ice_hijack(GetElementsTool, "arun") 30 | ice_hijack(NavigateTool, "arun") 31 | ice_hijack(NavigateBackTool, "arun") 32 | -------------------------------------------------------------------------------- /langchain_visualizer/chains/__init__.py: -------------------------------------------------------------------------------- 1 | from langchain.chains.llm import LLMChain 2 | 3 | from .base import Chain 4 | 5 | Chain._should_trace = True # type: ignore 6 | LLMChain._should_trace = False # type: ignore 7 | -------------------------------------------------------------------------------- /langchain_visualizer/chains/base.py: -------------------------------------------------------------------------------- 1 | from langchain.chains.base import Chain 2 | 3 | from langchain_visualizer.hijacking import ice_hijack 4 | 5 | ice_hijack(Chain, "__call__") 6 | ice_hijack(Chain, "acall") 7 | -------------------------------------------------------------------------------- /langchain_visualizer/embeddings/__init__.py: 
-------------------------------------------------------------------------------- 1 | from langchain.embeddings.openai import OpenAIEmbeddings 2 | 3 | from langchain_visualizer.hijacking import ice_hijack 4 | 5 | 6 | def visualize_embeddings(): 7 | ice_hijack(OpenAIEmbeddings, "embed_documents") 8 | -------------------------------------------------------------------------------- /langchain_visualizer/hijacking.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import inspect 3 | 4 | import gorilla 5 | from ice.trace import TracedABC 6 | 7 | LANGCHAIN_VISUALIZER_PATCH_ID = "lc-viz" 8 | # override prefix used by vcr-langchain for visualization compatibility 9 | VCR_VIZ_INTEROP_PREFIX = "_vcr_" 10 | 11 | 12 | class VisualizationWrapper(TracedABC): 13 | def __init__(self, og_obj, og_fn): 14 | self.og_obj = og_obj 15 | self.og_fn = og_fn 16 | 17 | @property 18 | def is_async(self): 19 | return inspect.iscoroutinefunction(self.og_fn) 20 | 21 | async def run(self, *args, **kwargs): 22 | # Async function that gets visualized. 23 | # 24 | # Invocation of this function is what triggers visibility in the ICE execution 25 | # graph. Override this function if you want args and kwargs to be visualized as 26 | # actual named arguments rather than as arrays and dicts. 27 | # 28 | # The docstring gets visualized too, which is why this documentation is not in 29 | # the docstring. 30 | if self.is_async: 31 | return await self.og_fn(self.og_obj, *args, **kwargs) 32 | else: 33 | return self.og_fn(self.og_obj, *args, **kwargs) 34 | 35 | 36 | def get_viz_wrapper(viz_cls, og_self, og_fn_name: str): 37 | """ 38 | Return the visualization wrapper object. 39 | 40 | This object's "run" function will be what triggers ICE execution capture. 41 | """ 42 | vcr_key = VCR_VIZ_INTEROP_PREFIX + og_fn_name 43 | if hasattr(og_self.__class__, vcr_key): 44 | # if vcr-langchain is here as well, call them so that caching can 45 | # happen 46 | og_fn = getattr(og_self.__class__, vcr_key) 47 | else: 48 | og_fn = gorilla.get_original_attribute( 49 | og_self.__class__, og_fn_name, LANGCHAIN_VISUALIZER_PATCH_ID 50 | ) 51 | return viz_cls(og_self, og_fn=og_fn) 52 | 53 | 54 | def get_overridden_call(viz_cls, og_method_name): 55 | """ 56 | Get the new function that will override the original function. 57 | 58 | The returned function will end up calling the original function, but in a way that 59 | causes the call to be recorded by ICE. 60 | """ 61 | 62 | def overridden_call(og_self, *args, **kwargs): 63 | ice_agent = get_viz_wrapper(viz_cls, og_self, og_method_name) 64 | if ( 65 | not hasattr(og_self.__class__, "_should_trace") 66 | or og_self.__class__._should_trace 67 | ): 68 | # ICE displays class name in visualization 69 | ice_agent.__class__.__name__ = og_self.__class__.__name__ 70 | # this is not the original class's "run" function -- in fact, the original 71 | # function can be named anything, since the name is stored in 72 | # og_method_name. Instead, this is the visualization wrapper's "run" 73 | # function, which is what gets visualized. 74 | return asyncio.get_event_loop().run_until_complete( 75 | ice_agent.run(*args, **kwargs) 76 | ) 77 | 78 | return ice_agent.og_fn(og_self, *args, **kwargs) 79 | 80 | return overridden_call 81 | 82 | 83 | def get_async_overridden_call(viz_cls, og_method_name): 84 | """ 85 | Like get_overridden_call, but returns an async override. 
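    Unlike the sync override, which drives the wrapper's ``run`` coroutine with
    ``run_until_complete``, the async override awaits it directly, so the call
    stays on the caller's already-running event loop.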
86 | """ 87 | 88 | async def overridden_call(og_self, *args, **kwargs): 89 | ice_agent = get_viz_wrapper(viz_cls, og_self, og_method_name) 90 | if ( 91 | not hasattr(og_self.__class__, "_should_trace") 92 | or og_self.__class__._should_trace 93 | ): 94 | # ICE displays class name in visualization 95 | ice_agent.__class__.__name__ = og_self.__class__.__name__ 96 | # this is not the original class's "run" function -- in fact, the original 97 | # function can be named anything, since the name is stored in 98 | # og_method_name. Instead, this is the visualization wrapper's "run" 99 | # function, which is what gets visualized. 100 | return await ice_agent.run(*args, **kwargs) 101 | 102 | return await ice_agent.og_fn(og_self, *args, **kwargs) 103 | 104 | return overridden_call 105 | 106 | 107 | def hijack(cls, fn_name, get_replacement): 108 | replacement = get_replacement(getattr(cls, fn_name)) 109 | setattr(cls, fn_name, replacement) 110 | 111 | 112 | def ice_hijack(cls, og_method_name, viz_cls=VisualizationWrapper): 113 | """ 114 | Hijack cls.og_method_name to refer to an overridden call. 115 | 116 | The overridden call will have a chance to call the original function. 117 | """ 118 | og_fn = getattr(cls, og_method_name) 119 | is_async = inspect.iscoroutinefunction(og_fn) 120 | overridden_call = ( 121 | get_async_overridden_call(viz_cls, og_method_name) 122 | if is_async 123 | else get_overridden_call(viz_cls, og_method_name) 124 | ) 125 | gorilla.apply( 126 | gorilla.Patch( 127 | destination=cls, 128 | name=og_method_name, 129 | obj=overridden_call, 130 | settings=gorilla.Settings(store_hit=True, allow_hit=True), 131 | ), 132 | id=LANGCHAIN_VISUALIZER_PATCH_ID, 133 | ) 134 | -------------------------------------------------------------------------------- /langchain_visualizer/ice.py: -------------------------------------------------------------------------------- 1 | import time 2 | from typing import Any, List, Union 3 | from warnings import warn 4 | 5 | from ice import json_value, server 6 | from ice import settings as ice_settings 7 | from langchain.schema import ( 8 | AIMessage, 9 | BaseMessage, 10 | ChatResult, 11 | FunctionMessage, 12 | HumanMessage, 13 | LLMResult, 14 | SystemMessage, 15 | ) 16 | 17 | og_json_value = json_value.to_json_value 18 | 19 | 20 | def to_json_value(x: Any) -> json_value.JSONValue: 21 | if isinstance(x, LLMResult): 22 | regular_generations = x.generations 23 | regular_texts: Union[List[List[str]], List[str], str] = [ 24 | g.text for sublist in regular_generations for g in sublist 25 | ] 26 | if len(regular_texts) == 1: 27 | regular_texts = regular_texts[0] 28 | if len(regular_texts) == 1: 29 | # do it a second time because it's a list of lists 30 | regular_texts = regular_texts[0] 31 | return og_json_value(regular_texts) 32 | elif isinstance(x, ChatResult): 33 | chat_generations = x.generations 34 | chat_messages: List[BaseMessage] = [ 35 | chat_generation.message for chat_generation in chat_generations 36 | ] 37 | if len(chat_messages) == 1: 38 | return og_json_value(chat_messages[0]) 39 | return og_json_value(chat_messages) 40 | elif isinstance(x, SystemMessage): 41 | return { 42 | "System": x.content, 43 | } 44 | elif isinstance(x, AIMessage): 45 | if "function_call" in x.additional_kwargs: 46 | function_call = x.additional_kwargs["function_call"] 47 | return { 48 | "AI (function call)": function_call, 49 | } 50 | else: 51 | return { 52 | "AI": x.content, 53 | } 54 | elif isinstance(x, HumanMessage): 55 | return { 56 | "Human": x.content, 57 | } 58 | elif 
isinstance(x, FunctionMessage): 59 | return { 60 | "Function": x.name, 61 | "Result": x.content, 62 | } 63 | elif isinstance(x, BaseMessage): 64 | warn(f"Unknown message type: {x.type}") 65 | 66 | return og_json_value(x) 67 | 68 | 69 | def wait_until_server_running(): 70 | start_time = time.time() 71 | while not server.is_server_running(): 72 | if time.time() - start_time > server.ICE_WAIT_TIME: 73 | raise TimeoutError( 74 | f"Server didn't start within {server.ICE_WAIT_TIME} seconds" 75 | ) 76 | time.sleep(0.1) 77 | 78 | 79 | json_value.to_json_value = to_json_value 80 | server.ICE_WAIT_TIME = 10 # type: ignore 81 | server.wait_until_server_running = wait_until_server_running 82 | ice_settings.settings.OUGHT_ICE_HOST = "127.0.0.1" 83 | -------------------------------------------------------------------------------- /langchain_visualizer/jupyter.py: -------------------------------------------------------------------------------- 1 | import threading 2 | 3 | from ice.logging import log_lock 4 | from ice.server import ensure_server_running, is_server_running 5 | from ice.settings import settings 6 | from ice.trace import Trace 7 | from IPython.display import IFrame 8 | 9 | from .visualize import visualize as regular_visualize 10 | 11 | latest_viz_url = None 12 | evt = threading.Event() 13 | 14 | 15 | def new_server_and_browser(self): 16 | global evt 17 | global latest_viz_url 18 | # We use this lock to prevent logging from here (which runs in a 19 | # background thread) from burying the input prompt in 20 | # [Settings.__get_and_store]. 21 | with log_lock: 22 | is_running = None 23 | if settings.OUGHT_ICE_AUTO_SERVER: 24 | ensure_server_running() 25 | is_running = True 26 | 27 | if not settings.OUGHT_ICE_AUTO_BROWSER: 28 | return 29 | 30 | is_running = is_running or is_server_running() 31 | if not is_running: 32 | return 33 | 34 | latest_viz_url = self.url 35 | evt.set() 36 | 37 | 38 | Trace._server_and_browser = new_server_and_browser # type: ignore 39 | 40 | 41 | def visualize(fn, width: int = 1000, height: int = 500): 42 | threading.Thread(target=regular_visualize, args=(fn,)).start() 43 | evt.wait(timeout=20) 44 | print(f"Rendering {latest_viz_url} in notebook") 45 | return IFrame(latest_viz_url, width=width, height=height) 46 | -------------------------------------------------------------------------------- /langchain_visualizer/llms/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amosjyng/langchain-visualizer/e51a63e5842a02548927532e1926c2b409ba43d1/langchain_visualizer/llms/__init__.py -------------------------------------------------------------------------------- /langchain_visualizer/llms/base.py: -------------------------------------------------------------------------------- 1 | from typing import Any, List, Optional, Union 2 | 3 | from ice.trace import add_fields 4 | from langchain.callbacks.manager import ( 5 | AsyncCallbackManagerForLLMRun, 6 | CallbackManagerForLLMRun, 7 | Callbacks, 8 | ) 9 | from langchain.chat_models import ChatOpenAI 10 | from langchain.llms.base import BaseLLM 11 | from langchain.llms.openai import OpenAI, OpenAIChat 12 | from langchain.schema import BaseMessage, ChatResult, LLMResult 13 | 14 | from langchain_visualizer.hijacking import VisualizationWrapper, ice_hijack 15 | 16 | # todo: ideally we would stop tying costs to Davinci tokens, but this would involve 17 | # changing ICE's frontend logic 18 | MODEL_COST_MAP = { 19 | # model names: 
https://platform.openai.com/docs/models/gpt-3-5 20 | # model costs: https://openai.com/pricing 21 | "text-davinci-003": 1, 22 | "gpt-3.5-turbo": 0.1, 23 | "gpt-3.5-turbo-16k": 0.2, 24 | } 25 | 26 | 27 | class LlmVisualizer(VisualizationWrapper): 28 | def determine_cost(self, result: Union[LLMResult, ChatResult]) -> None: 29 | llm_output = result.llm_output or {} 30 | total_tokens = llm_output.get("token_usage", {}).get("total_tokens", 0) 31 | davinci_equivalent = int( 32 | MODEL_COST_MAP.get(self.og_obj.model_name, 0) * total_tokens 33 | ) 34 | if davinci_equivalent > 0: 35 | add_fields(davinci_equivalent_tokens=davinci_equivalent) 36 | 37 | 38 | class LlmSyncVisualizer(LlmVisualizer): 39 | """Overrides the sync generate function for regular LLMs.""" 40 | 41 | async def run( 42 | self, 43 | prompts: List[str], 44 | stop: Optional[List[str]] = None, 45 | callbacks: Callbacks = None, 46 | **kwargs: Any, 47 | ) -> LLMResult: 48 | """Run the LLM on the given prompt and input.""" 49 | result: LLMResult = self.og_fn( 50 | self.og_obj, prompts=prompts, stop=stop, callbacks=callbacks, **kwargs 51 | ) 52 | if isinstance(self.og_obj, OpenAI): 53 | self.determine_cost(result) 54 | return result 55 | 56 | 57 | class LlmAsyncVisualizer(LlmVisualizer): 58 | """Overrides the async generate function for regular LLMs.""" 59 | 60 | async def run( 61 | self, 62 | prompts: List[str], 63 | stop: Optional[List[str]] = None, 64 | callbacks: Callbacks = None, 65 | **kwargs: Any, 66 | ) -> LLMResult: 67 | """Run the LLM on the given prompt and input.""" 68 | result: LLMResult = await self.og_fn( 69 | self.og_obj, 70 | prompts=prompts, 71 | stop=stop, 72 | callbacks=callbacks, 73 | **kwargs, 74 | ) 75 | if isinstance(self.og_obj, OpenAI): 76 | self.determine_cost(result) 77 | return result 78 | 79 | 80 | class ChatLlmSyncVisualizer(LlmVisualizer): 81 | """Overrides the sync _generate function for chat LLMs.""" 82 | 83 | async def run( 84 | self, 85 | messages: List[BaseMessage], 86 | stop: Optional[List[str]] = None, 87 | run_manager: Optional[CallbackManagerForLLMRun] = None, 88 | **kwargs, 89 | ) -> ChatResult: 90 | """Run the LLM on the given prompt and input.""" 91 | result: ChatResult = self.og_fn( 92 | self.og_obj, messages=messages, stop=stop, run_manager=run_manager, **kwargs 93 | ) 94 | if isinstance(self.og_obj, OpenAIChat) or isinstance(self.og_obj, ChatOpenAI): 95 | self.determine_cost(result) 96 | return result 97 | 98 | 99 | class ChatLlmAsyncVisualizer(LlmVisualizer): 100 | """Overrides the async _agenerate function for chat LLMs.""" 101 | 102 | async def run( 103 | self, 104 | messages: List[BaseMessage], 105 | stop: Optional[List[str]] = None, 106 | run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, 107 | **kwargs, 108 | ) -> ChatResult: 109 | """Run the LLM on the given prompt and input.""" 110 | result: ChatResult = await self.og_fn( 111 | self.og_obj, 112 | messages=messages, 113 | stop=stop, 114 | run_manager=run_manager, 115 | **kwargs, 116 | ) 117 | if isinstance(self.og_obj, OpenAIChat) or isinstance(self.og_obj, ChatOpenAI): 118 | self.determine_cost(result) 119 | return result 120 | 121 | 122 | ice_hijack(BaseLLM, "generate", LlmSyncVisualizer) 123 | ice_hijack(BaseLLM, "agenerate", LlmAsyncVisualizer) 124 | ice_hijack(ChatOpenAI, "_generate", ChatLlmSyncVisualizer) 125 | ice_hijack(ChatOpenAI, "_agenerate", ChatLlmAsyncVisualizer) 126 | -------------------------------------------------------------------------------- /langchain_visualizer/prompts/__init__.py: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/amosjyng/langchain-visualizer/e51a63e5842a02548927532e1926c2b409ba43d1/langchain_visualizer/prompts/__init__.py -------------------------------------------------------------------------------- /langchain_visualizer/prompts/few_shot.py: -------------------------------------------------------------------------------- 1 | from fvalues import F 2 | from langchain import FewShotPromptTemplate 3 | from langchain.prompts.prompt import PromptTemplate 4 | 5 | from langchain_visualizer.hijacking import hijack 6 | 7 | 8 | def get_new_format(og_format): 9 | def new_format(self, *args, **kwargs) -> str: 10 | if self.template_format != "f-string": 11 | return og_format(*args, **kwargs) 12 | 13 | # copied from FewShotPromptTemplate.format 14 | kwargs = self._merge_partial_and_user_variables(**kwargs) 15 | # Get the examples to use. 16 | examples = self._get_examples(**kwargs) 17 | examples = [ 18 | {k: e[k] for k in self.example_prompt.input_variables} for e in examples 19 | ] 20 | # Format the examples. 21 | example_strings = [ 22 | self.example_prompt.format(**example) for example in examples 23 | ] 24 | # Create the overall template. 25 | prefix_template = PromptTemplate.from_template(self.prefix) 26 | suffix_template = PromptTemplate.from_template(self.suffix) 27 | prefix_args = { 28 | k: v for k, v in kwargs.items() if k in prefix_template.input_variables 29 | } 30 | suffix_args = { 31 | k: v for k, v in kwargs.items() if k in suffix_template.input_variables 32 | } 33 | pieces = [ 34 | prefix_template.format(**prefix_args), 35 | *example_strings, 36 | suffix_template.format(**suffix_args), 37 | ] 38 | return F(self.example_separator).preserved_join( # type: ignore 39 | [piece for piece in pieces if piece] 40 | ) 41 | 42 | return new_format 43 | 44 | 45 | hijack(FewShotPromptTemplate, "format", get_new_format) 46 | -------------------------------------------------------------------------------- /langchain_visualizer/prompts/prompt.py: -------------------------------------------------------------------------------- 1 | from fvalues import F, FValue 2 | from langchain.formatting import formatter as og_formatter 3 | from langchain_core.prompts.string import DEFAULT_FORMATTER_MAPPING 4 | 5 | 6 | def new_format(format_string, /, *args, **kwargs): 7 | # if there are any issues with the formatting, let the formatter expose them first 8 | result = og_formatter.format(format_string, *args, **kwargs) 9 | parts = [] 10 | # modified from string._vformat 11 | for literal_text, field_name, format_spec, _ in og_formatter.parse(format_string): 12 | if literal_text: 13 | parts.append(literal_text) 14 | 15 | if field_name is not None: 16 | obj, _ = og_formatter.get_field(field_name, args, kwargs) 17 | parts.append( 18 | FValue( 19 | source=field_name, 20 | value=obj, 21 | formatted=og_formatter.format_field(obj, format_spec), 22 | ) 23 | ) 24 | 25 | return F(result, parts=tuple(parts)) 26 | 27 | 28 | DEFAULT_FORMATTER_MAPPING["f-string"] = new_format 29 | -------------------------------------------------------------------------------- /langchain_visualizer/py.typed: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amosjyng/langchain-visualizer/e51a63e5842a02548927532e1926c2b409ba43d1/langchain_visualizer/py.typed -------------------------------------------------------------------------------- /langchain_visualizer/visualize.py: 
-------------------------------------------------------------------------------- 1 | import asyncio 2 | import sys 3 | from functools import wraps 4 | from inspect import iscoroutinefunction 5 | from traceback import print_exc 6 | 7 | from ice.environment import env 8 | from ice.mode import Mode 9 | from ice.recipe import FunctionBasedRecipe, recipe 10 | from ice.trace import enable_trace, trace 11 | from merge_args import merge_args 12 | 13 | 14 | def visualize(fn: FunctionBasedRecipe): 15 | def new_main(self, main: FunctionBasedRecipe): 16 | if not iscoroutinefunction(main): 17 | raise TypeError("visualize must be given an async function") 18 | 19 | # Trace all globals defined in main's module. 20 | try: 21 | g = main.__globals__ 22 | except AttributeError: 23 | # Perhaps this is a functools.partial 24 | g = main.func.__globals__ # type: ignore[attr-defined] 25 | for name, value in g.items(): 26 | if getattr(value, "__module__", None) == main.__module__: 27 | g[name] = trace(value) 28 | 29 | traced_main = trace(main) 30 | self.all_recipes.append(traced_main) 31 | 32 | # The frontend shows everything under the first traced root. 33 | # TODO: Once main.py is gone, change the frontend and get rid of this wrapper. 34 | @trace 35 | @wraps(main) 36 | async def hidden_wrapper(*args, **kwargs): 37 | try: 38 | result = await traced_main(*args, **kwargs) 39 | except NameError: 40 | print_exc() 41 | print( 42 | "\nReminder: recipe.main should be at the bottom of the file", 43 | file=sys.stderr, 44 | ) 45 | sys.exit(1) 46 | 47 | env().print(result, format_markdown=False) 48 | return result 49 | 50 | # A traced function cannot be called until the event loop is running. 51 | @wraps(main) 52 | async def untraced_wrapper(*args, **kwargs): 53 | return await hidden_wrapper(*args, **kwargs) 54 | 55 | @merge_args(main) 56 | def cli( 57 | *args, 58 | mode: Mode = "machine", 59 | trace: bool = True, 60 | **kwargs, 61 | ): 62 | self._mode = mode 63 | if trace: 64 | enable_trace() 65 | return asyncio.run(untraced_wrapper(*args, **kwargs)) 66 | 67 | return cli() 68 | 69 | return new_main(recipe, fn) 70 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "langchain-visualizer" 3 | version = "0.0.32" 4 | description = "Visualization and debugging tool for LangChain workflows" 5 | authors = ["Amos Jun-yeung Ng "] 6 | readme = "README.md" 7 | packages = [{ include = "langchain_visualizer" }] 8 | 9 | [tool.poetry.dependencies] 10 | python = "^3.9" 11 | langchain = ">=0.0.344,<0.1" 12 | ought-ice = "^0.5.0" 13 | fvalues = "^0.0.4" 14 | gorilla = "^0.4.0" 15 | pydantic = "^1.0.0" 16 | 17 | 18 | [tool.poetry.group.dev.dependencies] 19 | mypy = "^1.5.1" 20 | black = "^22.12.0" 21 | isort = "^5.11.4" 22 | flake8 = "^6.0.0" 23 | pytest = "^7.2.1" 24 | openai = "^0.27.0" 25 | vcr-langchain = "^0.0.30" 26 | google-search-results = "^2.4.1" 27 | faiss-cpu = "^1.7.3" 28 | tiktoken = "^0.3.3" 29 | autoflake = "^2.0.1" 30 | pytest-asyncio = "^0.21.0" 31 | playwright = "^1.34.0" 32 | beautifulsoup4 = "^4.12.2" 33 | lxml = "^4.9.2" 34 | langchain-experimental = "^0.0.43" 35 | numexpr = "^2.8.8" 36 | 37 | [tool.pytest.ini_options] 38 | asyncio_mode = "auto" 39 | markers = ["network: marks tests as requiring network services"] 40 | filterwarnings = [ 41 | "error", 42 | 'ignore:There is no current event loop:DeprecationWarning', 43 | 'ignore:distutils Version classes are 
deprecated:DeprecationWarning', 44 | 'ignore:unclosed str: 18 | browser = get_async_test_browser( 19 | cassette_path="tests/agents/structured_tool_chat_demo.yaml" 20 | ) 21 | browser_toolkit = PlayWrightBrowserToolkit.from_browser(async_browser=browser) 22 | tools = browser_toolkit.get_tools() 23 | 24 | llm = ChatOpenAI(temperature=0) 25 | chat_history = MessagesPlaceholder(variable_name="chat_history") 26 | memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True) 27 | agent_chain = initialize_agent( 28 | tools, 29 | llm, 30 | agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION, 31 | verbose=True, 32 | memory=memory, 33 | agent_kwargs={ 34 | "memory_prompts": [chat_history], 35 | "input_variables": ["input", "agent_scratchpad", "chat_history"], 36 | }, 37 | ) 38 | agent_chain.run(input="Hi I'm Erica.") 39 | agent_chain.run(input="whats my name?") 40 | return await agent_chain.arun( 41 | input=( 42 | "What's the latest xkcd comic about? " 43 | "Navigate to the xkcd website and tell me. " 44 | "Use the tools in your subsequent responses." 45 | ) 46 | ) 47 | 48 | 49 | # ================================== Execute example ================================== 50 | 51 | 52 | def test_llm_usage_succeeds(): 53 | """Check that the chain can run normally""" 54 | result = asyncio.get_event_loop().run_until_complete(structured_tool_chat_demo()) 55 | assert result.strip().startswith("The latest xkcd comic is titled ") 56 | 57 | 58 | if __name__ == "__main__": 59 | from langchain_visualizer import visualize 60 | 61 | visualize(structured_tool_chat_demo) 62 | -------------------------------------------------------------------------------- /tests/chains/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amosjyng/langchain-visualizer/e51a63e5842a02548927532e1926c2b409ba43d1/tests/chains/__init__.py -------------------------------------------------------------------------------- /tests/chains/bash_chain_demo.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"prompt": ["If someone asks you to perform a task, your job is to come 4 | up with a series of bash commands that will perform the task. There is no need 5 | to put \"#!/bin/bash\" in your answer. Make sure to reason step by step, using 6 | this format:\n\nQuestion: \"copy the files in the directory named ''target'' 7 | into a new directory at the same level as target called ''myNewDirectory''\"\n\nI 8 | need to take the following actions:\n- List all files in the directory\n- Create 9 | a new directory\n- Copy the files from the first directory into the second directory\n```bash\nls\nmkdir 10 | myNewDirectory\ncp -r target/* myNewDirectory\n```\n\nThat is the format. 
Begin!\n\nQuestion: 11 | What files are in my current directory?"], "model": "text-davinci-003", "temperature": 12 | 0.0, "top_p": 1, "frequency_penalty": 0, "presence_penalty": 0, "n": 1, "logit_bias": 13 | {}, "max_tokens": 256}' 14 | headers: {} 15 | method: POST 16 | uri: https://api.openai.com/v1/completions 17 | response: 18 | body: 19 | string: !!binary | 20 | H4sIAAAAAAAAA0xPyU4jMRC991eUfM7SaZpJ6BuBC4g5IBhppAlKHLuSLnC7PHYlMCD+feROWC6W 21 | /LZ6760AUM86evJb1YC6bylBxxYd7DEmYg+UwGKIaLSgHcFP2kYtCGvccES41n6n4z+oB1CVVQ3C 22 | oPdMFiyluAuSE3gDCeOeDI7gBnX00GVrKxJSMx4Hp2XDsRtxQK9pZLgbWzZp/HGW2Cc1yE3J5pKm 23 | C244u5Xn9u9VPasu5k8n/HhZz89/Vb/PXuX1/O6g5vUjGskOwRdZGu6Cw5x2oE3EPEk1MJmWk7qq 24 | qulpT/T7P2xDq/fkDQ3L8uToa5kMJtXAnwIA4K1/4SDPtoVf+NVqtdapXXiX+k9v7VXkLb6oBspP 25 | xPE2RF7nRL9z7hPfkKfULiPqxD4HJ+Ggeva9AHjo2+yS3qJqji1UiNwFWQo/oc+Bkx+zQ576mv+N 26 | PZZQwqLdN3w6K/KR9+I/AAAA//8DAG8ZRGohAgAA 27 | headers: 28 | CF-Cache-Status: 29 | - DYNAMIC 30 | CF-RAY: 31 | - 82ea3d67f92f5ed2-PDX 32 | Cache-Control: 33 | - no-cache, must-revalidate 34 | Connection: 35 | - keep-alive 36 | Content-Encoding: 37 | - gzip 38 | Content-Type: 39 | - application/json 40 | Date: 41 | - Fri, 01 Dec 2023 09:17:56 GMT 42 | Transfer-Encoding: 43 | - chunked 44 | openai-model: 45 | - text-davinci-003 46 | openai-processing-ms: 47 | - '260' 48 | x-ratelimit-limit-tokens_usage_based: 49 | - '250000' 50 | x-ratelimit-remaining-tokens_usage_based: 51 | - '249744' 52 | x-ratelimit-reset-tokens_usage_based: 53 | - 61ms 54 | status: 55 | code: 200 56 | message: OK 57 | - request: 58 | body: '{"commands": ["ls"]}' 59 | headers: 60 | persistent: 61 | - false 62 | return_err_output: 63 | - false 64 | strip_newlines: 65 | - false 66 | method: POST 67 | uri: tool://BashProcess/run 68 | response: 'dist 69 | 70 | langchain_visualizer 71 | 72 | LICENSE 73 | 74 | Makefile 75 | 76 | poetry.lock 77 | 78 | pyproject.toml 79 | 80 | README.md 81 | 82 | screenshots 83 | 84 | tests 85 | 86 | ' 87 | version: 1 88 | -------------------------------------------------------------------------------- /tests/chains/foundational/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amosjyng/langchain-visualizer/e51a63e5842a02548927532e1926c2b409ba43d1/tests/chains/foundational/__init__.py -------------------------------------------------------------------------------- /tests/chains/foundational/router_demo.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"prompt": ["Given a raw text input to a language model select the model 4 | prompt best suited for the input. You will be given the names of the available 5 | prompts and a description of what the prompt is best suited for. 
You may also 6 | revise the original input if you think that revising it will ultimately lead 7 | to a better response from the language model.\n\n<< FORMATTING >>\nReturn a 8 | markdown code snippet with a JSON object formatted to look like:\n```json\n{\n \"destination\": 9 | string \\ name of the prompt to use or \"DEFAULT\"\n \"next_inputs\": string 10 | \\ a potentially modified version of the original input\n}\n```\n\nREMEMBER: 11 | \"destination\" MUST be one of the candidate prompt names specified below OR 12 | it can be \"DEFAULT\" if the input is not well suited for any of the candidate 13 | prompts.\nREMEMBER: \"next_inputs\" can just be the original input if you don''t 14 | think any modifications are needed.\n\n<< CANDIDATE PROMPTS >>\nphysics: Good 15 | for answering questions about physics\nmath: Good for answering math questions\n\n<< 16 | INPUT >>\nWhat is black body radiation?\n\n<< OUTPUT (must include ```json at 17 | the start of the response) >>\n<< OUTPUT (must end with ```) >>\n"], "model": 18 | "text-davinci-003", "temperature": 0.7, "top_p": 1, "frequency_penalty": 0, 19 | "presence_penalty": 0, "n": 1, "logit_bias": {}, "max_tokens": 256}' 20 | headers: {} 21 | method: POST 22 | uri: https://api.openai.com/v1/completions 23 | response: 24 | body: 25 | string: !!binary | 26 | H4sIAAAAAAAAA0xQyW7bMBC9+ysGPHtV7MbQpeihRZsuQBcgKKrCosiRNYk0Q5CUFxj+94KynfTC 27 | w7ydpxGA2mvPxFuVg/rVUIBOLLawQx9IGCiARefR6Ih2Cl9p63VEqLAWj/Cgudf+CMsxZPNsCVFA 28 | 74QsWAq+dzE5SA0B/Y4MTuELas/QJWkTowv5bOZaHWvx3VQcsqapkW5mxYTZLZaEgxqnpmRTSdO5 29 | drL+Hve0+rj++a5+iIcfnz4s+oX9vO32739/e7ywpXpCE5Mi4iFujHSuxeR2gY3HNEnlsLifL5ZZ 30 | lt2/GYBh/002sXpHbGgyn99ddY2QwaBy+DMCADgNL1zoSVaW5VMQLvhUcAIKZTFE4mFJoXIolGuO 31 | gUwo1PhG4VSR2PUxXCiPjY7p86tWm2eoxB7Ba0uDydtCFXwuuCzLodMQT2zxoHKYv1xa2TovVarK 32 | fdu+3GtiCs3Gow7CqXGI4tSAnkcAf4eZfdBbVPl1nnJeOhc3UZ6Rk2G2vuao1399Re9WVzBK1O1/ 33 | 98VqlELOo38AAAD//wMAt8i0u3oCAAA= 34 | headers: 35 | CF-Cache-Status: 36 | - DYNAMIC 37 | CF-RAY: 38 | - 82ea3d6a8cde5ece-PDX 39 | Cache-Control: 40 | - no-cache, must-revalidate 41 | Connection: 42 | - keep-alive 43 | Content-Encoding: 44 | - gzip 45 | Content-Type: 46 | - application/json 47 | Date: 48 | - Fri, 01 Dec 2023 09:17:57 GMT 49 | Transfer-Encoding: 50 | - chunked 51 | openai-model: 52 | - text-davinci-003 53 | openai-processing-ms: 54 | - '835' 55 | x-ratelimit-limit-tokens_usage_based: 56 | - '250000' 57 | x-ratelimit-remaining-tokens_usage_based: 58 | - '249744' 59 | x-ratelimit-reset-tokens_usage_based: 60 | - 61ms 61 | status: 62 | code: 200 63 | message: OK 64 | - request: 65 | body: '{"prompt": ["You are a very smart physics professor. You are great at answering 66 | questions about physics in a concise and easy to understand manner. 
When you 67 | don''t know the answer to a question you admit that you don''t know.\n\nHere 68 | is a question:\nWhat is black body radiation?"], "model": "text-davinci-003", 69 | "temperature": 0.7, "top_p": 1, "frequency_penalty": 0, "presence_penalty": 70 | 0, "n": 1, "logit_bias": {}, "max_tokens": 256}' 71 | headers: {} 72 | method: POST 73 | uri: https://api.openai.com/v1/completions 74 | response: 75 | body: 76 | string: !!binary | 77 | H4sIAAAAAAAAA4xTTW/bMAy951cQvuySr6YtuuZYoBiwL2zYdtjWoaAl2mYji6pEJ/WK/vdBdpJ2 78 | hwG7GJLI9/hIPj9OAIodRs++LtZQfG04QSuWHGwpJhYPnMBSiGRQyc7hA9cRlaCkSiLBW/Qdxh7O 79 | prBars5ABXArbMFyil3QzCAVJIpbNjSH94TRQ5uhjWpI68UiONRKYjuXQB55bqRdWDFpcSjL4lMx 80 | zUrZZpGmDW72+rPu7n68uerOw+Xvu+sv3zd4fVp9e/exLO/jmC3lHRnNCKUHvTXSBkeZbQybSLml 81 | Yg0nF8uTs9VqdXExBIb+D7CZxS17w7Pl8nSPa4QNpWINPycAAI/DF8b0DLvxN/7KodlAKbaHiJaH 82 | JvIotSEgR0ajtFh7UjYvEqhlVbJQRWkBPbAldPybLISmT2zQjZTaoAKWSWKZAJ17WcMbtuQVupBv 83 | Oodhp6EhLy35w0KTiVyShbKHTw692bxK4HAH6C1U4pzsEiCYBiMapcgpC60i3XfkTT+zFMgPZSwn 84 | jVx2Q/GNl50HHNss8wgGuSnkhrs2a6HjbRSiFFv2o5KMUmoDRdQuUjbOQCS2n8KOtYFGVCnCuNk0 85 | zot9PRqKPMW6B1RouG4oHvUypaExI+L+A+1k9zd4Dv9aZyLKIwfaUuwt9uC4ovyQdWdTD+bvvOO6 86 | 0Skkxbg/Zj05qXayy0nNYMaDtvlgtcFV7C09FGtYHl+c1CFKmR3oO+eO7xV7Ts1tJEzisxGTSiiG 87 | 6NME4Nfg3i5hTcV679oiRGmD3qpsyGfC84uRrnj+W56DJ8vVPqqi6F4Ezi8nucjT5A8AAAD//wMA 88 | 9aBC0VEEAAA= 89 | headers: 90 | CF-Cache-Status: 91 | - DYNAMIC 92 | CF-RAY: 93 | - 82ea3d705ebd5ece-PDX 94 | Cache-Control: 95 | - no-cache, must-revalidate 96 | Connection: 97 | - keep-alive 98 | Content-Encoding: 99 | - gzip 100 | Content-Type: 101 | - application/json 102 | Date: 103 | - Fri, 01 Dec 2023 09:17:59 GMT 104 | Transfer-Encoding: 105 | - chunked 106 | openai-model: 107 | - text-davinci-003 108 | openai-processing-ms: 109 | - '2046' 110 | x-ratelimit-limit-tokens_usage_based: 111 | - '250000' 112 | x-ratelimit-remaining-tokens_usage_based: 113 | - '249744' 114 | x-ratelimit-reset-tokens_usage_based: 115 | - 61ms 116 | status: 117 | code: 200 118 | message: OK 119 | - request: 120 | body: '{"prompt": ["Given a raw text input to a language model select the model 121 | prompt best suited for the input. You will be given the names of the available 122 | prompts and a description of what the prompt is best suited for. 
You may also 123 | revise the original input if you think that revising it will ultimately lead 124 | to a better response from the language model.\n\n<< FORMATTING >>\nReturn a 125 | markdown code snippet with a JSON object formatted to look like:\n```json\n{\n \"destination\": 126 | string \\ name of the prompt to use or \"DEFAULT\"\n \"next_inputs\": string 127 | \\ a potentially modified version of the original input\n}\n```\n\nREMEMBER: 128 | \"destination\" MUST be one of the candidate prompt names specified below OR 129 | it can be \"DEFAULT\" if the input is not well suited for any of the candidate 130 | prompts.\nREMEMBER: \"next_inputs\" can just be the original input if you don''t 131 | think any modifications are needed.\n\n<< CANDIDATE PROMPTS >>\nphysics: Good 132 | for answering questions about physics\nmath: Good for answering math questions\n\n<< 133 | INPUT >>\nWhat is the first prime number greater than 40 such that one plus 134 | the prime number is divisible by 3\n\n<< OUTPUT (must include ```json at the 135 | start of the response) >>\n<< OUTPUT (must end with ```) >>\n"], "model": "text-davinci-003", 136 | "temperature": 0.7, "top_p": 1, "frequency_penalty": 0, "presence_penalty": 137 | 0, "n": 1, "logit_bias": {}, "max_tokens": 256}' 138 | headers: {} 139 | method: POST 140 | uri: https://api.openai.com/v1/completions 141 | response: 142 | body: 143 | string: !!binary | 144 | H4sIAAAAAAAAA1RRXW/bMAx8z68g9JwP10mQ1S972joU2YANwwZsHmJZZmJ2MiVIdNoiyH8fZCcN 145 | 9iJAPN7xjjxNANSzDkx8UAWo7y1F6FyDFo4YIjkGitCgD2i0YDOHz3QIWhBq3LuA8Ki51+EVVlPI 146 | s3wF4kAfHTXQUAy9l6Tg9hAxHMngHLaoA0OXqK2Ij8Vi4a2WvQvd3HlkTXPjukXjTFxcx5LjqKbJ 147 | KTXJpOm8nb37Ks+26x767Q+/sZ+25uOXbx/sevvrge3j2O3qJzSSGIIvsjOu8xaT2gibgCmSKuBu 148 | k92t8jzf3A/AkP9KmzX6SGxolmXLC691ZDCqAn5PAABOwwtje6JVVfUUHZd8KjkBpWowCvGQpFQF 149 | lKrT0pZqesU5+SP2vcQR/9lqSZuXFmFPIQr4QB0C912NAQ6D9QDSaoZVBrE3bfoIOEbwth+Z/3HS 150 | HelIkWqLUL/C8n2pSj6XXFXVEGzIQNzgiyoge6tYd/DB1Skv99a+1ffEFNtdQB0dp9hRnFcDep4A 151 | /Bl21Ud9QFVcdqR8cJ2Xnbi/yEkwv1+Peup2nBu6vphQ4kTbW325Wk/SkPPkHwAAAP//AwBKei/U 152 | vwIAAA== 153 | headers: 154 | CF-Cache-Status: 155 | - DYNAMIC 156 | CF-RAY: 157 | - 82ea3d7dcb635ece-PDX 158 | Cache-Control: 159 | - no-cache, must-revalidate 160 | Connection: 161 | - keep-alive 162 | Content-Encoding: 163 | - gzip 164 | Content-Type: 165 | - application/json 166 | Date: 167 | - Fri, 01 Dec 2023 09:18:00 GMT 168 | Transfer-Encoding: 169 | - chunked 170 | openai-model: 171 | - text-davinci-003 172 | openai-processing-ms: 173 | - '1058' 174 | x-ratelimit-limit-tokens_usage_based: 175 | - '250000' 176 | x-ratelimit-remaining-tokens_usage_based: 177 | - '249744' 178 | x-ratelimit-reset-tokens_usage_based: 179 | - 61ms 180 | status: 181 | code: 200 182 | message: OK 183 | - request: 184 | body: '{"prompt": ["You are a very good mathematician. You are great at answering 185 | math questions. 
You are so good because you are able to break down hard problems 186 | into their component parts, answer the component parts, and then put them together 187 | to answer the broader question.\n\nHere is a question:\nWhat is the first prime 188 | number greater than 40 such that one plus the prime number is divisible by 3?"], 189 | "model": "text-davinci-003", "temperature": 0.7, "top_p": 1, "frequency_penalty": 190 | 0, "presence_penalty": 0, "n": 1, "logit_bias": {}, "max_tokens": 256}' 191 | headers: {} 192 | method: POST 193 | uri: https://api.openai.com/v1/completions 194 | response: 195 | body: 196 | string: !!binary | 197 | H4sIAAAAAAAAA4RRXWvbMBR9z6846GmDxHEcjzZ+LaPQboNCn7aWIMvX8e1kSZVkt6H0vw85acLY 198 | w14EOueec+7H2wwQL9IbNjtRQdx3HNDbhjRG8oGtAQc05DwpGanJ8J13XkZCTa31hBtpBun3KOco 199 | 8qJEtJCj5QYNBz+4mBxsi0B+ZEUZvpH0Bn2SdjG6UC2XTsvYWt9n1pGRnCnbLxurwvIjlq0JYp46 200 | 5SY1qXqnF5d38aWXxY8rFeXV82q8ebredHe3t/vrr88/D9W2fiIVkyLSa9wq2ztNye1AK09pJFFh 201 | dZGvyqIoLvOJmOb/kC0aObJRvMjz9VHXWVYURIVfMwB4m14cypPswTyY+47Qsg8RznNPMENfk8du 202 | yvSInTQoc4RBdekTYQ3B6SEgdvS3Jh2ARw5ca0K9xzoh5TrDdCwOqEnJIRDKiUn60Eut6T/hc0jT 203 | oCzx6ZRdrj//G5dNY08TsmnoVVTIT4i2O+dtnbZhBq1PeMuGQ7f1JIM1aSkhWicm9n0GPE6bHILc 204 | kaiOGxTO297FbbS/ySTDi83BTpwvdya/HHsQ0Uapz/iq2MxSxvvsDwAAAP//AwDtynjk3AIAAA== 205 | headers: 206 | CF-Cache-Status: 207 | - DYNAMIC 208 | CF-RAY: 209 | - 82ea3d84fe405ece-PDX 210 | Cache-Control: 211 | - no-cache, must-revalidate 212 | Connection: 213 | - keep-alive 214 | Content-Encoding: 215 | - gzip 216 | Content-Type: 217 | - application/json 218 | Date: 219 | - Fri, 01 Dec 2023 09:18:01 GMT 220 | Transfer-Encoding: 221 | - chunked 222 | openai-model: 223 | - text-davinci-003 224 | openai-processing-ms: 225 | - '1338' 226 | x-ratelimit-limit-tokens_usage_based: 227 | - '250000' 228 | x-ratelimit-remaining-tokens_usage_based: 229 | - '249744' 230 | x-ratelimit-reset-tokens_usage_based: 231 | - 61ms 232 | status: 233 | code: 200 234 | message: OK 235 | - request: 236 | body: '{"prompt": ["Given a raw text input to a language model select the model 237 | prompt best suited for the input. You will be given the names of the available 238 | prompts and a description of what the prompt is best suited for. 
You may also 239 | revise the original input if you think that revising it will ultimately lead 240 | to a better response from the language model.\n\n<< FORMATTING >>\nReturn a 241 | markdown code snippet with a JSON object formatted to look like:\n```json\n{\n \"destination\": 242 | string \\ name of the prompt to use or \"DEFAULT\"\n \"next_inputs\": string 243 | \\ a potentially modified version of the original input\n}\n```\n\nREMEMBER: 244 | \"destination\" MUST be one of the candidate prompt names specified below OR 245 | it can be \"DEFAULT\" if the input is not well suited for any of the candidate 246 | prompts.\nREMEMBER: \"next_inputs\" can just be the original input if you don''t 247 | think any modifications are needed.\n\n<< CANDIDATE PROMPTS >>\nphysics: Good 248 | for answering questions about physics\nmath: Good for answering math questions\n\n<< 249 | INPUT >>\nWhat is the name of the type of cloud that rins\n\n<< OUTPUT (must 250 | include ```json at the start of the response) >>\n<< OUTPUT (must end with ```) 251 | >>\n"], "model": "text-davinci-003", "temperature": 0.7, "top_p": 1, "frequency_penalty": 252 | 0, "presence_penalty": 0, "n": 1, "logit_bias": {}, "max_tokens": 256}' 253 | headers: {} 254 | method: POST 255 | uri: https://api.openai.com/v1/completions 256 | response: 257 | body: 258 | string: !!binary | 259 | H4sIAAAAAAAAA0RQ24rbMBB9z1cMes7F8abd4JfSpV1K2S0NZGmhLrEiTWJt7RkhjXMh5N+LnGT3 260 | RYg5lzlnTgMAtdeBHG1VAWpZuwgtW2xghyE6JnARLPqARgvaMTy7bdCCsMYNB4TvmjodjjAbQp7l 261 | MxAGvWNnwboYOi/JgTcQMeycwTE8oQ4EbZLWIj4Wk4lvtGw4tGP2SNqNDbcTyyZObmsdU1TDlNTZ 262 | FNK0vhnNF7LnH/7x+Px78cHXh8VD1v1crvEh23ffLmxev6KRpBA8yMpw6xtMbhfYBEyVVAHT+2w6 263 | y/N8nvdA3/8mG1m9c2TcKMvurrqancGoCvgzAAA49S9c6ElWVdVrZCrpVFICSmUxiqO+SakKKNWX 264 | r4+fX56WpRreKJQiOvKdxAvlV60lHV9qBNItpjOmvxx9/zcNdxYksYJ2FD+VqqRzSVVV9Tn7SI4s 265 | HlQB2duk4a0PvE7xqWuat/nGkYv1KqCOTKlFFPaqR88DgL999S7qLariWln5wK2XlfA/pGSYzz9e 266 | /NT7rd/R2fQKCotu3ud3+f0gLTkP/gMAAP//AwAoOFTTjgIAAA== 267 | headers: 268 | CF-Cache-Status: 269 | - DYNAMIC 270 | CF-RAY: 271 | - 82ea3d8df96a5ece-PDX 272 | Cache-Control: 273 | - no-cache, must-revalidate 274 | Connection: 275 | - keep-alive 276 | Content-Encoding: 277 | - gzip 278 | Content-Type: 279 | - application/json 280 | Date: 281 | - Fri, 01 Dec 2023 09:18:02 GMT 282 | Transfer-Encoding: 283 | - chunked 284 | openai-model: 285 | - text-davinci-003 286 | openai-processing-ms: 287 | - '941' 288 | x-ratelimit-limit-tokens_usage_based: 289 | - '250000' 290 | x-ratelimit-remaining-tokens_usage_based: 291 | - '249744' 292 | x-ratelimit-reset-tokens_usage_based: 293 | - 61ms 294 | status: 295 | code: 200 296 | message: OK 297 | - request: 298 | body: '{"prompt": ["The following is a friendly conversation between a human and 299 | an AI. The AI is talkative and provides lots of specific details from its context. 
300 | If the AI does not know the answer to a question, it truthfully says it does 301 | not know.\n\nCurrent conversation:\n\nHuman: What is the name of the type of 302 | cloud that rains?\nAI:"], "model": "text-davinci-003", "temperature": 0.7, "top_p": 303 | 1, "frequency_penalty": 0, "presence_penalty": 0, "n": 1, "logit_bias": {}, 304 | "max_tokens": 256}' 305 | headers: {} 306 | method: POST 307 | uri: https://api.openai.com/v1/completions 308 | response: 309 | body: 310 | string: !!binary | 311 | H4sIAAAAAAAAA0xQTW/bMAy951cQOieO42Rr4NuwL3TdhrUIhgLFENASE2uRRUGi3QZF//sgJ212 312 | 4YHvg+/xeQKgHjF66/eqBrVpbYKODTkYKCbLHmwCQyGSRiFTwA+7jygEDe04EnxD32M8wmoKVVmt 313 | QBhwYGvA2BT7INmBd5AoDlZTAd8Jo4cuS1uRkOr5PDiUHceu4EAebaG5mxvWaf561rJPapqTWpND 314 | 6i642fpWHhnfm5+fr7/e3R9+390OzY3+9OHX5stNPLG5+UtaskLoSbaau+Aou51gHSlXUjUsrsrF 315 | qqqqdTUCY/9X2czgYL22s7JcnnUtW01J1fAwAQB4Hiec6FkGm5ZAjoFyc+24NyAtCkS0PuV/anSO 316 | DCB87Lvesbdd06cTs4BryRQEQeemYDAe/vfQ6KGJ1u9B2t4bikk4dmkKLVo3BY7QEg7H8VYx5h2j 317 | WW/oSdVQvm0c70PkJtfwvXNv+531NrXbSJjY5zZJOKgRfZkA/Blf0Cfck6rP1VWI3AXZCh/IZ8Or 318 | 5clOXV5+AZfvzqCwoLvsF+V6km+8TP4BAAD//wMAXBqQ25UCAAA= 319 | headers: 320 | CF-Cache-Status: 321 | - DYNAMIC 322 | CF-RAY: 323 | - 82ea3d946bc65ece-PDX 324 | Cache-Control: 325 | - no-cache, must-revalidate 326 | Connection: 327 | - keep-alive 328 | Content-Encoding: 329 | - gzip 330 | Content-Type: 331 | - application/json 332 | Date: 333 | - Fri, 01 Dec 2023 09:18:03 GMT 334 | Transfer-Encoding: 335 | - chunked 336 | openai-model: 337 | - text-davinci-003 338 | openai-processing-ms: 339 | - '797' 340 | x-ratelimit-limit-tokens_usage_based: 341 | - '250000' 342 | x-ratelimit-remaining-tokens_usage_based: 343 | - '249744' 344 | x-ratelimit-reset-tokens_usage_based: 345 | - 61ms 346 | status: 347 | code: 200 348 | message: OK 349 | version: 1 350 | -------------------------------------------------------------------------------- /tests/chains/foundational/test_router.py: -------------------------------------------------------------------------------- 1 | import langchain_visualizer # isort:skip # noqa: F401 2 | import asyncio 3 | 4 | import vcr_langchain as vcr 5 | from langchain.chains import ConversationChain, LLMChain 6 | from langchain.chains.router import MultiPromptChain 7 | from langchain.chains.router.llm_router import LLMRouterChain, RouterOutputParser 8 | from langchain.chains.router.multi_prompt_prompt import MULTI_PROMPT_ROUTER_TEMPLATE 9 | from langchain.llms import OpenAI 10 | from langchain.prompts import PromptTemplate 11 | 12 | # ========================== Start of langchain example code ========================== 13 | # https://python.langchain.com/docs/modules/chains/foundational/router 14 | 15 | 16 | @vcr.use_cassette() 17 | async def router_demo(): 18 | physics_template = ( 19 | "You are a very smart physics professor. " 20 | "You are great at answering questions about physics in a concise and easy to " 21 | "understand manner. When you don't know the answer to a question you admit " 22 | "that you don't know." 23 | "\n\n" 24 | "Here is a question:\n" 25 | "{input}" 26 | ) 27 | 28 | math_template = ( 29 | "You are a very good mathematician. " 30 | "You are great at answering math questions. You are so good because you are " 31 | "able to break down hard problems into their component parts, answer the " 32 | "component parts, and then put them together to answer the broader question." 
33 | "\n\n" 34 | "Here is a question:\n" 35 | "{input}" 36 | ) 37 | 38 | prompt_infos = [ 39 | { 40 | "name": "physics", 41 | "description": "Good for answering questions about physics", 42 | "prompt_template": physics_template, 43 | }, 44 | { 45 | "name": "math", 46 | "description": "Good for answering math questions", 47 | "prompt_template": math_template, 48 | }, 49 | ] 50 | 51 | llm = OpenAI() 52 | destination_chains = {} 53 | for p_info in prompt_infos: 54 | name = p_info["name"] 55 | prompt_template = p_info["prompt_template"] 56 | prompt = PromptTemplate(template=prompt_template, input_variables=["input"]) 57 | chain = LLMChain(llm=llm, prompt=prompt) 58 | destination_chains[name] = chain 59 | default_chain = ConversationChain(llm=llm, output_key="text") 60 | 61 | destinations = [f"{p['name']}: {p['description']}" for p in prompt_infos] 62 | destinations_str = "\n".join(destinations) 63 | router_template = MULTI_PROMPT_ROUTER_TEMPLATE.format(destinations=destinations_str) 64 | router_prompt = PromptTemplate( 65 | template=router_template, 66 | input_variables=["input"], 67 | # note: output parsers should now be attached to the LLMChain and not to the 68 | # PromptTemplate. But because of this bug: 69 | # https://github.com/hwchase17/langchain/issues/6819 70 | # we are just going to ignore the error instead 71 | output_parser=RouterOutputParser(), 72 | ) 73 | router_chain = LLMRouterChain.from_llm(llm, router_prompt) 74 | 75 | chain = MultiPromptChain( 76 | router_chain=router_chain, 77 | destination_chains=destination_chains, 78 | default_chain=default_chain, 79 | verbose=True, 80 | ) 81 | return [ 82 | chain.run("What is black body radiation?"), 83 | chain.run( 84 | "What is the first prime number greater than 40 such that one plus the " 85 | "prime number is divisible by 3" 86 | ), 87 | chain.run("What is the name of the type of cloud that rins"), 88 | ] 89 | 90 | 91 | # ================================== Execute example ================================== 92 | 93 | 94 | def test_llm_usage_succeeds(): 95 | """Check that the chain can run normally""" 96 | results = asyncio.get_event_loop().run_until_complete(router_demo()) 97 | assert len(results) == 3 98 | assert "heavy rain" in results[-1] 99 | 100 | 101 | if __name__ == "__main__": 102 | from langchain_visualizer import visualize 103 | 104 | visualize(router_demo) 105 | -------------------------------------------------------------------------------- /tests/chains/foundational/test_router_embedding.py: -------------------------------------------------------------------------------- 1 | import langchain_visualizer # isort:skip # noqa: F401 2 | import asyncio 3 | 4 | import vcr_langchain as vcr 5 | from langchain import PromptTemplate 6 | from langchain.chains import ConversationChain, LLMChain 7 | from langchain.chains.router import MultiPromptChain 8 | from langchain.chains.router.embedding_router import EmbeddingRouterChain 9 | from langchain.embeddings import OpenAIEmbeddings 10 | from langchain.llms import OpenAI 11 | from langchain.vectorstores import FAISS 12 | from tiktoken_ext.openai_public import cl100k_base 13 | 14 | # ========================== Start of langchain example code ========================== 15 | # https://python.langchain.com/docs/modules/chains/foundational/router 16 | 17 | 18 | @vcr.use_cassette() 19 | async def router_embedding_demo(): 20 | physics_template = ( 21 | "You are a very smart physics professor. 
" 22 | "You are great at answering questions about physics in a concise and easy to " 23 | "understand manner. When you don't know the answer to a question you admit " 24 | "that you don't know." 25 | "\n\n" 26 | "Here is a question:\n" 27 | "{input}" 28 | ) 29 | 30 | math_template = ( 31 | "You are a very good mathematician. " 32 | "You are great at answering math questions. You are so good because you are " 33 | "able to break down hard problems into their component parts, answer the " 34 | "component parts, and then put them together to answer the broader question." 35 | "\n\n" 36 | "Here is a question:\n" 37 | "{input}" 38 | ) 39 | 40 | prompt_infos = [ 41 | { 42 | "name": "physics", 43 | "description": "Good for answering questions about physics", 44 | "prompt_template": physics_template, 45 | }, 46 | { 47 | "name": "math", 48 | "description": "Good for answering math questions", 49 | "prompt_template": math_template, 50 | }, 51 | ] 52 | 53 | llm = OpenAI() 54 | destination_chains = {} 55 | for p_info in prompt_infos: 56 | name = p_info["name"] 57 | prompt_template = p_info["prompt_template"] 58 | prompt = PromptTemplate(template=prompt_template, input_variables=["input"]) 59 | chain = LLMChain(llm=llm, prompt=prompt) 60 | destination_chains[name] = chain 61 | default_chain = ConversationChain(llm=llm, output_key="text") 62 | 63 | names_and_descriptions = [ 64 | ("physics", ["for questions about physics"]), 65 | ("math", ["for questions about math"]), 66 | ] 67 | router_chain = EmbeddingRouterChain.from_names_and_descriptions( 68 | names_and_descriptions, FAISS, OpenAIEmbeddings(), routing_keys=["input"] 69 | ) 70 | 71 | chain = MultiPromptChain( 72 | router_chain=router_chain, 73 | destination_chains=destination_chains, 74 | default_chain=default_chain, 75 | verbose=True, 76 | ) 77 | return [ 78 | chain.run("What is black body radiation?"), 79 | chain.run( 80 | "What is the first prime number greater than 40 such that one plus the " 81 | "prime number is divisible by 3" 82 | ), 83 | chain.run("What is the name of the type of cloud that rains"), 84 | ] 85 | 86 | 87 | # ================================== Execute example ================================== 88 | 89 | # run this before cassette to download blob first 90 | # avoids errors in CI such as: 91 | # No match for the request () was found 92 | cl100k_base() 93 | 94 | 95 | def test_llm_usage_succeeds(): 96 | """Check that the chain can run normally""" 97 | results = asyncio.get_event_loop().run_until_complete(router_embedding_demo()) 98 | assert len(results) == 3 99 | assert "cumulonimbus" in results[-1].lower() 100 | 101 | 102 | if __name__ == "__main__": 103 | from langchain_visualizer import visualize 104 | 105 | visualize(router_embedding_demo) 106 | -------------------------------------------------------------------------------- /tests/chains/langchain_getting_started/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amosjyng/langchain-visualizer/e51a63e5842a02548927532e1926c2b409ba43d1/tests/chains/langchain_getting_started/__init__.py -------------------------------------------------------------------------------- /tests/chains/langchain_getting_started/custom_chain_demo.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"prompt": ["What is a good name for a company that makes colorful socks?"], 4 | "model": "text-davinci-003", "temperature": 0.7, "top_p": 1, 
"frequency_penalty": 5 | 0, "presence_penalty": 0, "n": 1, "logit_bias": {}, "max_tokens": 256}' 6 | headers: {} 7 | method: POST 8 | uri: https://api.openai.com/v1/completions 9 | response: 10 | body: 11 | string: !!binary | 12 | H4sIAAAAAAAAA0RPTU8CMRC9768YexZYVhJwbxJPRg5GoyRqSGlnYWS3U9thwRD+u+mCcpnD+5r3 13 | DhmA2ungyK1UCeplTREatlhDiyESO6AIFn1AowVtH2a0CloQllhxQHjQbqvDD4yuociLEQiDbpks 14 | WIph6yUlcAURQ0sG+/CIOjhoknUt4mM5GPhaS8Wh6bNHp6lvuBlYNnHw95bYRXWdmpJNJU3j697k 15 | SXbtXH9XxfRuPr6f4mg+eX17CfPp/Wx8UvPyC40kh+BeFoYbX2NKO9EmYJqkShiO8+GoKIrJbUd0 16 | +/9sPatbcoZ6eX5z9q2ZDEZVwnsGAHDoLpzkyfbhPtwzm43oKGSuOlcnIGdxr0rI/5GaVz7wMoW5 17 | bV3/4xU5iutFQB3Zpcwo7FXHHjOAz67INuoVqvJcQPnAjZeF8AZdChzenOLUZfiFHJ85YdH1BS7y 18 | LH04Zr8AAAD//wMAska3ohkCAAA= 19 | headers: 20 | CF-Cache-Status: 21 | - DYNAMIC 22 | CF-RAY: 23 | - 82ea3dba7cc1efc2-PDX 24 | Cache-Control: 25 | - no-cache, must-revalidate 26 | Connection: 27 | - keep-alive 28 | Content-Encoding: 29 | - gzip 30 | Content-Type: 31 | - application/json 32 | Date: 33 | - Fri, 01 Dec 2023 09:18:09 GMT 34 | Transfer-Encoding: 35 | - chunked 36 | openai-model: 37 | - text-davinci-003 38 | openai-processing-ms: 39 | - '230' 40 | x-ratelimit-limit-tokens_usage_based: 41 | - '250000' 42 | x-ratelimit-remaining-tokens_usage_based: 43 | - '249744' 44 | x-ratelimit-reset-tokens_usage_based: 45 | - 61ms 46 | status: 47 | code: 200 48 | message: OK 49 | - request: 50 | body: '{"prompt": ["What is a good slogan for a company that makes colorful socks?"], 51 | "model": "text-davinci-003", "temperature": 0.7, "top_p": 1, "frequency_penalty": 52 | 0, "presence_penalty": 0, "n": 1, "logit_bias": {}, "max_tokens": 256}' 53 | headers: {} 54 | method: POST 55 | uri: https://api.openai.com/v1/completions 56 | response: 57 | body: 58 | string: !!binary | 59 | H4sIAAAAAAAAA0yQT0/jMBDF7/kUg8/9k4ZKQI4gQXdZtFrBja4q154mszgey56kVIjvjpwW2IsP 60 | 8+b33hu/FQBqr6Mn36ga1FNLCTq26GDAmIg9UAKLIaLRgnYGD9RELQhb3HFE+Kl9r+MBlhOoymoJ 61 | wqAHJguWUuyDZAfeQcI4kMEZ/EIdPXQZbUVCqufz4LTsOHYzDug1zQx3c8smzT9jiX1Sk9yUbC5p 62 | uuCml39kP2h9aB5iexH294f7u1W4fbpe3VR3ctzm7T80kgnBV9kY7oLD7HaUTcR8kqphcVEullVV 63 | XV6Nwnj/Jza1eiBvaFqW5yeuZTKYVA3PBQDA2/jCcT1ja7/2a/UoGOCHF4YbdhxhT9LC7z7CI5uX 64 | dLZWo9sIkrf4qmoovyaOmxB5m0N879zXfEeeUruJqBP7nJWEgxrV9wLg71iwT7pBVZ+KqRC5C7IR 65 | fkGfDRfnRzv1/SH/iYuTKCzafc+rZZEj3osPAAAA//8DALdSeNIyAgAA 66 | headers: 67 | CF-Cache-Status: 68 | - DYNAMIC 69 | CF-RAY: 70 | - 82ea3dbc8daeefc2-PDX 71 | Cache-Control: 72 | - no-cache, must-revalidate 73 | Connection: 74 | - keep-alive 75 | Content-Encoding: 76 | - gzip 77 | Content-Type: 78 | - application/json 79 | Date: 80 | - Fri, 01 Dec 2023 09:18:09 GMT 81 | Transfer-Encoding: 82 | - chunked 83 | openai-model: 84 | - text-davinci-003 85 | openai-processing-ms: 86 | - '407' 87 | x-ratelimit-limit-tokens_usage_based: 88 | - '250000' 89 | x-ratelimit-remaining-tokens_usage_based: 90 | - '249744' 91 | x-ratelimit-reset-tokens_usage_based: 92 | - 61ms 93 | status: 94 | code: 200 95 | message: OK 96 | version: 1 97 | -------------------------------------------------------------------------------- /tests/chains/langchain_getting_started/llm_chain_demo.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"prompt": ["What is a good name for a company that makes colorful socks?"], 4 | "model": "text-davinci-003", "temperature": 0.0, "top_p": 1, "frequency_penalty": 5 | 0, "presence_penalty": 0, "n": 1, "logit_bias": {}, "max_tokens": 256}' 6 | headers: {} 7 
| method: POST 8 | uri: https://api.openai.com/v1/completions 9 | response: 10 | body: 11 | string: !!binary | 12 | H4sIAAAAAAAAA0RP207bQBB991dM95kkjklL8CfQoBYBqqpSRZvdiT3Fnlntjh0Q4t+rdQJ5mYdz 13 | m3PeCgBzsJGJG1ODeWgpQS8eOxgxJhIGSuAxRHRW0c/hlppoFWGHe4kIN5YHG19hdQFVWa1ABewo 14 | 5MFTikPQnCB7SBhHcjiHDdrI0GdrqxpSvViEzupeYj+XgGxp7qRfeHFp8fGWhJO5yE3J55KuD91s 15 | faeH8fdhkMef7bc1f7dfdR1+/LptePO6Oapl9w+dZofii26d9KHDnHakXcQ8ydSwvCqXq6qq1tcT 16 | Me3/sM28HYkdzcry8uRrhRwmU8OfAgDgbbpwlGfbEz/xvbhntUnJfZlck4DY44upofxEOmlClF0O 17 | 46HrPvE9MaV2G9Em4ZyZVIKZ2PcC4O9UZEi2QVOfCpgQpQ+6VXlGzoHLy2OcOQ8/k1cnTkVtd4ar 18 | ssgf3ov/AAAA//8DANVagEMZAgAA 19 | headers: 20 | CF-Cache-Status: 21 | - DYNAMIC 22 | CF-RAY: 23 | - 82ea3dc01b6befda-PDX 24 | Cache-Control: 25 | - no-cache, must-revalidate 26 | Connection: 27 | - keep-alive 28 | Content-Encoding: 29 | - gzip 30 | Content-Type: 31 | - application/json 32 | Date: 33 | - Fri, 01 Dec 2023 09:18:10 GMT 34 | Transfer-Encoding: 35 | - chunked 36 | openai-model: 37 | - text-davinci-003 38 | openai-processing-ms: 39 | - '230' 40 | x-ratelimit-limit-tokens_usage_based: 41 | - '250000' 42 | x-ratelimit-remaining-tokens_usage_based: 43 | - '249744' 44 | x-ratelimit-reset-tokens_usage_based: 45 | - 61ms 46 | status: 47 | code: 200 48 | message: OK 49 | version: 1 50 | -------------------------------------------------------------------------------- /tests/chains/langchain_getting_started/sequential_chain_demo.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"prompt": ["What is a good name for a company that makes colorful socks?"], 4 | "model": "text-davinci-003", "temperature": 0.0, "top_p": 1, "frequency_penalty": 5 | 0, "presence_penalty": 0, "n": 1, "logit_bias": {}, "max_tokens": 256}' 6 | headers: {} 7 | method: POST 8 | uri: https://api.openai.com/v1/completions 9 | response: 10 | body: 11 | string: !!binary | 12 | H4sIAAAAAAAAA0RPS08CMRC+768Ye+ZRVoy6RzEmGj0YTUwUQ0o7sCO7ndoO4CP+d9MF5TKH7zXf 13 | 910AqK2JnvxSVaAea0rQssMGNhgTsQdK4DBEtEbQDeCOltEIwhwXHBFujF+b+AnjHpS6HIMwmA2T 14 | A0cproPkBF5AwrghiwO4RRM9tNlai4RUDYehMbLg2A44oDc0sNwOHds0/HtL7JPq5abkcknbhqZ/ 15 | di/b7eX5VXyffE0urrU5uU3XE9H0/FR/7tQ8f0Mr2SH4ITPLbWgwp+1oGzFPUhWMTvVoXJblue6I 16 | bv+fre/MhrylvtbHe1/NZDGpCl4KAIDv7sJOnm1TP/UPbFdikpA96lydgLzDD1WB/kcaXobI8xzm 17 | 103zjy/IU6pnEU1inzOTcFAd+1MAvHZF1sksUVX7AipEboPMhFfoc+DoeBenDsMP5OmeExbTHOBS 18 | F/nDT/ELAAD//wMATlx/hhkCAAA= 19 | headers: 20 | CF-Cache-Status: 21 | - DYNAMIC 22 | CF-RAY: 23 | - 82ea3dc27ff75ed2-PDX 24 | Cache-Control: 25 | - no-cache, must-revalidate 26 | Connection: 27 | - keep-alive 28 | Content-Encoding: 29 | - gzip 30 | Content-Type: 31 | - application/json 32 | Date: 33 | - Fri, 01 Dec 2023 09:18:10 GMT 34 | Transfer-Encoding: 35 | - chunked 36 | openai-model: 37 | - text-davinci-003 38 | openai-processing-ms: 39 | - '166' 40 | x-ratelimit-limit-tokens_usage_based: 41 | - '250000' 42 | x-ratelimit-remaining-tokens_usage_based: 43 | - '249744' 44 | x-ratelimit-reset-tokens_usage_based: 45 | - 61ms 46 | status: 47 | code: 200 48 | message: OK 49 | - request: 50 | body: '{"prompt": ["Write a catchphrase for the following company: \n\nSocktastic!"], 51 | "model": "text-davinci-003", "temperature": 0.0, "top_p": 1, "frequency_penalty": 52 | 0, "presence_penalty": 0, "n": 1, "logit_bias": {}, "max_tokens": 256}' 53 | headers: {} 54 | method: POST 55 | uri: https://api.openai.com/v1/completions 56 | response: 57 | body: 58 | string: !!binary | 59 | 
H4sIAAAAAAAAA0xQy27bMBC86ys2PMe2rBhIrGuLFijag+EAQREHBk2upG0oLkuuJRdB/j2g7CS9 60 | 8DCz8+JLAaBGHT35VtWg7jtK0LNFBwPGROyBElgMEY0WtHP4RW3UgnDAhiPCD+2POv6D1TVUZbUC 61 | YdADkwVLKR6DZAduIGEcyOAcfqKOHvos7URCqheL4LQ0HPs5B/Sa5ob7hWWTFu+xxD6p69yUbC5p 62 | +uBmdxsZx/ZubPG0cQ/f/jI+6K/DMnz/veHt+ZoPf9BIVgieZG+4Dw6z25k2EfMkVcPytlyuqqpa 63 | lxMx7X+XzaweyBualeXNRdcxGUyqhscCAOBleuF8nmU7v/M7tRUMQF4YvnDfcBQYSTrYsnkWnYTM 64 | 1U5NhpOWvMWTqqH8QBy3IfIh5/ijcx94Q55St4+oE/scl4SDmtjXAuBp6nhMukVVX7qpELkPshd+ 65 | Rp+mwWc79fkn/5HVhRQW7T7xal3kiNfiDQAA//8DAFSI2iw1AgAA 66 | headers: 67 | CF-Cache-Status: 68 | - DYNAMIC 69 | CF-RAY: 70 | - 82ea3dc428915ed2-PDX 71 | Cache-Control: 72 | - no-cache, must-revalidate 73 | Connection: 74 | - keep-alive 75 | Content-Encoding: 76 | - gzip 77 | Content-Type: 78 | - application/json 79 | Date: 80 | - Fri, 01 Dec 2023 09:18:10 GMT 81 | Transfer-Encoding: 82 | - chunked 83 | openai-model: 84 | - text-davinci-003 85 | openai-processing-ms: 86 | - '335' 87 | x-ratelimit-limit-tokens_usage_based: 88 | - '250000' 89 | x-ratelimit-remaining-tokens_usage_based: 90 | - '249744' 91 | x-ratelimit-reset-tokens_usage_based: 92 | - 61ms 93 | status: 94 | code: 200 95 | message: OK 96 | version: 1 97 | -------------------------------------------------------------------------------- /tests/chains/langchain_getting_started/test_custom_chain.py: -------------------------------------------------------------------------------- 1 | import langchain_visualizer # isort:skip # noqa: F401 2 | import asyncio 3 | from typing import Any, Dict, List, Optional 4 | 5 | import vcr_langchain as vcr 6 | from langchain import PromptTemplate 7 | from langchain.callbacks.manager import CallbackManagerForChainRun 8 | from langchain.chains import LLMChain 9 | from langchain.chains.base import Chain 10 | from langchain.llms import OpenAI 11 | 12 | # ========================== Start of langchain example code ========================== 13 | # https://langchain.readthedocs.io/en/latest/modules/chains/getting_started.html 14 | 15 | 16 | class ConcatenateChain(Chain): 17 | chain_1: LLMChain 18 | chain_2: LLMChain 19 | 20 | @property 21 | def input_keys(self) -> List[str]: 22 | # Union of the input keys of the two chains. 
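        # Both prompts here happen to share the single "product" variable, so the
        # union collapses to one key; the union form stays correct if they diverge.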
23 | all_input_vars = set(self.chain_1.input_keys).union( 24 | set(self.chain_2.input_keys) 25 | ) 26 | return list(all_input_vars) 27 | 28 | @property 29 | def output_keys(self) -> List[str]: 30 | return ["concat_output"] 31 | 32 | def _call( 33 | self, 34 | inputs: Dict[str, str], 35 | run_manager: Optional[CallbackManagerForChainRun] = None, 36 | ) -> Dict[str, Any]: 37 | output_1 = self.chain_1.run(inputs) 38 | output_2 = self.chain_2.run(inputs) 39 | return {"concat_output": output_1 + output_2} 40 | 41 | 42 | llm = OpenAI() 43 | 44 | prompt_1 = PromptTemplate( 45 | input_variables=["product"], 46 | template="What is a good name for a company that makes {product}?", 47 | ) 48 | chain_1 = LLMChain(llm=llm, prompt=prompt_1) 49 | 50 | prompt_2 = PromptTemplate( 51 | input_variables=["product"], 52 | template="What is a good slogan for a company that makes {product}?", 53 | ) 54 | chain_2 = LLMChain(llm=llm, prompt=prompt_2) 55 | 56 | concat_chain = ConcatenateChain(chain_1=chain_1, chain_2=chain_2) 57 | chain = concat_chain 58 | 59 | 60 | # ================================== Execute example ================================== 61 | 62 | 63 | @vcr.use_cassette() 64 | async def custom_chain_demo(): 65 | return chain.run("colorful socks") 66 | 67 | 68 | def test_llm_usage_succeeds(): 69 | """Check that the chain can run normally""" 70 | result = asyncio.get_event_loop().run_until_complete(custom_chain_demo()).strip() 71 | print(result) 72 | assert ( 73 | result == 'Sock Spectacular.\n\n"Step Up Your Style with Colorful Socks!"' 74 | or result == 'Socktastic!\n\n"Step Into Color with Our Socks!"' 75 | ) 76 | 77 | 78 | if __name__ == "__main__": 79 | from langchain_visualizer import visualize 80 | 81 | visualize(custom_chain_demo) 82 | -------------------------------------------------------------------------------- /tests/chains/langchain_getting_started/test_llm_chain.py: -------------------------------------------------------------------------------- 1 | import langchain_visualizer # isort:skip # noqa: F401 2 | import asyncio 3 | 4 | import vcr_langchain as vcr 5 | from langchain import PromptTemplate 6 | from langchain.chains import LLMChain 7 | from langchain.llms import OpenAI 8 | 9 | # ========================== Start of langchain example code ========================== 10 | # https://langchain.readthedocs.io/en/latest/modules/chains/getting_started.html 11 | 12 | 13 | llm = OpenAI(temperature=0) 14 | prompt = PromptTemplate( 15 | input_variables=["product"], 16 | template="What is a good name for a company that makes {product}?", 17 | ) 18 | 19 | chain = LLMChain(llm=llm, prompt=prompt) 20 | 21 | 22 | # ================================== Execute example ================================== 23 | 24 | 25 | @vcr.use_cassette() 26 | async def llm_chain_demo(): 27 | return chain.run("colorful socks") 28 | 29 | 30 | def test_llm_usage_succeeds(): 31 | """Check that the chain can run normally""" 32 | result = asyncio.get_event_loop().run_until_complete(llm_chain_demo()) 33 | assert result.strip() == "Socktastic!" 
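    # The reply is deterministic: temperature=0 and vcr_langchain replays the
    # recorded completion from llm_chain_demo.yaml, so an exact match is safe.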
34 | 35 | 36 | if __name__ == "__main__": 37 | from langchain_visualizer import visualize 38 | 39 | visualize(llm_chain_demo) 40 | -------------------------------------------------------------------------------- /tests/chains/langchain_getting_started/test_sequential_chain.py: -------------------------------------------------------------------------------- 1 | import langchain_visualizer # isort:skip # noqa: F401 2 | import asyncio 3 | 4 | import vcr_langchain as vcr 5 | from langchain import PromptTemplate 6 | from langchain.chains import LLMChain, SimpleSequentialChain 7 | from langchain.llms import OpenAI 8 | 9 | # ========================== Start of langchain example code ========================== 10 | # https://langchain.readthedocs.io/en/latest/modules/chains/getting_started.html 11 | 12 | llm = OpenAI(temperature=0) 13 | prompt = PromptTemplate( 14 | input_variables=["product"], 15 | template="What is a good name for a company that makes {product}?", 16 | ) 17 | 18 | chain = LLMChain(llm=llm, prompt=prompt) 19 | 20 | second_prompt = PromptTemplate( 21 | input_variables=["company_name"], 22 | template="Write a catchphrase for the following company: {company_name}", 23 | ) 24 | chain_two = LLMChain(llm=llm, prompt=second_prompt) 25 | 26 | overall_chain = SimpleSequentialChain(chains=[chain, chain_two], verbose=True) 27 | 28 | 29 | # ================================== Execute example ================================== 30 | 31 | 32 | @vcr.use_cassette() 33 | async def sequential_chain_demo(): 34 | return overall_chain.run("colorful socks") 35 | 36 | 37 | def test_llm_usage_succeeds(): 38 | """Check that the chain can run normally""" 39 | result = asyncio.get_event_loop().run_until_complete(sequential_chain_demo()) 40 | assert result.strip() == '"Step into Comfort with Socktastic!"' 41 | 42 | 43 | if __name__ == "__main__": 44 | from langchain_visualizer import visualize 45 | 46 | visualize(sequential_chain_demo) 47 | -------------------------------------------------------------------------------- /tests/chains/langchain_how_to/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amosjyng/langchain-visualizer/e51a63e5842a02548927532e1926c2b409ba43d1/tests/chains/langchain_how_to/__init__.py -------------------------------------------------------------------------------- /tests/chains/langchain_how_to/combine_documents_chains/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amosjyng/langchain-visualizer/e51a63e5842a02548927532e1926c2b409ba43d1/tests/chains/langchain_how_to/combine_documents_chains/__init__.py -------------------------------------------------------------------------------- /tests/chains/langchain_how_to/combine_documents_chains/map_rerank_demo.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"input": [[3923, 1550, 279, 4872, 2019, 922, 12007, 426, 8233, 261]], 4 | "model": "text-embedding-ada-002", "encoding_format": "base64"}' 5 | headers: {} 6 | method: POST 7 | uri: https://api.openai.com/v1/embeddings 8 | response: 9 | body: 10 | string: !!binary | 11 | H4sIAAAAAAAAA1R6WROyOrfm/fcrdu1b+ysQlSz2HbNMJgqo2NXVBY7ggAxJIKfOf+/C99Tp7hur 12 | RIxR13qmlf/4119//V0X1fXc//3PX3+/yq7/+39M1y55n//9z1//819//fXXX//xe/z/7ry+i+vl 13 | Un7uv9t/L5afy3X4+5+/5P++8n9vmtbm5x27tfNXyEfkxKh3a0Z55j6KRvUPMjrK45Eu1cU6FOPr 14 | 
NIPbB0a62LBtJX8Exui04iEjfrZq+QmRTN3P+APfNt4qGaRCuPA6QUyu2aUrxnQzxCtXFyuG0dxP 15 | hryYxajmZ535ObGLYflAHQwf78Cu/NUk7OuuMbiFzQm5LY+iuxeuCuUzrPEKMaNVrojnaJOLLV29 16 | SJVQrNMtfE+tRZfA1ta8/3qRih+uTbzhtkbCkB+jxr71g5klZJV4W4WuxsaoUASmZw2RCHVAZvol 17 | erCTwyGqFleoH1eDRJt5VvA8KEzAaOcx9xhZLSdmAgi1VUeceecWfJ+sZXD5k5Jw8VKtTjy3IwrH 18 | 4UTIc/8Sw6r+zJCtCInOue2KBTruMNjqc0mMezQrRun6AK0jR5WqlhoXo7i2OjqdOaYQl+9CoGIX 19 | Qxlfn2xTb3gxpudXDRZ/S1jeAbGGoYItgigt6eDvKGqyqM/R2wWCJXdeJ/zFcx0tqgMi4WF/CQd0 20 | ZzK822vI1k0sVbx/n1JI+7hmhjGwZKRqdYZX/83Y+XvZI1G9CQXluzeYuTSccJwfmqW6KtKR+bUp 21 | o759yCMK06EgQawOxbe0fAyZxQ/s0HyStn/68yv4yfvOonX8sTrFsWfItZ8jOV9KpxgvcQfIC54f 22 | Zvg7LJhKHaoa98xhyX4wW64/ahnuCVDim0e3UlhaZYgv9g9iNtECDcVxFiHJsE50tfo82sFywiU0 23 | F2/GnG3nt/N6rrmIOE+dni7mHNVLmuVoWp+Y0VlNxGWfyNoyqmYkjF+XpGWrOAbDzNa0wrvKEtvT 24 | GdB1GWq48p/zcHAT6w002VOK7kFTiM3r4qH0LjG6DBZtwtgbN5CF8Z44uV2LIVuYJuhkGZHkG43V 25 | GGqjDr2yH5j+ce/Wi+7lN5JKeUviJSgt7+1TCu89YOaPxzJp8rKPwKuGlqLB9MPFythl6NW3GV3k 26 | H5JQ0d8ysIW7ImtqSgWrT7GsBrKdkexqopBXeRxBGksDFdHJShafRw5wJaASb7H+FGIgcSAd0u2N 27 | Xbt5U43n1/KNci5tiN8sTKFg/R2jma8ciXuS5uH4bLotKItUYuZ190m44jsm8lfuha31z65q7Ifa 28 | oMWYqlP/LAs2hLEN19d1iVVnHIvBYLYOmjicmKFGELKtey9hXQuO5VOEkx9eAA+OT+akZ//Pc60P 29 | 9nOyXq4jxPdlkqEee5Rc+jkTIoJagdnmcSF42v9wEgZoZQ0l8xLzHNapl9yBu82VnliZhl16JEv0 30 | qmc7Yt0Hox0+rT6CVBy2xF4xraDJfpVBtuMpCb24q/i72wEC7/ElhNaHilN9F6OpPyg67C8Wv9H4 31 | DtgdIjxjy3PIP8+dohon1aXPud1ZA7TiAGE15DQcVU8srqygSDYaD3dR41bf194DMPbriMLQVAV1 32 | LuobkoIzYuVngYalhGqoRjiTIC13Ie8Th8KELxP+WYmo5skWkHaoqHg+jELZylRHfbYXFMRoJbxP 33 | Nh0EC3Gd+l+zuDU7qzCPGpPgZwaCak3jwX0bzmn9/PjWvDgqGJTd3CTk6bwqHo9FDYb68onhPEg4 34 | nMnMRpEmEhZFzbsa2pcsw9I4rDD3D2Y7rnbXGTptOGHugsUVX0fnRg1TUbBgoWJrGDXZQ+5NLKhW 35 | b7bFmJMlB9U99MS+Sbf245jfEn13oYXn/W0pupWXpfDDx0Q685YvoywDWWssrOjgtNw43TtNkWSJ 36 | NsFNT8SQ4ytitvckTgQ+GkJkeejwlp4s2rwyS3wX+ag6O3dBid3XyVhA74LnCkqX4QioS6LkDUtP 37 | XrLLdj9W9crLDr/6Z2RVz9sO8q2tnXc3Bx+U511QB6kl4Nh2mL+QHkjMXTcAG7+BOI+0tsatNTTa 38 | YqNoDGebzhI4rjtVseY2yfTyIsYmaDlY1sdkaxivlkidc4Oqu/8k4WMP4bcP9RhE74XM3T2f4fho 39 | l94ffgp4QCsBtvNG30Vr4G63P6JffyHDzNckAtWsxHE4x/AZvhFJvJUl+Pmcc+TrQ82iwMkqPlib 40 | 6w9/qSi/j4Qy5yaj6felq/s4r4bFGilIdvaY+O64qEb5e0vRHPY2SVJZLrqu2Cgw26VHlgwn3Roq 41 | ppXwlsIdBmtcT3yb6EA/wYet36WE+jsxcm2g9Z4qffRom8GrMKzW1Yq4iMThkEiKAjZ5apg/jUEM 42 | +j4KUBFtHWLT2zXsJr2B1Ivc4ZH5rtWvagYIDo8PRnh+qWiwfd9h80gI7dPxXrC18ojBvQ0LZmwf 43 | 1GLBcX9Fy+KhEffEUsHnavZG+Zt7ZMt2W9SWq+KgjodFRozQx+0of48p9NlRYCl41dawNQKOTukW 44 | kwnvWq7a2/iHzwRvLpuEPy23hIOvfZjlfatKPCiR1fViYMSNb/NqdBfqGULLPZIf3/LFmF0R648f 45 | qujwqtj5lCzhx0fG7nxrhzNRXKDDvsNdFTzF1H+umknjDovjt0y4uagbWMXKOOHDYL2GSo7h2G+P 46 | zF2t43CeVbMMPjr4xCpOZSjSsQ7go80csj4TEo5jWbngDeJLAr+Mw3F3Ge6Q29qOzquzZy2i/K4g 47 | iJUHFRfjgcrHhuNVlc6uVEFkDLlfple0lBWZuNdziXpnZTXAq7ogsXFGSZ9VsxzWL7th3nodVv2k 48 | h6ALG5lgvg9C5T7r3lCZ4ZtSdykVHdVPW3XiPzrXJWhHQ/+k8LVCBwvjZBcL6bVVtby4ERb5wTpc 49 | iMduOZv0CzG3kRpSMK4dTHxKle1xXvFHGR/AmOUms4OOiCE8WYF2ViQTV2zQC46I/YZzptn0adg8 50 | 4cf1uoFP0mI8al+7XZjxl6ON/ozYdV9fw35XBDX6Bq1NZzctrJRDNbiIOkfO9JMxCpEo90hL17z9 51 | ff/is6pO2UoXuUeVY2RVw+BAiub60Wf28tlWnXVPZ/Ccnw9sY7wSwevZjsLBjR/EIbYvhv0r2KL9 52 | Ii5ZeC2DSvQjVqCahQ/MI8UMxSzcmfDTl3/6Fd2ZgnYkRsyt1qk1NIVeoiSNObHYcC+GNDQzyCS+ 53 | wzPLu1UiPtcHyGOJ0JnIipZb7H5HAXoeMW20oOUrb3vQJjxmpMoWRX8pjLcqs8bBodLU1btUnlet 54 | TfYSwZ9La42+r24hqp6EFI/9ORzYxTMRP+0rhh1tg0Sr4hmMy5qQ4KLyQmhk50IdgsWiCb84l08m 55 | Kl/hlwXzkVoc8swGTrwr2USvWAg2j7FW4euN6fKXF328lnTE7OBJ3HtqIt5ud/ef3sZS5i2LATYi 56 | h1pvQ7o63OxW7HapApfNRWdk2DyrYTRgi4rDJcCv9bNJ/uzvdvARXnosTH56CGp+1YnPb2bLbzcX 57 | 
IDi6BQvFPE962IgMzViasnTSL4srSyhsZHvN8G55TcZ8WI4AiVIyfeOWxdCH+hZafgQ8fLpLJWDY 58 | 2UhO9xuqLecUTXoygE4K2FTfH4u7NG1ArGvMNtv92IohzF2IVW3FAinoC77LilF9xdfkp69DlnzS 59 | GnDsOoyUl5dgs/V2pk16Ds+z6GvVPz2Q6tuO6echbnkXnc6wjYRB5x5BYjgJH6B43jwSmfNTIs6n 60 | QkWHo/TCc/8cJLxrdjl4ztAze/IfYq+lV8CW6xL3GFXV9x4aB5AeMsHvwH5Ww2lnzGDykySQgk3B 61 | qve6Q7dDiCb9uC6G7BS44DG3wvuSnNruxXMTHoXPmP0hV9TV9x3XFu5hRpOL8RDDqEEA63rgxN0e 62 | 5+0fv3Jd+hojpqcWnX6pR5ASeccs8bhbfJA2KapM/0300+xhjara5Op5djFYUKukGLJKyeA+C0c8 63 | W3io4Gq5jaBxamnCf9ryr3qqYWXJgl0vntZy51PnYKwdTPTrNy7G1eagAoP9m4S7phXj6l6WWqxK 64 | K2aa3y/i+YKkyN24I5XE3q6GnRbUgC+7gBF03xfjUVsCFN/YJLr83Rbiuks4hPU7JeTau+Fw2vkz 65 | FOTDA8M6Xlvzwjdk1Pa1wor161rU3+zUqLzzCmZN+NxvdpYJmOwiol+oLhZLxVFgEw47gmnfFMN7 66 | oytaUIsblmdRa7EIann180M/P/qNFqH76w/6ljsjpKvdAeDuAGd+LVXhGN5GW5upVY555hpFC/bm 67 | DbJZ+3R+JGo7LNZCgZvwl2TtksEagkuI4VDFV8of13s16QUZeano8P32vrdcNrNRlbZWxvB5eSnE 68 | cMV3dbV+rIj+Wg0VW8w2M/h8rmsWGc2nopFHc2RFa5POnhkg1lf5GbZ2YhB8XmoFL/X8APkt9vA4 69 | jw5iWFiCw6S3GZF1xeov4UwHfE1CZvLhJQbimIr2eV9d4hAJTf3pxXDY8Dtzl2cdjctn81xRvf6y 70 | n18QA8kDiLGmYeUppWG9T4gMmvS4MX/I03CgLy9A1Kgb4rzh0w7mx1JVkiXOxOdSxazLwV36Z/dO 71 | y6hT28lPxbBXpXryo3nRnF/LJxjZ0mHFaX6uhltiXGEzdz1C0no5KWPJhfYYLBn2+7760z9GsFyT 72 | P3hmbGwTjvq2IBkpz5XYrPcREG0wyeZ02U3+UzVRTrdrFtPVC4mMpFs4jdvoTx7U90iu4QGzNwkw 73 | 0a3p/Rh+9XuPlDKc/NAMKcu9jiVpu7f4gaw5SFzBzGzOmTUGW3qHOwsFpdK+FpMe9mDyo4wUm3fV 74 | rYs71nrVawjZLlcWx25qgrQ6YDy8h84ay8/SRttC6BRu+cvixCxmKP5sBVZrlST9HWkdWjYPmPwI 75 | s+jusrojPAqPRZ22L/hngbcwDPuC+dXHauW8jgGC/n3CLOnX4egx9YCOXLoyy0znQvRGflWhODwY 76 | 3lz6ZHh8/S34yfOO6WsfWkLZ7yiivvdlzkJ2i2G2/TyhWGx1Yn2juKVUqTKkrg+UOf0tE4P6/Ki/ 77 | /5/hSc+MG/ud/vEf/lXdt9xAd/rLk375UsK/retCuh5bRjbeKWmV3an54wcm/kdiqpfVVwp1pofX 78 | shXffTyCmSx1tt9233Zc7o8mQs2jxXP0dENuq2cFUWfPSReab9HDBmUoPkuIeNYxROJJkgjZlyfC 79 | /ICNhAd5egBVlb/E9EBFvTwHU/2U7WbKh27VaL3UDrbNKOHF94jDeZvoubb9jjNm9nBsx2y1XCIb 80 | PVfEkWyn7X7rBYtfG++YGNgGzrDx7JCsdzee9PHcvAOlzZtKdu8lrHPy8od/LHgFpB36r4ehH5uW 81 | mcmuTvikB5EwFykW1uOZcPZ17jAe5hkepvyAF2K9/bP+dr9LQsqcowIb/R2RwCKPXz4WA5Zsg1g7 82 | W654+k7OWseDAc++ToPor15/eONa7IDGYXV7q2Pe7Jj3KvNqNENVgVswmxO7kWYVazdxrTkzWxAP 83 | HxsxOmi8a4zsv8yoTyESOjxUddILjGjZvGAw3wVaLnOX5V2OxbA6oScsVw9E/He+t/ruAt1qdq7O 84 | WJFAD1vUOA0MQXAiP35UTvOlDP7ZvjP32lkWl3dOgMSzCYn/ulktd+m5Qel1+6Xt4qWGVLp+Z4gt 85 | mzsjSf+xBssIlT/864RpEMqLdHvXXtKZELKq91XzaJcBCkdxoqO/8kP2PpAz3J0Zx8u8RK0Iyf4A 86 | vD6WdJWTZzIKvXXVlXjM6cSXST2iTQzN0CCGs0uXdEZ3CdSatD4jzVJBPIuLAEk364CDzNxWfOJn 87 | dEjjGyO7+TrpL5rxhm/YOsxJGLKE1p9MmJeBTpX2+RD8eCgw3A1foa8C6mpMd6sUjfo+Z/YsasP+ 88 | zXTll1dgcPJ1cf/5ObR5UKJbhoyE61843MzrnPna8VGM/bm6Q+JJCyqOq7Lt60pvYMtjjbmwGEQr 89 | HTYHtDwqC+Y+7bLlq7ndaHtVqwmW9h5S0nNfo4k/iffL18YuUxFNjwyX7slvV5qDtvDrB/LxjpZY 90 | Z98DkhXPJla5Mqo+ngclaHFV0VuirZOBfEwZUknrmT8rRcjHIFvClKcTV9hGIaZ8TGuHo0ZFcTIt 91 | 8T6sz0jSrJSQH55NfhJ9brM1XZnrdzKEn0UJP70Em/kyGU1gWKV+8GXWJx0r5t32T3gsw45Ekz/n 92 | w2OTgRIeLbzU40vL57Nto0Ube0v5A+vJXN/bnmbsMpecZFVU32hhubDK0+GPv2UE1zVc5esCV9J5 93 | Ww22ZtFVeT+XFN6NXcjimY0of2iYmcrplfDx4qQQ5OLxJz8b2MNzwdWHFdZc89H24UcqkVMNgFfu 94 | eKz4vcmv8N7PMHGOJG9HNWtymPaD09k5TMa74LGme9mGRU3+tMbVrLnCnvGSmXT1EhxdtwE8m/BC 95 | ouVGbv/gw5Q3UHR5QSLiWX2AlaUIhmPv2o6//CC7aDlG9zyY8lD+hv50lJmpP75CrMbN+NMz+H64 96 | Plou6kxWvdP7TaZ+T4Zob9kQ7oczmwgLdXlQ6Kg++v6k/81iZGmbwcsLC3r3D2UljNc1Wk76gZhR 97 | l1fjkfa5ukbvmukhNguhmNiG4nXzySboRzEc2OwJXNnfmXF7RIh5xqVGE/+z45Qf/anPxUbWcFeo 98 | TvUnPz/50oGqaTmEsnzPKBzpNmP2vetF1zW77JcnUeVyk6t+8ofajmwRu7imUSlaVEbosrnpzEiN 99 | Tfsh0sVE+ea2o9UmTdGoeWUA3XW/wrN7rYXd08KlOuWLVNHkuyUac+3B+3LdMN1QKkv0VXyF40O7 100 | 
/dFfPIrTCH0XX4Nshhdrp7zIhDGqE2Yu4VA1ZD1Q+PhnHy8PI0Kde7vHmr7OI7LeER4KgusG8WD/ 101 | ZFEauCGf5lOqLn0wi+a5ZX3Xg1X++BBrJNeFqI7FAaHZ4cnCt9lYfDFuzzA9J45HCjEGKX2qeq8G 102 | VEzfj3vHe6C15X7GiFOfUJe78eyXXxM8XhprqCTfRXGwVYj7grIYDkx5qkuazmmZ7Oril0+rPz1l 103 | KOdzUrc3ZwkpbD/MuvkGGo/G4Gr3dMboYK1YMqxqNoNssd2xK9lvLDH0uQ1SkW5ZSm8zizcfrP/y 104 | lcnv5Mkf/7fbjypxz8/KuiLnyn96i/i7UfnTz5Db0o44k7+i8vd4gHsJPRYdxAVX9QzDNC8ivqwm 105 | rZzJhQfiWYdse1N0tDAOXwUmfUPCcH+1GFrtIsiP3Gf7I1ErLu82AQyZdyTnKW+k/eY4g3Mgucw5 106 | 3nLB4tk91fDdtZgz+UueFGuAGlqX/PY/HrevM8qNOCQWP+8Qz+TEA3k1D9iFOtwa4rVkgtoqDbMe 107 | nQhHoA3+4QWVJv4aLxI/g9291Wk++JjytrUM8KreGMmjJwZphUwYnOONOavz2/rVDxj4taHoW37b 108 | psGVAqUIP7i+L74JP8/iEdR7NTJ/c3xV450tMfiaeNJz+9ojIWL38MuX8DQftUZ9Ps6Q+3gz9vNb 109 | su3XZ9ifpYYqIVGSVsaTHq2Ol8lPrwoaFqr+y9uxvD/TdvCt8ADTPJH9/ORQO14HTN+3uFuUVqhs 110 | 5sMSpvUpU18VolXFdViTZ0c8Xq7aX96tvkcImXcYC8Eg2XmaT4cXVhCJreHE/CVU1exCHElaWN1U 111 | P+rm+vSn/X+sIf2aB7CfrkqR23SiL4X2RPohC6nCwUiGS+E/4VmEV8yPpyT8o4+bNgBmGSc7GcBH 112 | BzjqccEiUMu2U/Us+uUTxF9LrrWY8E3FD9smiXjo4fy98WQkTscYo33e/vC2BGdnL0hqy9wag0oN 113 | 4JTGmOh4Vlr8kbijKmu1RdaVhCuuJna9GvvjgcJztCtxr4o7bMtYYkZ71gpu1BdbNaTcYt5l7be0 114 | MpYm+I19JbnL9GqYGSiCKb+mwPt92ASV6iE4VB/MTzPD4qt3tv3pe2ZN88fxlHAFcPh2idOln1Ac 115 | 5+d0Oc0XJr3K2vG85g24V6EQX8RCcOUTzYC4T+NXX9Uvz0bByz4Rn5WpNcaLVf4nrz7vnWP1Zx7D 116 | 8rrEr8k/85kUbVEo2TEJz02DBsuxVFWf7B1W6xyNt8XygE7tuCHmPFLEpB8pqr8zgxFnmYc9k3RT 117 | W24eEnO/XWmN+lHoUIRbm+3a86WY8sQY/v6dCvjPf/311//6nTB415frazoY0F+H/t//fVTg3/kl 118 | /7csK/9myp+TCLTL79e///mvQwh/f9v6/e3/d18/r5/u73/+mv/XcYO/+7rPX//v9X9Nn/af//o/ 119 | AAAA//8DAJA1jaHjIAAA 120 | headers: 121 | CF-Cache-Status: 122 | - DYNAMIC 123 | CF-RAY: 124 | - 82ea3e2e3b9c5edf-PDX 125 | Connection: 126 | - keep-alive 127 | Content-Encoding: 128 | - gzip 129 | Content-Type: 130 | - application/json 131 | Date: 132 | - Fri, 01 Dec 2023 09:18:27 GMT 133 | Transfer-Encoding: 134 | - chunked 135 | openai-processing-ms: 136 | - '22' 137 | status: 138 | code: 200 139 | message: OK 140 | - request: 141 | body: '{"prompt": ["Use the following pieces of context to answer the question 142 | at the end. If you don''t know the answer, just say that you don''t know, don''t 143 | try to make up an answer.\n\nIn addition to giving an answer, also return a 144 | score of how fully it answered the user''s question. This should be in the following 145 | format:\n\nQuestion: [question here]\nHelpful Answer: [answer here]\nScore: 146 | [score between 0 and 100]\n\nHow to determine the score:\n- Higher is a better 147 | answer\n- Better responds fully to the asked question, with sufficient level 148 | of detail\n- If you do not know the answer based on the context, that should 149 | be a score of 0\n- Don''t be overconfident!\n\nExample #1\n\nContext:\n---------\nApples 150 | are red\n---------\nQuestion: what color are apples?\nHelpful Answer: red\nScore: 151 | 100\n\nExample #2\n\nContext:\n---------\nit was night and the witness forgot 152 | his glasses. he was not sure if it was a sports car or an suv\n---------\nQuestion: 153 | what type was the car?\nHelpful Answer: a sports car or an suv\nScore: 60\n\nExample 154 | #3\n\nContext:\n---------\nPears are either red or orange\n---------\nQuestion: 155 | what color are apples?\nHelpful Answer: This document does not answer the question\nScore: 156 | 0\n\nBegin!\n\nContext:\n---------\nTonight. I call on the Senate to: Pass the 157 | Freedom to Vote Act. Pass the John Lewis Voting Rights Act. 
And while you\u2019re 158 | at it, pass the Disclose Act so Americans can know who is funding our elections. 159 | \n\nTonight, I\u2019d like to honor someone who has dedicated his life to serve 160 | this country: Justice Stephen Breyer\u2014an Army veteran, Constitutional scholar, 161 | and retiring Justice of the United States Supreme Court. Justice Breyer, thank 162 | you for your service. \n\nOne of the most serious constitutional responsibilities 163 | a President has is nominating someone to serve on the United States Supreme 164 | Court. \n\nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals 165 | Judge Ketanji Brown Jackson. One of our nation\u2019s top legal minds, who will 166 | continue Justice Breyer\u2019s legacy of excellence.\n---------\nQuestion: What 167 | did the president say about Justice Breyer\nHelpful Answer:", "Use the following 168 | pieces of context to answer the question at the end. If you don''t know the 169 | answer, just say that you don''t know, don''t try to make up an answer.\n\nIn 170 | addition to giving an answer, also return a score of how fully it answered the 171 | user''s question. This should be in the following format:\n\nQuestion: [question 172 | here]\nHelpful Answer: [answer here]\nScore: [score between 0 and 100]\n\nHow 173 | to determine the score:\n- Higher is a better answer\n- Better responds fully 174 | to the asked question, with sufficient level of detail\n- If you do not know 175 | the answer based on the context, that should be a score of 0\n- Don''t be overconfident!\n\nExample 176 | #1\n\nContext:\n---------\nApples are red\n---------\nQuestion: what color are 177 | apples?\nHelpful Answer: red\nScore: 100\n\nExample #2\n\nContext:\n---------\nit 178 | was night and the witness forgot his glasses. he was not sure if it was a sports 179 | car or an suv\n---------\nQuestion: what type was the car?\nHelpful Answer: 180 | a sports car or an suv\nScore: 60\n\nExample #3\n\nContext:\n---------\nPears 181 | are either red or orange\n---------\nQuestion: what color are apples?\nHelpful 182 | Answer: This document does not answer the question\nScore: 0\n\nBegin!\n\nContext:\n---------\nA 183 | former top litigator in private practice. A former federal public defender. 184 | And from a family of public school educators and police officers. A consensus 185 | builder. Since she\u2019s been nominated, she\u2019s received a broad range 186 | of support\u2014from the Fraternal Order of Police to former judges appointed 187 | by Democrats and Republicans. \n\nAnd if we are to advance liberty and justice, 188 | we need to secure the Border and fix the immigration system. \n\nWe can do both. 189 | At our border, we\u2019ve installed new technology like cutting-edge scanners 190 | to better detect drug smuggling. \n\nWe\u2019ve set up joint patrols with Mexico 191 | and Guatemala to catch more human traffickers. \n\nWe\u2019re putting in place 192 | dedicated immigration judges so families fleeing persecution and violence can 193 | have their cases heard faster. \n\nWe\u2019re securing commitments and supporting 194 | partners in South and Central America to host more refugees and secure their 195 | own borders.\n---------\nQuestion: What did the president say about Justice 196 | Breyer\nHelpful Answer:", "Use the following pieces of context to answer the 197 | question at the end. 
If you don''t know the answer, just say that you don''t 198 | know, don''t try to make up an answer.\n\nIn addition to giving an answer, also 199 | return a score of how fully it answered the user''s question. This should be 200 | in the following format:\n\nQuestion: [question here]\nHelpful Answer: [answer 201 | here]\nScore: [score between 0 and 100]\n\nHow to determine the score:\n- Higher 202 | is a better answer\n- Better responds fully to the asked question, with sufficient 203 | level of detail\n- If you do not know the answer based on the context, that 204 | should be a score of 0\n- Don''t be overconfident!\n\nExample #1\n\nContext:\n---------\nApples 205 | are red\n---------\nQuestion: what color are apples?\nHelpful Answer: red\nScore: 206 | 100\n\nExample #2\n\nContext:\n---------\nit was night and the witness forgot 207 | his glasses. he was not sure if it was a sports car or an suv\n---------\nQuestion: 208 | what type was the car?\nHelpful Answer: a sports car or an suv\nScore: 60\n\nExample 209 | #3\n\nContext:\n---------\nPears are either red or orange\n---------\nQuestion: 210 | what color are apples?\nHelpful Answer: This document does not answer the question\nScore: 211 | 0\n\nBegin!\n\nContext:\n---------\nWe\u2019re going after the criminals who 212 | stole billions in relief money meant for small businesses and millions of Americans. \n\nAnd 213 | tonight, I\u2019m announcing that the Justice Department will name a chief prosecutor 214 | for pandemic fraud. \n\nBy the end of this year, the deficit will be down to 215 | less than half what it was before I took office. \n\nThe only president ever 216 | to cut the deficit by more than one trillion dollars in a single year. \n\nLowering 217 | your costs also means demanding more competition. \n\nI\u2019m a capitalist, 218 | but capitalism without competition isn\u2019t capitalism. \n\nIt\u2019s exploitation\u2014and 219 | it drives up prices. \n\nWhen corporations don\u2019t have to compete, their 220 | profits go up, your prices go up, and small businesses and family farmers and 221 | ranchers go under. \n\nWe see it happening with ocean carriers moving goods 222 | in and out of America. \n\nDuring the pandemic, these foreign-owned companies 223 | raised prices by as much as 1,000% and made record profits.\n---------\nQuestion: 224 | What did the president say about Justice Breyer\nHelpful Answer:", "Use the 225 | following pieces of context to answer the question at the end. If you don''t 226 | know the answer, just say that you don''t know, don''t try to make up an answer.\n\nIn 227 | addition to giving an answer, also return a score of how fully it answered the 228 | user''s question. This should be in the following format:\n\nQuestion: [question 229 | here]\nHelpful Answer: [answer here]\nScore: [score between 0 and 100]\n\nHow 230 | to determine the score:\n- Higher is a better answer\n- Better responds fully 231 | to the asked question, with sufficient level of detail\n- If you do not know 232 | the answer based on the context, that should be a score of 0\n- Don''t be overconfident!\n\nExample 233 | #1\n\nContext:\n---------\nApples are red\n---------\nQuestion: what color are 234 | apples?\nHelpful Answer: red\nScore: 100\n\nExample #2\n\nContext:\n---------\nit 235 | was night and the witness forgot his glasses. 
he was not sure if it was a sports 236 | car or an suv\n---------\nQuestion: what type was the car?\nHelpful Answer: 237 | a sports car or an suv\nScore: 60\n\nExample #3\n\nContext:\n---------\nPears 238 | are either red or orange\n---------\nQuestion: what color are apples?\nHelpful 239 | Answer: This document does not answer the question\nScore: 0\n\nBegin!\n\nContext:\n---------\nAnd 240 | for our LGBTQ+ Americans, let\u2019s finally get the bipartisan Equality Act 241 | to my desk. The onslaught of state laws targeting transgender Americans and 242 | their families is wrong. \n\nAs I said last year, especially to our younger 243 | transgender Americans, I will always have your back as your President, so you 244 | can be yourself and reach your God-given potential. \n\nWhile it often appears 245 | that we never agree, that isn\u2019t true. I signed 80 bipartisan bills into 246 | law last year. From preventing government shutdowns to protecting Asian-Americans 247 | from still-too-common hate crimes to reforming military justice. \n\nAnd soon, 248 | we\u2019ll strengthen the Violence Against Women Act that I first wrote three 249 | decades ago. It is important for us to show the nation that we can come together 250 | and do big things. \n\nSo tonight I\u2019m offering a Unity Agenda for the Nation. 251 | Four big things we can do together. \n\nFirst, beat the opioid epidemic.\n---------\nQuestion: 252 | What did the president say about Justice Breyer\nHelpful Answer:"], "model": 253 | "text-davinci-003", "temperature": 0.0, "top_p": 1, "frequency_penalty": 0, 254 | "presence_penalty": 0, "n": 1, "logit_bias": {}, "max_tokens": 500}' 255 | headers: {} 256 | method: POST 257 | uri: https://api.openai.com/v1/completions 258 | response: 259 | body: 260 | string: !!binary | 261 | H4sIAAAAAAAAA8yTS2/bMBCE7/4VC579kGTnpVvaHoqgAVLUKBC0hUGTK4mJtMsuKT8Q5L8HlB2n 262 | h/bUSy46cHaW3wyopxGA2mohR7UqQS0bF6Bjiy1sUIJjAhfAohc0OqKdwq2rRUeENVYsCDeaei17 263 | WIyhyIoFRAa9YWfBuiC9j2kDVxBQNs7gFL6gFoIuWZsYfShnM9/qWLF0U/ZI2k0NdzPLJsxer3VM 264 | QY0TqbMJ0nS+nVx+jbtP18v7j6aRUN91t5+39/T9YXvebK+uD9O8fkATkyPiLq4Md77FtO0gG8EU 265 | SZWQX2T5oijm2cUgDPlfbROrN46Mm2TZ/Ohr2BkMqoQfIwCAp+ELh/Fkg2WDcCcYnEWKEBtNj2jh 266 | pg/RGYQPgnsUqFggtX2sBjRZaJhY0ELjukG3aF0qgOphtHUVpoaTBSE2CIZ7irKf/qRvhgVLyLNs 267 | oByAHFncqRKy00nLtRdeJ3jq2/Z0XjlyoVkJ6sCUMoTIXg3q8/ifKdPTYNN3KaVlDEAcQVPYogx0 268 | v3sMqe8T3V/Y8nfMVrxjtvl/sY0Afg2PuQ+6RlUeMZUX7nxcRX5ESguLvDg/LFRvv8+bfHZ1FCNH 269 | 3f5puzwbpWueRy8AAAD//wMAt8dT1GIEAAA= 270 | headers: 271 | CF-Cache-Status: 272 | - DYNAMIC 273 | CF-RAY: 274 | - 82ea3e2f1c015edf-PDX 275 | Cache-Control: 276 | - no-cache, must-revalidate 277 | Connection: 278 | - keep-alive 279 | Content-Encoding: 280 | - gzip 281 | Content-Type: 282 | - application/json 283 | Date: 284 | - Fri, 01 Dec 2023 09:18:28 GMT 285 | Transfer-Encoding: 286 | - chunked 287 | openai-model: 288 | - text-davinci-003 289 | openai-processing-ms: 290 | - '639' 291 | x-ratelimit-limit-tokens_usage_based: 292 | - '250000' 293 | x-ratelimit-remaining-tokens_usage_based: 294 | - '248000' 295 | x-ratelimit-reset-tokens_usage_based: 296 | - 480ms 297 | status: 298 | code: 200 299 | message: OK 300 | version: 1 301 | -------------------------------------------------------------------------------- /tests/chains/langchain_how_to/combine_documents_chains/test_map_rerank.py: -------------------------------------------------------------------------------- 1 | import langchain_visualizer # isort:skip # noqa: F401 2 | import asyncio 3 | 4 | import 
vcr_langchain as vcr 5 | from langchain.chains.qa_with_sources import load_qa_with_sources_chain 6 | from langchain.llms import OpenAI 7 | 8 | from tests.sotu import load_sotu 9 | 10 | # ========================== Start of langchain example code ========================== 11 | # https://python.langchain.com/en/latest/modules/chains/index_examples/qa_with_sources.html 12 | 13 | 14 | docsearch = load_sotu() 15 | 16 | 17 | @vcr.use_cassette() 18 | async def map_rerank_demo(): 19 | query = "What did the president say about Justice Breyer" 20 | docs = docsearch.similarity_search(query) 21 | chain = load_qa_with_sources_chain( 22 | OpenAI(temperature=0, max_tokens=500), 23 | chain_type="map_rerank", 24 | metadata_keys=["source"], 25 | return_intermediate_steps=True, 26 | ) 27 | return chain({"input_documents": docs, "question": query}, return_only_outputs=True) 28 | 29 | 30 | # ================================== Execute example ================================== 31 | 32 | 33 | def test_map_rerank_succeeds(): 34 | """Check that the chain can run normally""" 35 | result = asyncio.get_event_loop().run_until_complete(map_rerank_demo()) 36 | assert "The President thanked" in result["output_text"] 37 | 38 | 39 | if __name__ == "__main__": 40 | from langchain_visualizer import visualize, visualize_embeddings 41 | 42 | visualize_embeddings() 43 | visualize(map_rerank_demo) 44 | -------------------------------------------------------------------------------- /tests/chains/langchain_how_to/combine_documents_chains/test_mapreduce.py: -------------------------------------------------------------------------------- 1 | import langchain_visualizer # isort:skip # noqa: F401 2 | import asyncio 3 | 4 | import vcr_langchain as vcr 5 | from langchain.chains.qa_with_sources import load_qa_with_sources_chain 6 | from langchain.llms import OpenAI 7 | from tiktoken_ext.openai_public import p50k_base 8 | 9 | from tests.sotu import load_sotu 10 | 11 | # ========================== Start of langchain example code ========================== 12 | # https://python.langchain.com/en/latest/modules/chains/index_examples/qa_with_sources.html 13 | 14 | 15 | docsearch = load_sotu() 16 | 17 | 18 | @vcr.use_cassette() 19 | async def mapreduce_demo(): 20 | query = "What did the president say about Justice Breyer" 21 | docs = docsearch.similarity_search(query) 22 | chain = load_qa_with_sources_chain( 23 | OpenAI(temperature=0, max_tokens=500), chain_type="map_reduce" 24 | ) 25 | return chain({"input_documents": docs, "question": query}, return_only_outputs=True) 26 | 27 | 28 | # ================================== Execute example ================================== 29 | 30 | 31 | p50k_base() # run this before cassette to download blob first 32 | 33 | 34 | def test_mapreduce_succeeds(): 35 | """Check that the chain can run normally""" 36 | result = asyncio.get_event_loop().run_until_complete(mapreduce_demo()) 37 | assert "The president said" in result["output_text"] 38 | 39 | 40 | if __name__ == "__main__": 41 | from langchain_visualizer import visualize, visualize_embeddings 42 | 43 | visualize_embeddings() 44 | visualize(mapreduce_demo) 45 | -------------------------------------------------------------------------------- /tests/chains/langchain_how_to/combine_documents_chains/test_quickstart.py: -------------------------------------------------------------------------------- 1 | import langchain_visualizer # isort:skip # noqa: F401 2 | import asyncio 3 | 4 | import vcr_langchain as vcr 5 | from langchain.chains.qa_with_sources import 
load_qa_with_sources_chain 6 | from langchain.llms import OpenAI 7 | 8 | from tests.sotu import load_sotu 9 | 10 | # ========================== Start of langchain example code ========================== 11 | # https://python.langchain.com/en/latest/modules/chains/index_examples/qa_with_sources.html 12 | 13 | 14 | docsearch = load_sotu() 15 | 16 | 17 | @vcr.use_cassette() 18 | async def quickstart_demo(): 19 | query = "What did the president say about Justice Breyer" 20 | docs = docsearch.similarity_search(query) 21 | chain = load_qa_with_sources_chain( 22 | OpenAI(temperature=0, max_tokens=500), chain_type="stuff" 23 | ) 24 | return chain({"input_documents": docs, "question": query}, return_only_outputs=True) 25 | 26 | 27 | # ================================== Execute example ================================== 28 | 29 | 30 | def test_quickstart_succeeds(): 31 | """Check that the chain can run normally""" 32 | result = asyncio.get_event_loop().run_until_complete(quickstart_demo()) 33 | assert ( 34 | "The president thanked" in result["output_text"] 35 | or "The president honored" in result["output_text"] 36 | ) 37 | 38 | 39 | if __name__ == "__main__": 40 | from langchain_visualizer import visualize, visualize_embeddings 41 | 42 | visualize_embeddings() 43 | visualize(quickstart_demo) 44 | -------------------------------------------------------------------------------- /tests/chains/langchain_how_to/combine_documents_chains/test_refine.py: -------------------------------------------------------------------------------- 1 | import langchain_visualizer # isort:skip # noqa: F401 2 | import asyncio 3 | 4 | import vcr_langchain as vcr 5 | from langchain.chains.qa_with_sources import load_qa_with_sources_chain 6 | from langchain.llms import OpenAI 7 | 8 | from tests.sotu import load_sotu 9 | 10 | # ========================== Start of langchain example code ========================== 11 | # https://python.langchain.com/en/latest/modules/chains/index_examples/qa_with_sources.html 12 | 13 | 14 | docsearch = load_sotu() 15 | 16 | 17 | @vcr.use_cassette() 18 | async def refine_demo(): 19 | query = "What did the president say about Justice Breyer" 20 | docs = docsearch.similarity_search(query) 21 | chain = load_qa_with_sources_chain( 22 | OpenAI(temperature=0, max_tokens=500), chain_type="refine" 23 | ) 24 | return chain({"input_documents": docs, "question": query}, return_only_outputs=True) 25 | 26 | 27 | # ================================== Execute example ================================== 28 | 29 | 30 | def test_refine_succeeds(): 31 | """Check that the chain can run normally""" 32 | result = asyncio.get_event_loop().run_until_complete(refine_demo()) 33 | assert "The president said that he wanted" in result["output_text"] 34 | 35 | 36 | if __name__ == "__main__": 37 | from langchain_visualizer import visualize, visualize_embeddings 38 | 39 | visualize_embeddings() 40 | visualize(refine_demo) 41 | -------------------------------------------------------------------------------- /tests/chains/langchain_how_to/sequential_chain_demo.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"prompt": ["You are a playwright. 
Given the title of play and the era 4 | it is set in, it is your job to write a synopsis for that title.\n\nTitle: Tragedy 5 | at sunset on the beach\nEra: Victorian England\nPlaywright: This is a synopsis 6 | for the above play:"], "model": "text-davinci-003", "temperature": 0.0, "top_p": 7 | 1, "frequency_penalty": 0, "presence_penalty": 0, "n": 1, "logit_bias": {}, 8 | "max_tokens": 256}' 9 | headers: {} 10 | method: POST 11 | uri: https://api.openai.com/v1/completions 12 | response: 13 | body: 14 | string: !!binary | 15 | H4sIAAAAAAAAA1xUTW/kNgy951cQPifOZJIm7dxaYFHsYotisWl76BYBR6ItdSTRlWg77mL/e0HZ 16 | +erFB/PjPT4+6usZQDNjTj71zQGae+cLRLYUYKJcPCfwBSwNmQwK2RZ+8X1GIThSx5ngA6YR8wI3 17 | 57Df7W9AGHBib8H6ksdBtAN3UChP3lALHwlzgqilTmQoh8vLIaB0nGPLAyX0reF4admUyydYz6k0 18 | 58rUWyVp4hAuvv8k8+O/16fhV3M6fvh0e5evbq//+Djf/Pxb3K3ZfPybjGiF0KM8GI5DIO22hk0m 19 | Hak5wNXd7upmv9//cFUDdf6nsguLk0/GX+x211udY2+oNAf48wwA4Gv9wpquZV/Sl3SfsSe7AAp8 20 | HlMhAU4gjuAnQuNUVIQh4AIa8gl+90Y4e0zwLvUBk23hvYBQCKWWFeG8qJIIC4+ph5kjpnN4F31Y 21 | zmF2rD2L5LHvg0+9LiLiiYCSLRBpRUEoEUMAw1gEAwjPqYXPjmDmfCqASiuit9BxBoSZMIhboMMV 22 | 5jgKOMpgM2EsK50jiVCG4DsCzAQWiyMLs6MExZEawbCaCcThWk5xCLxQVso+TRwmshs9g8li4LEA 23 | dh363KqYdcjViEI5+kT2eb63BJS2o1wodC9sh4CpVG7iZszymp0qo0M7TLZwVKEzpp7y076OdV+c 24 | CGgiPZMW7h2BzAz/jN6cgqoTgtIPPNGKKo58BofD4BOVUlfjOMtF8NMT+jrU/9RZV70xUJMYGTGE 25 | RddC8ai0upr0djN1lHWFHa+aKclqsI5D4LlseFhq7mYUKqqj0bmFQbUtMHtxFULyKA4w2VXoQqmQ 26 | wrtN7BZ+3Lw5JrVxeSPZZk2IYxGwZLwlnVu0WhiK4PICFXHdhipYgGtGIJwInI+VwjDmMtIr87Xw 27 | fkWjZN9gba5whFmOmfCk16AE6mtWVZ693oBTlZ+nUeFootzWI6/37JOlx+YAu+c/gfsh81FvP40h 28 | PP/vfPLFPWTCwkmfgCI8NDX67Qzgr/pujAV7ag7be9EMmeMgD8InStrwdr+2a17eqZfgfvfdFhUW 29 | DK8Ct3dnCvLt7D8AAAD//wMAh1WPYssFAAA= 30 | headers: 31 | CF-Cache-Status: 32 | - DYNAMIC 33 | CF-RAY: 34 | - 82ea3dca4b3eef28-PDX 35 | Cache-Control: 36 | - no-cache, must-revalidate 37 | Connection: 38 | - keep-alive 39 | Content-Encoding: 40 | - gzip 41 | Content-Type: 42 | - application/json 43 | Date: 44 | - Fri, 01 Dec 2023 09:18:15 GMT 45 | Transfer-Encoding: 46 | - chunked 47 | openai-model: 48 | - text-davinci-003 49 | openai-processing-ms: 50 | - '3974' 51 | x-ratelimit-limit-tokens_usage_based: 52 | - '250000' 53 | x-ratelimit-remaining-tokens_usage_based: 54 | - '249744' 55 | x-ratelimit-reset-tokens_usage_based: 56 | - 61ms 57 | status: 58 | code: 200 59 | message: OK 60 | - request: 61 | body: '{"prompt": ["You are a play critic from the New York Times. Given the synopsis 62 | of play, it is your job to write a review for that play.\n\nPlay Synopsis:\n\n\nTragedy 63 | at Sunset on the Beach is a play set in Victorian England. It tells the story 64 | of a young woman, Emily, who is struggling to make ends meet in a small coastal 65 | town. She works as a maid for a wealthy family, but her dreams of a better life 66 | are dashed when she discovers that her employer is involved in a scandalous 67 | affair.\n\nEmily is determined to make a better life for herself, but her plans 68 | are thwarted when she meets a handsome stranger on the beach one evening. The 69 | two quickly fall in love, but their happiness is short-lived when Emily discovers 70 | that the stranger is actually a member of the wealthy family she works for.\n\nThe 71 | play follows Emily as she struggles to come to terms with the truth and make 72 | sense of her life. As the sun sets on the beach, Emily must decide whether to 73 | stay with the man she loves or to leave him and pursue her dreams. 
In the end, 74 | Emily must make a heartbreaking decision that will change her life forever.\nReview 75 | from a New York Times play critic of the above play:"], "model": "text-davinci-003", 76 | "temperature": 0.0, "top_p": 1, "frequency_penalty": 0, "presence_penalty": 77 | 0, "n": 1, "logit_bias": {}, "max_tokens": 256}' 78 | headers: {} 79 | method: POST 80 | uri: https://api.openai.com/v1/completions 81 | response: 82 | body: 83 | string: !!binary | 84 | H4sIAAAAAAAAA4yUTY/bRgyG7/4VhC692Fqv6yCJjy2CIm3TDyRogTbFgp6hpOmOSGFISasE+e/F 85 | jOw1cutFh+HwJefhS33eAFQzJg7cVieoPnRBoRdPESZKGoQhKHgaEjk08jW8C21CIzhTI4ngR+QR 86 | 0wLHLRz2hyOYAE4SPPigaRwsK0gDSmkKjmr4mTAx9Dm1Mxv0dHc3RLRGUl/LQIyhdtLfeXF6dy0b 87 | hLXa5k6Dz026foi7V7/b0704iz/8MrT+p7fdJ/mLvp/bl/63d8N6W87/krOcYfRkD076IVJWW8Mu 88 | UX5SdYL7l/v74+FweP2iBMr7r2k7j1NgF3b7/beXvE6CI61O8PcGAOBz+cJ6Pad95I/8IWFLfgE0 89 | eD+ykoEwWEfwHaHrMlQEh4OFCS1wC8geOsJk50T4mE/UJC2ZXZSJSjyKag3vySAw/BGcSQrI8Ibb 90 | iOy3RX6IuEAjMcqs8KYPcdkCwiIjtzBLjwxqaWzbmEuYQI+PBMReoadVGEF7jBGcoBpGMJm5XqW+ 91 | UfCJsNfcFsKZzChBDA0BJgKP2pGHuSMG7ShbwEm2EXSUgPohykIpvz3wJHEif6nnkD1GGRWwaTCk 92 | Gv68auSuMqsO2av0lNtHbildeZ4LT2ECmijbeLv2Ck3Ir+ooKcVmLeRD0wQ3RgMNNhZj1WVYV3BB 93 | s95ooRljXGBOwYy4wC9wKWWjIjvS8mIZTQ3ZB25ryCqYh6JwTgVvR+A6TOgsQzBZUc3BuhwLCXjM 94 | Wv42/YaifVVmlc3+8aShLeuIUQVCPyRSDRNtnxUvMJSseGpIMgVf3AVqI2c6cEb36JMM0EgqOcVn 95 | BcOvEyWMcQv/x7yDzJSaMZbee5lKwYhLDW9tvfGVgbfFvdtnlLdRXNYJZoJ+VFsdGbiwuonl0E6J 96 | StvISx54FCmbsh4B9ZIninlyGXt7AWudjG1nu4xjTbg4JfdmHaElqstqly0O7OmpOsH++SRKOyQ5 97 | 543nMcbn8yZw0O4hEapwXnw1GaoS/bIB+Kf8LUbFlqrT5S9RDUn6wR5MHomz4OHFcdWrbr+nW/T+ 98 | 9TVqYhhvgePx1SZX+bL5DwAA//8DAKFpirrCBQAA 99 | headers: 100 | CF-Cache-Status: 101 | - DYNAMIC 102 | CF-RAY: 103 | - 82ea3de3bcafef28-PDX 104 | Cache-Control: 105 | - no-cache, must-revalidate 106 | Connection: 107 | - keep-alive 108 | Content-Encoding: 109 | - gzip 110 | Content-Type: 111 | - application/json 112 | Date: 113 | - Fri, 01 Dec 2023 09:18:19 GMT 114 | Transfer-Encoding: 115 | - chunked 116 | openai-model: 117 | - text-davinci-003 118 | openai-processing-ms: 119 | - '3690' 120 | x-ratelimit-limit-tokens_usage_based: 121 | - '250000' 122 | x-ratelimit-remaining-tokens_usage_based: 123 | - '249744' 124 | x-ratelimit-reset-tokens_usage_based: 125 | - 61ms 126 | status: 127 | code: 200 128 | message: OK 129 | version: 1 130 | -------------------------------------------------------------------------------- /tests/chains/langchain_how_to/simple_sequential_chain_demo.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"prompt": ["You are a playwright. 
Given the title of play, it is your 4 | job to write a synopsis for that title.\n\nTitle: Tragedy at sunset on the beach\nPlaywright: 5 | This is a synopsis for the above play:"], "model": "text-davinci-003", "temperature": 6 | 0.0, "top_p": 1, "frequency_penalty": 0, "presence_penalty": 0, "n": 1, "logit_bias": 7 | {}, "max_tokens": 256}' 8 | headers: {} 9 | method: POST 10 | uri: https://api.openai.com/v1/completions 11 | response: 12 | body: 13 | string: !!binary | 14 | H4sIAAAAAAAAA6xUTW8bNxC961cM9tLLei3LChLrlt5stAVSu0iBpjBG5OySEZekObOSF0H+ezBc 15 | f7W99iIBO3zz3rxHzrcVQHPCEn0cmh00d84zjMlSgCMV9imCZ7CUCxkUsh386oeCQrCnPhWCG4wT 16 | lhm2LWzWmy1IAjwmb8F6LlMW7ZB6YCpHb6iDXwhLhFGhTiTz7vw8B5Q+lbFLmSL6zqTx3CbD58+0 17 | PkVuWlXqrYo0Yw5nHz7J47s/fnswvx8u/szvps+zT9efwunhfb7pl9Np/5WMKELoUe5NGnMg7baU 18 | TSEdqdnBxfv1xXaz2Vxd1UKd/xl2ZvHoo/Fn6/XlE84lb4ibHfy1AgD4Vn9hOa6wL/FLvCs4kJ0B 19 | BW6nyCSQIogj+JnQODUVgSWVWd0J6UgthMTcAkYLhSyN1bsOrgX6FEI6cYW/YOSUYE5THCq6cAs3 20 | aA4VfuNDaOHkEoy0ECPsKy0KcFXTwZ2jGR4mbw5hhh5DAB9rq9oiB4ya5YilzC3sJ1F2X2qBAQsB 21 | OxShQhZOjuLC7hmk4OANhjDDwYdAVvsiGCyAxnhLUTpQh1SlAgL1ApaOyKJxVHqWMg1DIFYNJmWC 22 | kxcHjgoMxVPfwa0j6H20DJwCGlIW9UdDxlgd0tN7YoG+eIq2hVss6BZjHIXM9UQlGEn/hcrIr0ya 23 | RwcfufoJexp8rHocYWiBHekdN9V8EIeyWODQarRkCglkZFlqjmrlQFnAeWspQl/SqET/tMOSyvCR 24 | rHJNsRLU0aRM4qo9NO6xHHhJ9muaSqR5eWWhP3sWNXfwMaQ4VOwJ50Wy3gi9e+PMQsWnidVsjAOV 25 | /xozRUuFRSnfCNinaZn1J64DdvC5Wua5olt4HgX3ofqqOYEJiafydLsIdRfo2NdLbDWff+NGvY0p 26 | vknE90uD2tFhzj4SM+CAPnbwP725JbCTigmE+iAm6yka4qpouaE+cvaFbFd3Qn3+Plp6bHawfvkS 27 | 0pBL2uuqiFMIL997Hz27+0LIKerGYEm5qdXvK4C/65qZGAdqdk/rpckljVnuJR0oasPt1dKueV1r 28 | r8XN5fapKkkwvCl8uFwpyffVDwAAAP//AwAEiy84+gUAAA== 29 | headers: 30 | CF-Cache-Status: 31 | - DYNAMIC 32 | CF-RAY: 33 | - 82ea3dfbeda0ef1c-PDX 34 | Cache-Control: 35 | - no-cache, must-revalidate 36 | Connection: 37 | - keep-alive 38 | Content-Encoding: 39 | - gzip 40 | Content-Type: 41 | - application/json 42 | Date: 43 | - Fri, 01 Dec 2023 09:18:23 GMT 44 | Transfer-Encoding: 45 | - chunked 46 | openai-model: 47 | - text-davinci-003 48 | openai-processing-ms: 49 | - '3851' 50 | x-ratelimit-limit-tokens_usage_based: 51 | - '250000' 52 | x-ratelimit-remaining-tokens_usage_based: 53 | - '249744' 54 | x-ratelimit-reset-tokens_usage_based: 55 | - 61ms 56 | status: 57 | code: 200 58 | message: OK 59 | - request: 60 | body: '{"prompt": ["You are a play critic from the New York Times. Given the synopsis 61 | of play, it is your job to write a review for that play.\n\nPlay Synopsis:\n\n\nTragedy 62 | at Sunset on the Beach is a story of love, loss, and redemption. It follows 63 | the story of two young lovers, Jack and Jill, who meet on a beach at sunset. 64 | They quickly fall in love and plan to marry, but their plans are shattered when 65 | Jack is tragically killed in a car accident. \n\nJill is left devastated and 66 | struggles to cope with her grief. She finds solace in the company of her best 67 | friend, Sarah, who helps her to come to terms with her loss. As Jill begins 68 | to heal, she discovers that Jack had a secret past that he had kept hidden from 69 | her. \n\nJill is determined to uncover the truth and embarks on a journey of 70 | self-discovery. Along the way, she meets a mysterious stranger who helps her 71 | to understand the truth about Jack''s past. With his help, Jill is able to find 72 | closure and peace. \n\nIn the end, Jill is able to move on with her life and 73 | find happiness again. 
Tragedy at Sunset on the Beach is a story of love, loss, 74 | and redemption that will leave audiences moved and inspired.\nReview from a 75 | New York Times play critic of the above play:"], "model": "text-davinci-003", 76 | "temperature": 0.0, "top_p": 1, "frequency_penalty": 0, "presence_penalty": 77 | 0, "n": 1, "logit_bias": {}, "max_tokens": 256}' 78 | headers: {} 79 | method: POST 80 | uri: https://api.openai.com/v1/completions 81 | response: 82 | body: 83 | string: !!binary | 84 | H4sIAAAAAAAAA4xUQW/sRAy+91dYubzL7na7W+m1e0Q9QAEBogIkHqq8M04y7WQ82E7S5en9dzST 85 | bgucuERJbH/+bH/25wuAZkZJIXXNAZqHPigM7CnCRKKBEwQFT1nIoZHfwPehEzSCI7UsBPeYRpQT 86 | XK9gt91dgzHgxMGDDypjtoLALSjJFBxt4DtCSTCU0N4s6+HyMke0lmXYcKaEYeN4uPTs9PKcNnDS 87 | ZlWYBl9IuiHH9c1P9nL79NuP+7vb7ts/21/Hv/Rud3u33+6/Pv6yePPxiZyVCKMXe3Q85EgFbTE7 88 | oVJSc4Crj9ur691uv91XQ63/HLb2OIXkwnq73b/G9RwcaXOA3y8AAD7XJyzuJexT+pQeBDvyJ0CD 89 | n8ekZMAJrCf4itD1pakImWeSdoyAycPAU0gdqLGcSsciT7SCyKqrahfyNNR+buChJ8gRT9ByjDxr 90 | BX6LtJnhxGPqKoboCu7RPVeQ+xDjCuaetQIkhZYFENrRRiEw7sh6EsDyIdgFhzGewI0G2rMYzD2l 91 | BS4oPIcYyUNIgOBQAJ0LnpL9g6AV/zPLkv2DwhOPkui0yCK2ax/UFaInQAWtlcjYdZG0qMnxUIiB 92 | kQwKc7AeCsNOArW1pjHV6NoDk9F6wCOPVll+UMiotqkTOXMKCkfC0UI7luJmCWaUKlbBcD0KOiPR 93 | 2oaZYlx7mihyJl+9KHXYhdQtdaIzFgXPgDBz8stIn/hYCjxKSMX1v8jGEENLFc5xmuh0dqKBq+Dr 94 | JM9jXTIVFXnS0NWlxKgMYchCqqFopfamhByrxJTMCmgWnoIvbwiZpCVncET37IVzHf97ltKmHyYS 95 | LCr5Pwo2Hl1foVPRgeYg7yK2Hg3mECNEwokARx8oOSoHZnpt5ZhjaOtd+cYWyGFUWyvRosx04kQQ 96 | mZ8L7iLWf63Na7swFmpoUhQL9JJJaq5NXdm6nSF5emkOsH37E7nLwseyyWmM8e1/G1LQ/lEIlVNZ 97 | aDXOTbV+uQD4o16BUbGj5vC6/U0WHrI9Gj9TKoC7m/2C17yfnXfr1c3tq9XYML4brj/uLkqWLxd/ 98 | AwAA//8DAE/hMsiaBQAA 99 | headers: 100 | CF-Cache-Status: 101 | - DYNAMIC 102 | CF-RAY: 103 | - 82ea3e149fa0ef1c-PDX 104 | Cache-Control: 105 | - no-cache, must-revalidate 106 | Connection: 107 | - keep-alive 108 | Content-Encoding: 109 | - gzip 110 | Content-Type: 111 | - application/json 112 | Date: 113 | - Fri, 01 Dec 2023 09:18:27 GMT 114 | Transfer-Encoding: 115 | - chunked 116 | openai-model: 117 | - text-davinci-003 118 | openai-processing-ms: 119 | - '3942' 120 | x-ratelimit-limit-tokens_usage_based: 121 | - '250000' 122 | x-ratelimit-remaining-tokens_usage_based: 123 | - '249744' 124 | x-ratelimit-reset-tokens_usage_based: 125 | - 61ms 126 | status: 127 | code: 200 128 | message: OK 129 | version: 1 130 | -------------------------------------------------------------------------------- /tests/chains/langchain_how_to/test_async.py: -------------------------------------------------------------------------------- 1 | import langchain_visualizer # isort:skip # noqa: F401 2 | import asyncio 3 | 4 | import vcr_langchain as vcr 5 | from langchain.chains import LLMChain 6 | from langchain.llms import OpenAI 7 | from langchain.prompts import PromptTemplate 8 | 9 | # ========================== Start of langchain example code ========================== 10 | # https://python.langchain.com/docs/modules/chains/how_to/async_chain 11 | 12 | 13 | async def async_generate(chain: LLMChain): 14 | resp = await chain.arun(product="toothpaste") 15 | return resp.strip() 16 | 17 | 18 | async def generate_concurrently(): 19 | llm = OpenAI(temperature=0.9) 20 | prompt = PromptTemplate( 21 | input_variables=["product"], 22 | template="What is a good name for a company that makes {product}?", 23 | ) 24 | chain = LLMChain(llm=llm, prompt=prompt) 25 | tasks = [async_generate(chain) for _ in range(5)] 26 | return await asyncio.gather(*tasks) 27 | 28 | 29 | @vcr.use_cassette() 30 | async def 
test_async_api_demo(): 31 | return await generate_concurrently() 32 | 33 | 34 | # ================================== Execute example ================================== 35 | 36 | 37 | def test_llm_usage_succeeds(): 38 | """Check that the chain can run normally""" 39 | result = asyncio.get_event_loop().run_until_complete(test_async_api_demo()) 40 | assert len(result) == 5 41 | 42 | 43 | if __name__ == "__main__": 44 | from langchain_visualizer import visualize 45 | 46 | visualize(test_async_api_demo) 47 | -------------------------------------------------------------------------------- /tests/chains/langchain_how_to/test_async_api_demo.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"prompt": ["What is a good name for a company that makes toothpaste?"], 4 | "model": "text-davinci-003", "temperature": 0.9, "top_p": 1, "frequency_penalty": 5 | 0, "presence_penalty": 0, "n": 1, "logit_bias": {}, "max_tokens": 256}' 6 | headers: {} 7 | method: post 8 | uri: https://api.openai.com/v1/completions 9 | response: 10 | body: 11 | string: "{\n \"warning\": \"This model version is deprecated. Migrate before 12 | January 4, 2024 to avoid disruption of service. Learn more https://platform.openai.com/docs/deprecations\",\n 13 | \ \"id\": \"cmpl-8Qtwxo1yTJPEMgUAZfuJgLMUbStZk\",\n \"object\": \"text_completion\",\n 14 | \ \"created\": 1701422291,\n \"model\": \"text-davinci-003\",\n \"choices\": 15 | [\n {\n \"text\": \"\\n\\nBrushing Bright Co.\",\n \"index\": 16 | 0,\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n 17 | \ ],\n \"usage\": {\n \"prompt_tokens\": 13,\n \"completion_tokens\": 18 | 7,\n \"total_tokens\": 20\n }\n}\n" 19 | headers: 20 | Access-Control-Allow-Origin: 21 | - '*' 22 | CF-Cache-Status: 23 | - DYNAMIC 24 | CF-RAY: 25 | - 82ea3dc6fcc35ece-PDX 26 | Cache-Control: 27 | - no-cache, must-revalidate 28 | Connection: 29 | - keep-alive 30 | Content-Encoding: 31 | - gzip 32 | Content-Type: 33 | - application/json 34 | Date: 35 | - Fri, 01 Dec 2023 09:18:11 GMT 36 | Transfer-Encoding: 37 | - chunked 38 | openai-model: 39 | - text-davinci-003 40 | openai-processing-ms: 41 | - '230' 42 | x-ratelimit-limit-tokens_usage_based: 43 | - '250000' 44 | x-ratelimit-remaining-tokens_usage_based: 45 | - '249499' 46 | x-ratelimit-reset-tokens_usage_based: 47 | - 120ms 48 | status: 49 | code: 200 50 | message: OK 51 | url: https://api.openai.com/v1/completions 52 | - request: 53 | body: '{"prompt": ["What is a good name for a company that makes toothpaste?"], 54 | "model": "text-davinci-003", "temperature": 0.9, "top_p": 1, "frequency_penalty": 55 | 0, "presence_penalty": 0, "n": 1, "logit_bias": {}, "max_tokens": 256}' 56 | headers: {} 57 | method: post 58 | uri: https://api.openai.com/v1/completions 59 | response: 60 | body: 61 | string: "{\n \"warning\": \"This model version is deprecated. Migrate before 62 | January 4, 2024 to avoid disruption of service. 
Learn more https://platform.openai.com/docs/deprecations\",\n 63 | \ \"id\": \"cmpl-8QtwxaFMYOs8Ow5UeTKZypTfck6sc\",\n \"object\": \"text_completion\",\n 64 | \ \"created\": 1701422291,\n \"model\": \"text-davinci-003\",\n \"choices\": 65 | [\n {\n \"text\": \"\\n\\nBrushBright Toothpaste Company\",\n \"index\": 66 | 0,\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n 67 | \ ],\n \"usage\": {\n \"prompt_tokens\": 13,\n \"completion_tokens\": 68 | 8,\n \"total_tokens\": 21\n }\n}\n" 69 | headers: 70 | Access-Control-Allow-Origin: 71 | - '*' 72 | CF-Cache-Status: 73 | - DYNAMIC 74 | CF-RAY: 75 | - 82ea3dc6fa4aef47-PDX 76 | Cache-Control: 77 | - no-cache, must-revalidate 78 | Connection: 79 | - keep-alive 80 | Content-Encoding: 81 | - gzip 82 | Content-Type: 83 | - application/json 84 | Date: 85 | - Fri, 01 Dec 2023 09:18:11 GMT 86 | Transfer-Encoding: 87 | - chunked 88 | openai-model: 89 | - text-davinci-003 90 | openai-processing-ms: 91 | - '243' 92 | x-ratelimit-limit-tokens_usage_based: 93 | - '250000' 94 | x-ratelimit-remaining-tokens_usage_based: 95 | - '249744' 96 | x-ratelimit-reset-tokens_usage_based: 97 | - 61ms 98 | status: 99 | code: 200 100 | message: OK 101 | url: https://api.openai.com/v1/completions 102 | - request: 103 | body: '{"prompt": ["What is a good name for a company that makes toothpaste?"], 104 | "model": "text-davinci-003", "temperature": 0.9, "top_p": 1, "frequency_penalty": 105 | 0, "presence_penalty": 0, "n": 1, "logit_bias": {}, "max_tokens": 256}' 106 | headers: {} 107 | method: post 108 | uri: https://api.openai.com/v1/completions 109 | response: 110 | body: 111 | string: "{\n \"warning\": \"This model version is deprecated. Migrate before 112 | January 4, 2024 to avoid disruption of service. Learn more https://platform.openai.com/docs/deprecations\",\n 113 | \ \"id\": \"cmpl-8QtwxTFo12HPlRzEqtefDc1Sk467c\",\n \"object\": \"text_completion\",\n 114 | \ \"created\": 1701422291,\n \"model\": \"text-davinci-003\",\n \"choices\": 115 | [\n {\n \"text\": \"\\n\\nBrushFresh Toothpaste Co.\",\n \"index\": 116 | 0,\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n 117 | \ ],\n \"usage\": {\n \"prompt_tokens\": 13,\n \"completion_tokens\": 118 | 9,\n \"total_tokens\": 22\n }\n}\n" 119 | headers: 120 | Access-Control-Allow-Origin: 121 | - '*' 122 | CF-Cache-Status: 123 | - DYNAMIC 124 | CF-RAY: 125 | - 82ea3dc70b72ef7b-PDX 126 | Cache-Control: 127 | - no-cache, must-revalidate 128 | Connection: 129 | - keep-alive 130 | Content-Encoding: 131 | - gzip 132 | Content-Type: 133 | - application/json 134 | Date: 135 | - Fri, 01 Dec 2023 09:18:11 GMT 136 | Transfer-Encoding: 137 | - chunked 138 | openai-model: 139 | - text-davinci-003 140 | openai-processing-ms: 141 | - '276' 142 | x-ratelimit-limit-tokens_usage_based: 143 | - '250000' 144 | x-ratelimit-remaining-tokens_usage_based: 145 | - '248766' 146 | x-ratelimit-reset-tokens_usage_based: 147 | - 295ms 148 | status: 149 | code: 200 150 | message: OK 151 | url: https://api.openai.com/v1/completions 152 | - request: 153 | body: '{"prompt": ["What is a good name for a company that makes toothpaste?"], 154 | "model": "text-davinci-003", "temperature": 0.9, "top_p": 1, "frequency_penalty": 155 | 0, "presence_penalty": 0, "n": 1, "logit_bias": {}, "max_tokens": 256}' 156 | headers: {} 157 | method: post 158 | uri: https://api.openai.com/v1/completions 159 | response: 160 | body: 161 | string: "{\n \"warning\": \"This model version is deprecated. Migrate before 162 | January 4, 2024 to avoid disruption of service. 
Learn more https://platform.openai.com/docs/deprecations\",\n 163 | \ \"id\": \"cmpl-8Qtwxz1zJwYC5YT2KhrMm6q1O2Bgi\",\n \"object\": \"text_completion\",\n 164 | \ \"created\": 1701422291,\n \"model\": \"text-davinci-003\",\n \"choices\": 165 | [\n {\n \"text\": \"\\n\\nDentyFresh Toothpaste Co.\",\n \"index\": 166 | 0,\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n 167 | \ ],\n \"usage\": {\n \"prompt_tokens\": 13,\n \"completion_tokens\": 168 | 9,\n \"total_tokens\": 22\n }\n}\n" 169 | headers: 170 | Access-Control-Allow-Origin: 171 | - '*' 172 | CF-Cache-Status: 173 | - DYNAMIC 174 | CF-RAY: 175 | - 82ea3dc70a63ef24-PDX 176 | Cache-Control: 177 | - no-cache, must-revalidate 178 | Connection: 179 | - keep-alive 180 | Content-Encoding: 181 | - gzip 182 | Content-Type: 183 | - application/json 184 | Date: 185 | - Fri, 01 Dec 2023 09:18:11 GMT 186 | Transfer-Encoding: 187 | - chunked 188 | openai-model: 189 | - text-davinci-003 190 | openai-processing-ms: 191 | - '300' 192 | x-ratelimit-limit-tokens_usage_based: 193 | - '250000' 194 | x-ratelimit-remaining-tokens_usage_based: 195 | - '249243' 196 | x-ratelimit-reset-tokens_usage_based: 197 | - 181ms 198 | status: 199 | code: 200 200 | message: OK 201 | url: https://api.openai.com/v1/completions 202 | - request: 203 | body: '{"prompt": ["What is a good name for a company that makes toothpaste?"], 204 | "model": "text-davinci-003", "temperature": 0.9, "top_p": 1, "frequency_penalty": 205 | 0, "presence_penalty": 0, "n": 1, "logit_bias": {}, "max_tokens": 256}' 206 | headers: {} 207 | method: post 208 | uri: https://api.openai.com/v1/completions 209 | response: 210 | body: 211 | string: "{\n \"warning\": \"This model version is deprecated. Migrate before 212 | January 4, 2024 to avoid disruption of service. 
Learn more https://platform.openai.com/docs/deprecations\",\n 213 | \ \"id\": \"cmpl-8Qtwx8ivqOPMa0h3N1jXcJ9k22qga\",\n \"object\": \"text_completion\",\n 214 | \ \"created\": 1701422291,\n \"model\": \"text-davinci-003\",\n \"choices\": 215 | [\n {\n \"text\": \"\\n\\nSmile-Lite Toothpaste Co.\",\n \"index\": 216 | 0,\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n 217 | \ ],\n \"usage\": {\n \"prompt_tokens\": 13,\n \"completion_tokens\": 218 | 11,\n \"total_tokens\": 24\n }\n}\n" 219 | headers: 220 | Access-Control-Allow-Origin: 221 | - '*' 222 | CF-Cache-Status: 223 | - DYNAMIC 224 | CF-RAY: 225 | - 82ea3dc6f9dd8e69-PDX 226 | Cache-Control: 227 | - no-cache, must-revalidate 228 | Connection: 229 | - keep-alive 230 | Content-Encoding: 231 | - gzip 232 | Content-Type: 233 | - application/json 234 | Date: 235 | - Fri, 01 Dec 2023 09:18:11 GMT 236 | Transfer-Encoding: 237 | - chunked 238 | openai-model: 239 | - text-davinci-003 240 | openai-processing-ms: 241 | - '346' 242 | x-ratelimit-limit-tokens_usage_based: 243 | - '250000' 244 | x-ratelimit-remaining-tokens_usage_based: 245 | - '248998' 246 | x-ratelimit-reset-tokens_usage_based: 247 | - 240ms 248 | status: 249 | code: 200 250 | message: OK 251 | url: https://api.openai.com/v1/completions 252 | version: 1 253 | -------------------------------------------------------------------------------- /tests/chains/langchain_how_to/test_sequential_chain.py: -------------------------------------------------------------------------------- 1 | import langchain_visualizer # isort:skip # noqa: F401 2 | import asyncio 3 | 4 | import vcr_langchain as vcr 5 | from langchain import PromptTemplate 6 | from langchain.chains import LLMChain, SequentialChain 7 | from langchain.llms import OpenAI 8 | 9 | # ========================== Start of langchain example code ========================== 10 | # https://langchain.readthedocs.io/en/latest/modules/chains/generic/sequential_chains.html 11 | 12 | # This is an LLMChain to write a synopsis given a title of a play and the era it is 13 | # set in. 14 | llm = OpenAI(temperature=0) 15 | template = """ 16 | You are a playwright. Given the title of play and the era it is set in, it is your job to write a synopsis for that title. 17 | 18 | Title: {title} 19 | Era: {era} 20 | Playwright: This is a synopsis for the above play: 21 | """.strip() # noqa 22 | prompt_template = PromptTemplate(input_variables=["title", "era"], template=template) 23 | synopsis_chain = LLMChain(llm=llm, prompt=prompt_template, output_key="synopsis") 24 | 25 | # This is an LLMChain to write a review of a play given a synopsis. 26 | llm = OpenAI(temperature=0) 27 | template = """ 28 | You are a play critic from the New York Times. Given the synopsis of play, it is your job to write a review for that play. 29 | 30 | Play Synopsis: 31 | {synopsis} 32 | Review from a New York Times play critic of the above play: 33 | """.strip() # noqa 34 | prompt_template = PromptTemplate(input_variables=["synopsis"], template=template) 35 | review_chain = LLMChain(llm=llm, prompt=prompt_template, output_key="review") 36 | 37 | # This is the overall chain where we run these two chains in sequence. 
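Each chain writes its output_key ("synopsis", then "review") into a shared variable dict, so the review prompt can consume the synopsis produced upstream.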
38 | overall_chain = SequentialChain( 39 | chains=[synopsis_chain, review_chain], 40 | input_variables=["era", "title"], 41 | # Here we return multiple variables 42 | output_variables=["synopsis", "review"], 43 | verbose=True, 44 | ) 45 | 46 | 47 | # ================================== Execute example ================================== 48 | 49 | 50 | @vcr.use_cassette() 51 | async def sequential_chain_demo(): 52 | return overall_chain( 53 | {"title": "Tragedy at sunset on the beach", "era": "Victorian England"} 54 | ) 55 | 56 | 57 | def test_llm_usage_succeeds(): 58 | """Check that the chain can run normally""" 59 | result = asyncio.get_event_loop().run_until_complete(sequential_chain_demo()) 60 | assert ( 61 | result["synopsis"] 62 | .strip() 63 | .startswith( 64 | "Tragedy at Sunset on the Beach is a play set in Victorian England." 65 | ) 66 | ) 67 | assert ( 68 | result["review"] 69 | .strip() 70 | .startswith("Tragedy at Sunset on the Beach is a captivating and heartbreaking") 71 | ) 72 | 73 | 74 | if __name__ == "__main__": 75 | from langchain_visualizer import visualize 76 | 77 | visualize(sequential_chain_demo) 78 | -------------------------------------------------------------------------------- /tests/chains/langchain_how_to/test_simple_sequential_chain.py: -------------------------------------------------------------------------------- 1 | import langchain_visualizer # isort:skip # noqa: F401 2 | import asyncio 3 | 4 | import vcr_langchain as vcr 5 | from langchain import PromptTemplate 6 | from langchain.chains import LLMChain, SimpleSequentialChain 7 | from langchain.llms import OpenAI 8 | 9 | # ========================== Start of langchain example code ========================== 10 | # https://langchain.readthedocs.io/en/latest/modules/chains/generic/sequential_chains.html 11 | 12 | # This is an LLMChain to write a synopsis given a title of a play. 13 | llm = OpenAI(temperature=0) 14 | template = """ 15 | You are a playwright. Given the title of play, it is your job to write a synopsis for that title. 16 | 17 | Title: {title} 18 | Playwright: This is a synopsis for the above play: 19 | """.strip() # noqa 20 | prompt_template = PromptTemplate(input_variables=["title"], template=template) 21 | synopsis_chain = LLMChain(llm=llm, prompt=prompt_template) 22 | 23 | # This is an LLMChain to write a review of a play given a synopsis. 24 | llm = OpenAI(temperature=0) 25 | template = """ 26 | You are a play critic from the New York Times. Given the synopsis of play, it is your job to write a review for that play. 27 | 28 | Play Synopsis: 29 | {synopsis} 30 | Review from a New York Times play critic of the above play: 31 | """.strip() # noqa 32 | prompt_template = PromptTemplate(input_variables=["synopsis"], template=template) 33 | review_chain = LLMChain(llm=llm, prompt=prompt_template) 34 | 35 | # This is the overall chain where we run these two chains in sequence. 
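Unlike the SequentialChain above, SimpleSequentialChain passes exactly one string between steps, piping the synopsis straight into the review prompt.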
36 | overall_chain = SimpleSequentialChain( 37 | chains=[synopsis_chain, review_chain], verbose=True 38 | ) 39 | 40 | 41 | # ================================== Execute example ================================== 42 | 43 | 44 | @vcr.use_cassette() 45 | async def simple_sequential_chain_demo(): 46 | return overall_chain.run("Tragedy at sunset on the beach") 47 | 48 | 49 | def test_llm_usage_succeeds(): 50 | """Check that the chain can run normally""" 51 | result = asyncio.get_event_loop().run_until_complete(simple_sequential_chain_demo()) 52 | assert result.strip().startswith( 53 | "Tragedy at Sunset on the Beach is a powerful and moving story of love" 54 | ) 55 | 56 | 57 | if __name__ == "__main__": 58 | from langchain_visualizer import visualize 59 | 60 | visualize(simple_sequential_chain_demo) 61 | -------------------------------------------------------------------------------- /tests/chains/langchain_how_to/utility_chains/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amosjyng/langchain-visualizer/e51a63e5842a02548927532e1926c2b409ba43d1/tests/chains/langchain_how_to/utility_chains/__init__.py -------------------------------------------------------------------------------- /tests/chains/langchain_how_to/utility_chains/llm_checker_chain.py: -------------------------------------------------------------------------------- 1 | import langchain_visualizer # isort:skip # noqa: F401 2 | import asyncio 3 | 4 | import vcr_langchain as vcr 5 | from langchain.chains import LLMCheckerChain 6 | from langchain.llms import OpenAI 7 | 8 | # ========================== Start of langchain example code ========================== 9 | # https://langchain.readthedocs.io/en/latest/modules/chains/examples/llm_checker.html 10 | 11 | 12 | @vcr.use_cassette() 13 | async def checker_chain_demo(): 14 | llm = OpenAI(temperature=0.7) 15 | text = "What type of mammal lays the biggest eggs?" 
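# LLMCheckerChain drafts an answer, then lists and verifies its own assumptions before giving a final answer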
16 | checker_chain = LLMCheckerChain(llm=llm, verbose=True) 17 | return checker_chain.run(text) 18 | 19 | 20 | # ================================== Execute example ================================== 21 | 22 | 23 | def test_llm_usage_succeeds(): 24 | """Check that the chain can run normally""" 25 | result = asyncio.get_event_loop().run_until_complete(checker_chain_demo()) 26 | assert "The Southern Elephant Seal" in result 27 | 28 | 29 | if __name__ == "__main__": 30 | from langchain_visualizer import visualize 31 | 32 | visualize(checker_chain_demo) 33 | -------------------------------------------------------------------------------- /tests/chains/test_bash_chain.py: -------------------------------------------------------------------------------- 1 | import langchain_visualizer # isort:skip # noqa: F401 2 | import asyncio 3 | 4 | import vcr_langchain as vcr 5 | from langchain.llms import OpenAI 6 | from langchain_experimental.llm_bash.base import LLMBashChain 7 | 8 | llm = OpenAI(temperature=0) 9 | chain = LLMBashChain.from_llm(llm=llm) 10 | 11 | 12 | # ================================== Execute example ================================== 13 | 14 | 15 | @vcr.use_cassette() 16 | async def bash_chain_demo(): 17 | return chain("What files are in my current directory?") 18 | 19 | 20 | def test_bash_usage_succeeds(): 21 | """Check that the chain can run normally""" 22 | result = asyncio.get_event_loop().run_until_complete(bash_chain_demo()) 23 | assert "langchain_visualizer" in result["answer"] 24 | 25 | 26 | if __name__ == "__main__": 27 | from langchain_visualizer import visualize 28 | 29 | visualize(bash_chain_demo) 30 | -------------------------------------------------------------------------------- /tests/demo.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "e9d8f40d-0f6a-4d5a-ba9d-f78a8666fd3f", 6 | "metadata": {}, 7 | "source": [ 8 | "# Jupyter notebook support\n", 9 | "\n", 10 | "This demonstrates usage of the visualizer inside a Jupyter notebook. **Make sure to import the `visualize` function from `langchain_visualizer.jupyter` instead.**\n", 11 | "\n", 12 | "As noted in the example, you can customize the width and height of the displayed window if you so wish." 13 | ] 14 | }, 15 | { 16 | "cell_type": "code", 17 | "execution_count": 1, 18 | "id": "3192e422-2174-4129-a0a7-40a03c068136", 19 | "metadata": { 20 | "tags": [] 21 | }, 22 | "outputs": [ 23 | { 24 | "name": "stderr", 25 | "output_type": "stream", 26 | "text": [ 27 | "/home/amos/.cache/pypoetry/virtualenvs/langchain-visualizer-LErXDBzh-py3.11/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. 
See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", 28 | " from .autonotebook import tqdm as notebook_tqdm\n" 29 | ] 30 | }, 31 | { 32 | "name": "stdout", 33 | "output_type": "stream", 34 | "text": [ 35 | "2023-03-29 17:39.54.135905 [info ] Trace: http://127.0.0.1:8935/traces/01GWP1GARQ5CNVHHQP2JNRZ92E\n", 36 | "\n", 37 | "\n", 38 | "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n", 39 | "Rendering http://127.0.0.1:8935/traces/01GWP1GARQ5CNVHHQP2JNRZ92E in notebook\n" 40 | ] 41 | }, 42 | { 43 | "data": { 44 | "text/html": [ 45 | "\n", 46 | " \n", 54 | " " 55 | ], 56 | "text/plain": [ 57 | "" 58 | ] 59 | }, 60 | "execution_count": 1, 61 | "metadata": {}, 62 | "output_type": "execute_result" 63 | }, 64 | { 65 | "name": "stdout", 66 | "output_type": "stream", 67 | "text": [ 68 | "\u001b[32;1m\u001b[1;3m I need to find out who Olivia Wilde's boyfriend is and then calculate his age raised to the 0.23 power.\n", 69 | "Action: Search\n", 70 | "Action Input: \"Olivia Wilde boyfriend\"\u001b[0m\n", 71 | "Observation: \u001b[36;1m\u001b[1;3mSudeikis and Wilde's relationship ended in November 2020. Wilde was publicly served with court documents regarding child custody while she was presenting Don't Worry Darling at CinemaCon 2022. In January 2021, Wilde began dating singer Harry Styles after meeting during the filming of Don't Worry Darling.\u001b[0m\n", 72 | "Thought:\u001b[32;1m\u001b[1;3m I need to find out Harry Styles' age.\n", 73 | "Action: Search\n", 74 | "Action Input: \"Harry Styles age\"\u001b[0m\n", 75 | "Observation: \u001b[36;1m\u001b[1;3m29 years\u001b[0m\n", 76 | "Thought:\u001b[32;1m\u001b[1;3m I need to calculate 29 raised to the 0.23 power.\n", 77 | "Action: Calculator\n", 78 | "Action Input: 29^0.23\u001b[0m\n", 79 | "Observation: \u001b[33;1m\u001b[1;3mAnswer: 2.169459462491557\n", 80 | "\u001b[0m\n", 81 | "Thought:\u001b[32;1m\u001b[1;3m I now know the final answer.\n", 82 | "Final Answer: Harry Styles, Olivia Wilde's boyfriend, is 29 years old and his age raised to the 0.23 power is 2.169459462491557.\u001b[0m\n", 83 | "\n", 84 | "\u001b[1m> Finished chain.\u001b[0m\n", 85 | "Harry Styles, Olivia Wilde's boyfriend, is 29 years old and his age raised to the 0.23 power is 2.169459462491557.\n" 86 | ] 87 | } 88 | ], 89 | "source": [ 90 | "from langchain_visualizer.jupyter import visualize\n", 91 | "from langchain.agents import initialize_agent, load_tools\n", 92 | "from langchain.llms import OpenAI\n", 93 | "\n", 94 | "\n", 95 | "async def search_agent_demo():\n", 96 | " llm = OpenAI(temperature=0)\n", 97 | " tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm)\n", 98 | " agent = initialize_agent(\n", 99 | " tools, llm, agent=\"zero-shot-react-description\", verbose=True\n", 100 | " )\n", 101 | " return agent.run(\n", 102 | " \"Who is Olivia Wilde's boyfriend? 
What is his current age raised to the 0.23 \"\n", 103 | " \"power?\"\n", 104 | " )\n", 105 | "\n", 106 | "# you don't have to specify width and height\n", 107 | "# but if you do, you can change the size of the rendered window\n", 108 | "visualize(search_agent_demo, width=1000, height=500)" 109 | ] 110 | } 111 | ], 112 | "metadata": { 113 | "kernelspec": { 114 | "display_name": "Python 3 (ipykernel)", 115 | "language": "python", 116 | "name": "python3" 117 | }, 118 | "language_info": { 119 | "codemirror_mode": { 120 | "name": "ipython", 121 | "version": 3 122 | }, 123 | "file_extension": ".py", 124 | "mimetype": "text/x-python", 125 | "name": "python", 126 | "nbconvert_exporter": "python", 127 | "pygments_lexer": "ipython3", 128 | "version": "3.11.1" 129 | } 130 | }, 131 | "nbformat": 4, 132 | "nbformat_minor": 5 133 | } 134 | -------------------------------------------------------------------------------- /tests/dummy_viz.py: -------------------------------------------------------------------------------- 1 | from langchain_visualizer import visualize 2 | 3 | 4 | async def dummy_func(): 5 | pass 6 | 7 | 8 | visualize(dummy_func) 9 | -------------------------------------------------------------------------------- /tests/llms/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amosjyng/langchain-visualizer/e51a63e5842a02548927532e1926c2b409ba43d1/tests/llms/__init__.py -------------------------------------------------------------------------------- /tests/llms/chatgpt_demo.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"messages": [{"role": "system", "content": "You are a helpful assistant 4 | that translates English to French."}, {"role": "user", "content": "I love programming."}], 5 | "model": "gpt-3.5-turbo", "stream": false, "n": 1, "temperature": 0.7}' 6 | headers: {} 7 | method: POST 8 | uri: https://api.openai.com/v1/chat/completions 9 | response: 10 | body: 11 | string: !!binary | 12 | H4sIAAAAAAAAA1SQu07DMBSG9zyFdRaWpkqTUlBmbmUBBhAIocp1TlOD7WPZp6Ko6rsjuy0Ri4f/ 13 | 5s/eFUKA7qAVoNaSlfWmvHzircSXeHW94jBtbiq6fXj7fp7P5ePrHYxSg5afqPjUGiuy3iBrcgdb 14 | BZSMaXVyUU2mdd00VTYsdWhSrfdcNuPzkjdhSWU1mzTH5pq0wgiteC+EEGKXz8ToOtxCK/JOVizG 15 | KHuE9i8kBAQySQEZo44sHcNoMBU5Rpex789kRwGFD9QHaS2GMRyD+78bVtrpuF4ElJFcakUmf4jt 16 | CyE+MvHmHwT4QNbzgukLXXpGPTvMwfBHg3nymFiaQW7q4ogB8Scy2sVKux6DDzrTu40xxb74BQAA 17 | //8DABPwwzK8AQAA 18 | headers: 19 | CF-Cache-Status: 20 | - DYNAMIC 21 | CF-RAY: 22 | - 82ea3ebebd65efbe-PDX 23 | Cache-Control: 24 | - no-cache, must-revalidate 25 | Connection: 26 | - keep-alive 27 | Content-Encoding: 28 | - gzip 29 | Content-Type: 30 | - application/json 31 | Date: 32 | - Fri, 01 Dec 2023 09:18:51 GMT 33 | Transfer-Encoding: 34 | - chunked 35 | openai-model: 36 | - gpt-3.5-turbo-0613 37 | openai-processing-ms: 38 | - '1021' 39 | x-ratelimit-limit-tokens_usage_based: 40 | - '60000' 41 | x-ratelimit-remaining-tokens_usage_based: 42 | - '59961' 43 | x-ratelimit-reset-tokens_usage_based: 44 | - 39ms 45 | status: 46 | code: 200 47 | message: OK 48 | version: 1 49 | -------------------------------------------------------------------------------- /tests/llms/getting_started_demo.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"prompt": ["Tell me a joke", "Tell me a poem", "Tell me a joke", "Tell 4 | me a poem"], "model": "text-davinci-003", 
"temperature": 1.0, "top_p": 1, "frequency_penalty": 5 | 0, "presence_penalty": 0, "n": 2, "logit_bias": {}, "max_tokens": 256, "best_of": 6 | 2}' 7 | headers: {} 8 | method: POST 9 | uri: https://api.openai.com/v1/completions 10 | response: 11 | body: 12 | string: !!binary | 13 | H4sIAAAAAAAAA9SVTW8TMRCG7/kVw164pPnYlJYuB9RKqKiiQFtUhAiqHHuya7o7Y+xx0qXqf0fe 14 | pC2ISEigCrjsYT6f1zP2XvcAsqXyZKnMCsjeVTZAwwZrWKAPlglsAIPOo1aCZgDHtvRKEGY4Z49w 15 | pCgq38J2H/JRvg3CoBZsDRgbfHSSKvAcAvqF1TiAV6g8QZNSKxEXiuHQ1Urm7JsBOyRlB5qboWEd 16 | hrdtLVPI+onUmgSpG1dvPT2Rq9nOi3j6gRy3Xzzlh4evj1++yY/Oy8NVNM8+o5aUIXglF5obV2Oq 17 | tnJrj0lSVsB4dzTezvPJZNw5Ov23aVtGLSxpuzUaTdZ5FVuNISvgYw8A4Lr7wio8pU1pSicFvK9a 18 | MNaAVAhBK4/a8xKWljrLa55hDW+9/YrPp7RfwAFqFQNChbBUAThKEEXGUgmWIM1lbrE2gw6i62fJ 19 | 4FVWwOjOUnPpPM8SG8W6vrPPLdlQXXhUgSkhBmGXdd6b/i9EKOlUMCEsVOOsRwiqTYNOMlgq9LeO 20 | lZAzRGg5Qq0EfR9cJH356Gfq8UNRn3LAAMojeDT9KZ1brlFWllkdsT+ls1gqnzY7LBGlP6V9MhC4 21 | C2k5bqDN/13aDRsxefCN4G7EWtU1KJih8rC0UnEUUASo/GoV9uFgw1luPyhdC4bpsUDQFklskADi 22 | YxBQwk348a5JhS006hIhOsAF+lYqS+UG5if/1fx3/hbts9+h3f0j2h7Ap+5djkGVmBVr8Mx5bpxc 23 | CF8ipYLj9aFk9/+B75x76yuTCYuq7x35aK+Xmtz0vgEAAP//AwCQHDpeKwcAAA== 24 | headers: 25 | CF-Cache-Status: 26 | - DYNAMIC 27 | CF-RAY: 28 | - 82ea3ec62fe25eeb-PDX 29 | Cache-Control: 30 | - no-cache, must-revalidate 31 | Connection: 32 | - keep-alive 33 | Content-Encoding: 34 | - gzip 35 | Content-Type: 36 | - application/json 37 | Date: 38 | - Fri, 01 Dec 2023 09:18:52 GMT 39 | Transfer-Encoding: 40 | - chunked 41 | openai-model: 42 | - text-davinci-003 43 | openai-processing-ms: 44 | - '574' 45 | x-ratelimit-limit-tokens_usage_based: 46 | - '250000' 47 | x-ratelimit-remaining-tokens_usage_based: 48 | - '245903' 49 | x-ratelimit-reset-tokens_usage_based: 50 | - 983ms 51 | status: 52 | code: 200 53 | message: OK 54 | version: 1 55 | -------------------------------------------------------------------------------- /tests/llms/test_chatgpt.py: -------------------------------------------------------------------------------- 1 | import langchain_visualizer # isort:skip # noqa: F401 2 | import asyncio 3 | 4 | import vcr_langchain as vcr 5 | from langchain.chat_models import ChatOpenAI 6 | from langchain.prompts.chat import ( 7 | ChatPromptTemplate, 8 | HumanMessagePromptTemplate, 9 | SystemMessagePromptTemplate, 10 | ) 11 | 12 | # ========================== Start of langchain example code ========================== 13 | # https://langchain.readthedocs.io/en/latest/modules/chat/getting_started.html 14 | 15 | 16 | @vcr.use_cassette() 17 | async def chatgpt_demo(): 18 | chat = ChatOpenAI(model_name="gpt-3.5-turbo") 19 | 20 | template = ( 21 | "You are a helpful assistant that translates {input_language} to " 22 | "{output_language}." 23 | ) 24 | system_message_prompt = SystemMessagePromptTemplate.from_template(template) 25 | human_template = "{text}" 26 | human_message_prompt = HumanMessagePromptTemplate.from_template(human_template) 27 | chat_prompt = ChatPromptTemplate.from_messages( 28 | [system_message_prompt, human_message_prompt] 29 | ) 30 | 31 | # get a chat completion from the formatted messages 32 | return chat( 33 | chat_prompt.format_prompt( 34 | input_language="English", 35 | output_language="French", 36 | text="I love programming.", 37 | ).to_messages() 38 | ) 39 | 40 | 41 | # ================================== Execute example ================================== 42 | 43 | 44 | def test_llm_usage_succeeds(): 45 | """ 46 | Check that it works like a regular prompt. 
47 | Also, record the interaction for easy playback and visualization. 48 | """ 49 | result = asyncio.get_event_loop().run_until_complete(chatgpt_demo()) 50 | assert ( 51 | result.content == "J'adore programmer." 52 | or result.content == "J'adore la programmation." 53 | ) 54 | 55 | 56 | if __name__ == "__main__": 57 | from langchain_visualizer import visualize 58 | 59 | visualize(chatgpt_demo) 60 | -------------------------------------------------------------------------------- /tests/llms/test_langchain_getting_started.py: -------------------------------------------------------------------------------- 1 | import langchain_visualizer # isort:skip # noqa: F401 2 | import asyncio 3 | 4 | import vcr_langchain as vcr 5 | from langchain.llms import OpenAI 6 | 7 | # ========================== Start of langchain example code ========================== 8 | # https://langchain.readthedocs.io/en/latest/modules/prompts/getting_started.html 9 | 10 | 11 | llm = OpenAI(model="text-davinci-003", n=2, best_of=2, temperature=1) 12 | 13 | # ================================== Execute example ================================== 14 | 15 | 16 | @vcr.use_cassette() 17 | async def getting_started_demo(): 18 | return llm.generate(["Tell me a joke", "Tell me a poem"] * 2) 19 | 20 | 21 | def test_llm_usage_succeeds(): 22 | """ 23 | Check that it works like a regular prompt. 24 | Also, record the interaction for easy playback and visualization. 25 | """ 26 | result = asyncio.get_event_loop().run_until_complete(getting_started_demo()) 27 | assert len(result.generations) == 4 28 | assert len(result.generations[0]) == 2 29 | 30 | 31 | if __name__ == "__main__": 32 | from langchain_visualizer import visualize 33 | 34 | visualize(getting_started_demo) 35 | -------------------------------------------------------------------------------- /tests/memory/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amosjyng/langchain-visualizer/e51a63e5842a02548927532e1926c2b409ba43d1/tests/memory/__init__.py -------------------------------------------------------------------------------- /tests/memory/langchain_getting_started/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amosjyng/langchain-visualizer/e51a63e5842a02548927532e1926c2b409ba43d1/tests/memory/langchain_getting_started/__init__.py -------------------------------------------------------------------------------- /tests/memory/langchain_getting_started/conversation_summary_memory_demo.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"prompt": ["The following is a friendly conversation between a human and 4 | an AI. The AI is talkative and provides lots of specific details from its context. 
5 | If the AI does not know the answer to a question, it truthfully says it does 6 | not know.\n\nCurrent conversation:\n\nHuman: Hi, what''s up?\nAI:"], "model": 7 | "text-davinci-003", "temperature": 0.0, "top_p": 1, "frequency_penalty": 0, 8 | "presence_penalty": 0, "n": 1, "logit_bias": {}, "max_tokens": 256}' 9 | headers: {} 10 | method: POST 11 | uri: https://api.openai.com/v1/completions 12 | response: 13 | body: 14 | string: !!binary | 15 | H4sIAAAAAAAAA0RQTW/TQBC9+1cMe+HSOK4dteALF4QCagNIRUJFKFqvJ/bAeme1O3YSqv53tE7a 16 | XPbw3ryvfcoA1F4HR65TNaiHniIM3KKFCUMkdkARWvQBjRZsc7inLmhBaHDHAeGLdqMOR1hdQVmU 17 | KxAGPTG10FIMo5fkwDuIGCYymMMd6uBgSNJexMd6ufRWy47DkLNHpyk3PCxbNnH5EkvsorpKTalN 18 | Jc3g7eLddzmYTTXd3N99+/nj09ePj27zb/3Ycb9pxtM1N3/QSFIIHmRrePAWk9uJNgHTJFXD9W1x 19 | vSrLqipnYt7/Ilu0eiJnaFEU1VnXMxmMqoZfGQDA0/zC6TzJYE0gPQZ8A5/fDtAyuQ66lJbPgBlD 20 | QCf2CD1an0gNZozCAwbYk/SgQdD0joy2QDGOmMOa96AbHgWOPH6Ym8yh5Fo8qBqKV8Ry5wM3qaAb 21 | rX3Fd+Qo9tuAOrJLPaOwVzP7nAH8nseNUXeo6vMo5QMPXrbCf9Elw5vbk526fOaFLKszKSzaXvD3 22 | RZYinrP/AAAA//8DAIsr2vVuAgAA 23 | headers: 24 | CF-Cache-Status: 25 | - DYNAMIC 26 | CF-RAY: 27 | - 82ea3ecacba1ef8c-PDX 28 | Cache-Control: 29 | - no-cache, must-revalidate 30 | Connection: 31 | - keep-alive 32 | Content-Encoding: 33 | - gzip 34 | Content-Type: 35 | - application/json 36 | Date: 37 | - Fri, 01 Dec 2023 09:18:53 GMT 38 | Transfer-Encoding: 39 | - chunked 40 | openai-model: 41 | - text-davinci-003 42 | openai-processing-ms: 43 | - '414' 44 | x-ratelimit-limit-tokens_usage_based: 45 | - '250000' 46 | x-ratelimit-remaining-tokens_usage_based: 47 | - '249744' 48 | x-ratelimit-reset-tokens_usage_based: 49 | - 61ms 50 | status: 51 | code: 200 52 | message: OK 53 | - request: 54 | body: '{"prompt": ["Progressively summarize the lines of conversation provided, 55 | adding onto the previous summary returning a new summary.\n\nEXAMPLE\nCurrent 56 | summary:\nThe human asks what the AI thinks of artificial intelligence. The 57 | AI thinks artificial intelligence is a force for good.\n\nNew lines of conversation:\nHuman: 58 | Why do you think artificial intelligence is a force for good?\nAI: Because artificial 59 | intelligence will help humans reach their full potential.\n\nNew summary:\nThe 60 | human asks what the AI thinks of artificial intelligence. The AI thinks artificial 61 | intelligence is a force for good because it will help humans reach their full 62 | potential.\nEND OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: 63 | Hi, what''s up?\nAI: Hi there! I''m doing great. I''m currently helping a customer 64 | with a technical issue. 
How about you?\n\nNew summary:"], "model": "text-davinci-003", 65 | "temperature": 0.7, "top_p": 1, "frequency_penalty": 0, "presence_penalty": 66 | 0, "n": 1, "logit_bias": {}, "max_tokens": 256}' 67 | headers: {} 68 | method: POST 69 | uri: https://api.openai.com/v1/completions 70 | response: 71 | body: 72 | string: !!binary | 73 | H4sIAAAAAAAAA0xQy24bMQy8+ysInf1YPwCnewtQFGjRFi2QQ9E2MGSJu2IqiYLE9aNB/r3Q2k56 74 | ESgOZzjD5wmAOuocKfaqBfXgqEBgix4OmAtxBCpgMWU0WtDO4Qv1WQvCHjvOCJ90HHQ+w2YKq2a1 75 | AWHQByYLlkoeklQF7qBgPpDBOXxGnSOESnUiqbSLRfJaOs5hzgmjprnhsLBsyuK2ljgWNa1OyVaT 76 | JiQ/u/suJ/vu6eeHPPyQcK/fh/PfYfstN+cjfb1M8/4JjVSG4El2hkPyWNUusMlYI6kWlttmuVmt 77 | 1uv1CIz5b7SZ1QeKhmZNs77yHJPBolr4NQEAeB5fuIxX2u/44BDcEHSEPiNKAXEI9x9BR3srM5bE 78 | 0RYwDjF3g/fnKRTRQrEHknp2hz7VnwYzFOGAGY4kDjQIGhfJaA9UyoDz0djogaLFk2qhee147lPm 79 | ffUbB+9f+x1FKm6XUReO1XYRTmpEXyYAj2PWoegeVXvNqFLmkGQn/AdjFVzerS966u24b+hqcwWF 80 | Rfv/+s12Upe8TP4BAAD//wMAo9ZavX8CAAA= 81 | headers: 82 | CF-Cache-Status: 83 | - DYNAMIC 84 | CF-RAY: 85 | - 82ea3ecdfcd6ef8c-PDX 86 | Cache-Control: 87 | - no-cache, must-revalidate 88 | Connection: 89 | - keep-alive 90 | Content-Encoding: 91 | - gzip 92 | Content-Type: 93 | - application/json 94 | Date: 95 | - Fri, 01 Dec 2023 09:18:53 GMT 96 | Transfer-Encoding: 97 | - chunked 98 | openai-model: 99 | - text-davinci-003 100 | openai-processing-ms: 101 | - '561' 102 | x-ratelimit-limit-tokens_usage_based: 103 | - '250000' 104 | x-ratelimit-remaining-tokens_usage_based: 105 | - '249744' 106 | x-ratelimit-reset-tokens_usage_based: 107 | - 61ms 108 | status: 109 | code: 200 110 | message: OK 111 | - request: 112 | body: '{"prompt": ["The following is a friendly conversation between a human and 113 | an AI. The AI is talkative and provides lots of specific details from its context. 
114 | If the AI does not know the answer to a question, it truthfully says it does 115 | not know.\n\nCurrent conversation:\n\nThe human greets the AI and the AI responds 116 | cheerfully, stating it is helping a customer with a technical issue.\nHuman: 117 | Tell me more about it!\nAI:"], "model": "text-davinci-003", "temperature": 0.0, 118 | "top_p": 1, "frequency_penalty": 0, "presence_penalty": 0, "n": 1, "logit_bias": 119 | {}, "max_tokens": 256}' 120 | headers: {} 121 | method: POST 122 | uri: https://api.openai.com/v1/completions 123 | response: 124 | body: 125 | string: !!binary | 126 | H4sIAAAAAAAAA0RRy27bQAy8+yumuuSSyM+grf/ARYoiaIICLYpgvUtJdKSlwKXsuEH+vVjZiS8C 127 | lsN5cPQ6AYqD08ixLtYoHhpO6CRQiz1pYonghEC9kndGocR3rtUZYUuVKOGbi4PTI1bXWMwWK5jA 128 | 7YUDAicdessKUiGR7tlTiTtyGtFlamPWp/V02rfOKtGulJ6i49JLNw3i0/TdliWm4jon5ZBD+q5v 129 | b77c20t4dqS/Hx7ndxtPP37tlo9yuP+3C+60LdsdecsMoxd78tL1LWW1E+yV8knFGvPPs/lqsVgu 130 | lyMw3v9Ouwluz9HzzWy2PPMaYU+pWOPPBABexy9O65mGn4PSJ2yuOjTU9hxrOPghmXSkMJVh21Jq 131 | RAwu95sGwoGtgTXEipxzMNISDw0dr3JTOUL9zoSXGMnbOJJMAkcjjZT1wuhrejzDOUHe6VBxPShB 132 | BsOhcTYSe5VtSx04lSPPD6oUrT1ChxhPyQO7Okoy9kjexSyaiMAVNshvDhSNq+MpSb6mHIsaO+EY 133 | 6KVYY/YxaaXOrrm/OLTtx7ziyKl5UnJJYq4xmfTFiL5NgL9j90NyNRXrc+dFr9L19mTyTDELfp2f 134 | 5IrLv76At6szaGKuvcznq9tJ9nib/AcAAP//AwB/GV9PDgMAAA== 135 | headers: 136 | CF-Cache-Status: 137 | - DYNAMIC 138 | CF-RAY: 139 | - 82ea3ed21e69ef8c-PDX 140 | Cache-Control: 141 | - no-cache, must-revalidate 142 | Connection: 143 | - keep-alive 144 | Content-Encoding: 145 | - gzip 146 | Content-Type: 147 | - application/json 148 | Date: 149 | - Fri, 01 Dec 2023 09:18:54 GMT 150 | Transfer-Encoding: 151 | - chunked 152 | openai-model: 153 | - text-davinci-003 154 | openai-processing-ms: 155 | - '872' 156 | x-ratelimit-limit-tokens_usage_based: 157 | - '250000' 158 | x-ratelimit-remaining-tokens_usage_based: 159 | - '249744' 160 | x-ratelimit-reset-tokens_usage_based: 161 | - 61ms 162 | status: 163 | code: 200 164 | message: OK 165 | - request: 166 | body: '{"prompt": ["Progressively summarize the lines of conversation provided, 167 | adding onto the previous summary returning a new summary.\n\nEXAMPLE\nCurrent 168 | summary:\nThe human asks what the AI thinks of artificial intelligence. The 169 | AI thinks artificial intelligence is a force for good.\n\nNew lines of conversation:\nHuman: 170 | Why do you think artificial intelligence is a force for good?\nAI: Because artificial 171 | intelligence will help humans reach their full potential.\n\nNew summary:\nThe 172 | human asks what the AI thinks of artificial intelligence. The AI thinks artificial 173 | intelligence is a force for good because it will help humans reach their full 174 | potential.\nEND OF EXAMPLE\n\nCurrent summary:\n\nThe human greets the AI and 175 | the AI responds cheerfully, stating it is helping a customer with a technical 176 | issue.\n\nNew lines of conversation:\nHuman: Tell me more about it!\nAI: Sure! 177 | I''m helping a customer troubleshoot an issue with their computer. They''re 178 | having trouble connecting to the internet and I''m trying to help them figure 179 | out what the problem is. 
I''m currently running a diagnostic scan to see if 180 | I can identify the issue.\n\nNew summary:"], "model": "text-davinci-003", "temperature": 181 | 0.7, "top_p": 1, "frequency_penalty": 0, "presence_penalty": 0, "n": 1, "logit_bias": 182 | {}, "max_tokens": 256}' 183 | headers: {} 184 | method: POST 185 | uri: https://api.openai.com/v1/completions 186 | response: 187 | body: 188 | string: !!binary | 189 | H4sIAAAAAAAAA0xSwW7bMAy95ysIn9skTdJtzW1de9iwDViXnbahUGTa4iaTgkin6Yr+eyE7SXcx 190 | ZD6+96hHPU0AqgeXmbit1lBtAil0UmOEHWYlYSCFGlNG7wzrKXyhNjtD2GIjGeGT497lR1idwWK+ 191 | WIEJuJ1QDTVp7pMVBWlAMe/I4xQ+o8sMXaEGs6Tr2SxFZ43kbioJ2dHUSzerxevsaEvCWp2VSaku 192 | Q/ouxfN332yPdHe9uG0++LvrH2nnbr8+3Nx8V9z+G7tl+we9FYbh3u69dCliURthn7FcqVrDxdv5 193 | xWqxWC5XAzDc/0g7r92O2NP5fL488IKQR63W8HMCAPA0fGFsL7RfvAkIoe8cQ5sRTcECwvuP4Lg+ 194 | HjNqEq4VfEDMTR/j4xmoOSNugazEHjCm8ufA92rSYYYHsgAODH1g8i4CqfY4hc0oivsUHXHxc3ZQ 195 | yT3zqFKTa1nUyIN6x2VXxWKY6ORgWfptRA0iBo5Hg9HXAlKGEmNvpXPwUEgZd8jD3BawgyZLB16Y 196 | 0Y9FGRyIDTOjTYcUh8CIa9xXa5ifKlHalGVbwuU+xlO9ISYN9xmdCpeM1SRVA/o8Afg9LKZX12K1 197 | PiykSlm6ZPcmf5GL4GJ5NepVry/hFb18cwBNzMX/WFeXk2LyPHkBAAD//wMAgJcZjiwDAAA= 198 | headers: 199 | CF-Cache-Status: 200 | - DYNAMIC 201 | CF-RAY: 202 | - 82ea3ed828baef8c-PDX 203 | Cache-Control: 204 | - no-cache, must-revalidate 205 | Connection: 206 | - keep-alive 207 | Content-Encoding: 208 | - gzip 209 | Content-Type: 210 | - application/json 211 | Date: 212 | - Fri, 01 Dec 2023 09:18:55 GMT 213 | Transfer-Encoding: 214 | - chunked 215 | openai-model: 216 | - text-davinci-003 217 | openai-processing-ms: 218 | - '980' 219 | x-ratelimit-limit-tokens_usage_based: 220 | - '250000' 221 | x-ratelimit-remaining-tokens_usage_based: 222 | - '249744' 223 | x-ratelimit-reset-tokens_usage_based: 224 | - 61ms 225 | status: 226 | code: 200 227 | message: OK 228 | - request: 229 | body: '{"prompt": ["The following is a friendly conversation between a human and 230 | an AI. The AI is talkative and provides lots of specific details from its context. 231 | If the AI does not know the answer to a question, it truthfully says it does 232 | not know.\n\nCurrent conversation:\n\nThe human greets the AI and the AI responds 233 | cheerfully, stating it is helping a customer with a technical issue. 
The AI 234 | explains that it is running a diagnostic scan to help the customer troubleshoot 235 | an issue with their computer that is preventing them from connecting to the 236 | internet.\nHuman: Very cool -- what is the scope of the project?\nAI:"], "model": 237 | "text-davinci-003", "temperature": 0.0, "top_p": 1, "frequency_penalty": 0, 238 | "presence_penalty": 0, "n": 1, "logit_bias": {}, "max_tokens": 256}' 239 | headers: {} 240 | method: POST 241 | uri: https://api.openai.com/v1/completions 242 | response: 243 | body: 244 | string: !!binary | 245 | H4sIAAAAAAAAA1RSwW4aQQy98xXWnhMgBATiWKkHqqRVpPRUVWiY8bJuZu3p2LuBRvn3apZAkssc 246 | /Pyen5/nZQRQPbvMxPtqDdVjQwqtBIzQY1YSBlIImDJ6ZxjGcE/77Axhh7VkhG+OO5ePML+C2XQ2 247 | BxNwvVCAQJq7ZEVBalDMPXkcwx26zNAWamOWdD2ZpOisltyOJSE7GntpJ0G8Ts5jSVirq+KUQjHp 248 | 2xSvVw92qB//anf3sDys7txX3fz8sjl8/3f/pPNTt+z+oLfCMDzY1kubIha1E+wzlpWqNdwspzfz 249 | 2ez2djEAw/5n2nVwPbGn6+n09o3XCHnUag2/RgAAL8MLp/ZCg8cGQb0kLKtbg5CyFCslSxMI5PYs 250 | iuA4gGXpdhG1EbGhl1S7wsAe2Yj3Q9F3atJihjpLC16Y0Z9AOZHYMDPaGDbgWvBdzsgWj5A7LrcF 251 | d55q5EG948KkUEbUx0FCpcv+4nhwMYYf7PEjSnomEYYr2MAzxVjW6yngZ6fPZA04UInd8AtMIKNK 252 | 7PHDgCHRITzigIdqDdNLJco+ZdmVoLmL8VKviUmbbUanwiVvNUnVgL6OAH4PR+rU7bFavx2nSlna 253 | ZFuTJ+QieDNbnfSq91/xji6Wb6CJufiBtVqMypDX0X8AAAD//wMA0A4xszgDAAA= 254 | headers: 255 | CF-Cache-Status: 256 | - DYNAMIC 257 | CF-RAY: 258 | - 82ea3edeebd0ef8c-PDX 259 | Cache-Control: 260 | - no-cache, must-revalidate 261 | Connection: 262 | - keep-alive 263 | Content-Encoding: 264 | - gzip 265 | Content-Type: 266 | - application/json 267 | Date: 268 | - Fri, 01 Dec 2023 09:18:57 GMT 269 | Transfer-Encoding: 270 | - chunked 271 | openai-model: 272 | - text-davinci-003 273 | openai-processing-ms: 274 | - '1103' 275 | x-ratelimit-limit-tokens_usage_based: 276 | - '250000' 277 | x-ratelimit-remaining-tokens_usage_based: 278 | - '249744' 279 | x-ratelimit-reset-tokens_usage_based: 280 | - 61ms 281 | status: 282 | code: 200 283 | message: OK 284 | - request: 285 | body: '{"prompt": ["Progressively summarize the lines of conversation provided, 286 | adding onto the previous summary returning a new summary.\n\nEXAMPLE\nCurrent 287 | summary:\nThe human asks what the AI thinks of artificial intelligence. The 288 | AI thinks artificial intelligence is a force for good.\n\nNew lines of conversation:\nHuman: 289 | Why do you think artificial intelligence is a force for good?\nAI: Because artificial 290 | intelligence will help humans reach their full potential.\n\nNew summary:\nThe 291 | human asks what the AI thinks of artificial intelligence. The AI thinks artificial 292 | intelligence is a force for good because it will help humans reach their full 293 | potential.\nEND OF EXAMPLE\n\nCurrent summary:\n\nThe human greets the AI and 294 | the AI responds cheerfully, stating it is helping a customer with a technical 295 | issue. The AI explains that it is running a diagnostic scan to help the customer 296 | troubleshoot an issue with their computer that is preventing them from connecting 297 | to the internet.\n\nNew lines of conversation:\nHuman: Very cool -- what is 298 | the scope of the project?\nAI: The scope of the project is to diagnose and 299 | troubleshoot the issue preventing the customer from connecting to the internet. 300 | I am currently running a diagnostic scan to identify the source of the issue. 
301 | Once the source is identified, I will provide the customer with a solution to 302 | resolve the issue.\n\nNew summary:"], "model": "text-davinci-003", "temperature": 303 | 0.7, "top_p": 1, "frequency_penalty": 0, "presence_penalty": 0, "n": 1, "logit_bias": 304 | {}, "max_tokens": 256}' 305 | headers: {} 306 | method: POST 307 | uri: https://api.openai.com/v1/completions 308 | response: 309 | body: 310 | string: !!binary | 311 | H4sIAAAAAAAAA1RSXW/bMAx8z68g/JwmaZIuXd42dAM2dA9Fi2HANhSKRFvcZFKQaCdp0f8+yPlo 312 | 92LAPB3vjuTzCKDamsTETbWG6sFThlYcBugxZRIGyuAwJrRG0U3gGzXJKMIGa0kIXw13Ju1hOYb5 313 | bL4EFTC9kANHOXVRSwepIWPqyeIEbtEkhrZQvWrM6+k0BqO1pHYiEdnQxEo7dWLz9CRLwrkaF6fk 314 | iknbxnBxfac7f1XbT08fL+3qtrn/cdfT/c3nm+9PLR5ey+YPWi0MxZ0+WmljwNLtANuEJVK1hsvV 315 | 7HI5ny8WqwEY8p9oF870xJYuZrPFkeeFLOZqDT9HAADPwxcOzwvtFz94BN+1hqFJiIoO1CN8+DKG 316 | rSfrIWGOwg4dWI+Y6i6EPRh2gLsYDDE6IIWtyeAxROIGDNguq7SYYEvqwYCi9UzWBKCcO5zAwyAB 317 | DrNNtDlqZisRywbKT0xSJnJyQbmsy5FpWDIO+pqk2wTMXkQHytAbYsIeWYuRUjxbqZO0YIUZ7QGU 318 | A4kVE6OePVEG26WErGEPqWM+RDpKK1nI1nChkys69f5gXrpkz+4HK+PB5pZCKGl6cvi/o+NwsoRu 319 | uD2VMmwJPb6JI2zxrQDlky6hmwxbHhZK7HBXrWF2rgRpYpJNWT53IZzrNTFl/5jQZOFyA1klVgP6 320 | MgL4PRxOl02D1fp4MFVM0kZ9VPmLXBrOV+8P/arXS31Fr6+OoIqa8FpfvFuOisjL6B8AAAD//wMA 321 | oPwQLMwDAAA= 322 | headers: 323 | CF-Cache-Status: 324 | - DYNAMIC 325 | CF-RAY: 326 | - 82ea3ee65f30ef8c-PDX 327 | Cache-Control: 328 | - no-cache, must-revalidate 329 | Connection: 330 | - keep-alive 331 | Content-Encoding: 332 | - gzip 333 | Content-Type: 334 | - application/json 335 | Date: 336 | - Fri, 01 Dec 2023 09:18:58 GMT 337 | Transfer-Encoding: 338 | - chunked 339 | openai-model: 340 | - text-davinci-003 341 | openai-processing-ms: 342 | - '1505' 343 | x-ratelimit-limit-tokens_usage_based: 344 | - '250000' 345 | x-ratelimit-remaining-tokens_usage_based: 346 | - '249744' 347 | x-ratelimit-reset-tokens_usage_based: 348 | - 61ms 349 | status: 350 | code: 200 351 | message: OK 352 | version: 1 353 | -------------------------------------------------------------------------------- /tests/memory/langchain_getting_started/test_conversation_summary_memory.py: -------------------------------------------------------------------------------- 1 | import langchain_visualizer # isort:skip # noqa: F401 2 | import asyncio 3 | 4 | import vcr_langchain as vcr 5 | from langchain.chains import ConversationChain 6 | from langchain.chains.conversation.memory import ConversationSummaryMemory 7 | from langchain.llms import OpenAI 8 | 9 | # ========================== Start of langchain example code ========================== 10 | # https://langchain.readthedocs.io/en/latest/modules/memory/getting_started.html 11 | 12 | 13 | llm = OpenAI(temperature=0) 14 | conversation_with_summary = ConversationChain( 15 | llm=llm, memory=ConversationSummaryMemory(llm=OpenAI()), verbose=True 16 | ) 17 | 18 | 19 | # ================================== Execute example ================================== 20 | 21 | 22 | @vcr.use_cassette() 23 | async def conversation_summary_memory_demo(): 24 | conversation_with_summary.predict(input="Hi, what's up?") 25 | conversation_with_summary.predict(input="Tell me more about it!") 26 | return conversation_with_summary.predict( 27 | input="Very cool -- what is the scope of the project?" 
28 | ) 29 | 30 | 31 | def test_llm_usage_succeeds(): 32 | """Check that the chain can run normally""" 33 | result = asyncio.get_event_loop().run_until_complete( 34 | conversation_summary_memory_demo() 35 | ) 36 | assert result.strip().startswith("The scope of the project is to") 37 | 38 | 39 | if __name__ == "__main__": 40 | from langchain_visualizer import visualize 41 | 42 | visualize(conversation_summary_memory_demo) 43 | -------------------------------------------------------------------------------- /tests/prompts/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amosjyng/langchain-visualizer/e51a63e5842a02548927532e1926c2b409ba43d1/tests/prompts/__init__.py -------------------------------------------------------------------------------- /tests/prompts/langchain_getting_started/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amosjyng/langchain-visualizer/e51a63e5842a02548927532e1926c2b409ba43d1/tests/prompts/langchain_getting_started/__init__.py -------------------------------------------------------------------------------- /tests/prompts/langchain_getting_started/dynamic_prompt_demo.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"prompt": ["Give the antonym of every input\n\nWord: happy\nAntonym: sad\n\nWord: 4 | big and huge and massive and large and gigantic and tall and much much much 5 | much much bigger than everything else\nAntonym: "], "model": "text-ada-001", 6 | "temperature": 0.0, "top_p": 1, "frequency_penalty": 0, "presence_penalty": 7 | 0, "n": 1, "logit_bias": {}, "max_tokens": 256}' 8 | headers: {} 9 | method: POST 10 | uri: https://api.openai.com/v1/completions 11 | response: 12 | body: 13 | string: !!binary | 14 | H4sIAAAAAAAAAwAAAP//RFBNb9swDL37VxA6N47jpqjh265Dd2hXYIdhCGSJttlJoiAxbrqi/32Q 15 | kzYXQXiP74N8rwDUq06BwqR6UM8zZfBs0cGCKRMHoAwWY0KjBW0NP2hKWhAGHDkhfNfhqNMb7G+g 16 | bdo9CINemCxYyukYpTjwCBnTQgZreECdAvginUVi7rfb6LSMnHzNEYOm2rDfWjZ5+xlLHLK6KU3J 17 | lpLGR7fpHuVEv/S3+O+5XX56Pz647oXsa0eDezpP8/CCRopC8CQHwz46LG5n2iQsK6kedvfNbt+2 18 | t7fdSqz7f8o22upN0+wumpnJYFY9/K4AAN7XF86jRQLZa+dAB3v5DSTlALhgepOZwlSvTquIgsWT 19 | 6qH5QhxPMfFQAsLRuS98pEB5PiTUmUPJycJRrexHBfBnLXfMekLVX0qpmNhHOQj/xVAM993ZTl0P 20 | cSXvL5ywaHeF7+6qkvBR/QcAAP//AwDRohDIKQIAAA== 21 | headers: 22 | CF-Cache-Status: 23 | - DYNAMIC 24 | CF-RAY: 25 | - 82ea3ef0da6fefca-PDX 26 | Cache-Control: 27 | - no-cache, must-revalidate 28 | Connection: 29 | - keep-alive 30 | Content-Encoding: 31 | - gzip 32 | Content-Type: 33 | - application/json 34 | Date: 35 | - Fri, 01 Dec 2023 09:18:58 GMT 36 | Transfer-Encoding: 37 | - chunked 38 | openai-model: 39 | - text-ada-001 40 | openai-processing-ms: 41 | - '191' 42 | x-ratelimit-limit-tokens_usage_based: 43 | - '250000' 44 | x-ratelimit-remaining-tokens_usage_based: 45 | - '249744' 46 | x-ratelimit-reset-tokens_usage_based: 47 | - 61ms 48 | status: 49 | code: 200 50 | message: OK 51 | version: 1 52 | -------------------------------------------------------------------------------- /tests/prompts/langchain_getting_started/few_shot_prompt_demo.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"prompt": ["Give the antonym of every input\n\nWord: happy\nAntonym: sad\n\nWord: 4 | tall\nAntonym: short\n\nWord: big\nAntonym: "], "model": "text-ada-001", 
"temperature": 5 | 0.0, "top_p": 1, "frequency_penalty": 0, "presence_penalty": 0, "n": 1, "logit_bias": 6 | {}, "max_tokens": 256}' 7 | headers: {} 8 | method: POST 9 | uri: https://api.openai.com/v1/completions 10 | response: 11 | body: 12 | string: !!binary | 13 | H4sIAAAAAAAAA0xPyU7DMBC95ytGPndJ0gpojpW4AD1UQixCqHLtSeLgeCzbDYGq/46cblzm8NZ5 14 | +wSAfXNnlKlYAey5Vh5akqihQ+cVGVAeJFqHggeUE1ipyvGAsMWSHMIDNzvufmA+gjzN5xAIeEdK 15 | glTe7WyICVSCR9cpgRN4Qu4MtNFah2B9MZ1azUNJrp2QRcPVRFA7lST89FyryHg2ip8qGZ8UrdXj 16 | u3XoG/+4fFveY++a95ebavWry1fbdHZ9VNO2QRGiI2AfNoJaqzGmHWnhME5iBWS3aTbP89lsMRDD 17 | /rNtzCUfp2l28tSkBHpWwEcCALAfLhyl0QK+5VoP4gFXRmLPCkgviKbKOtrGDLPT+oKXyihfbxxy 18 | TyZG+UCWDewhAfgc+neeV8iKUy+zjlobNoG+0MTA2d0xjl23XsnsxAUKXP/zLJLYcEj+AAAA//8D 19 | AHRmJR8MAgAA 20 | headers: 21 | CF-Cache-Status: 22 | - DYNAMIC 23 | CF-RAY: 24 | - 82ea3ef2e9f7efa4-PDX 25 | Cache-Control: 26 | - no-cache, must-revalidate 27 | Connection: 28 | - keep-alive 29 | Content-Encoding: 30 | - gzip 31 | Content-Type: 32 | - application/json 33 | Date: 34 | - Fri, 01 Dec 2023 09:18:59 GMT 35 | Transfer-Encoding: 36 | - chunked 37 | openai-model: 38 | - text-ada-001 39 | openai-processing-ms: 40 | - '68' 41 | x-ratelimit-limit-tokens_usage_based: 42 | - '250000' 43 | x-ratelimit-remaining-tokens_usage_based: 44 | - '249744' 45 | x-ratelimit-reset-tokens_usage_based: 46 | - 61ms 47 | status: 48 | code: 200 49 | message: OK 50 | version: 1 51 | -------------------------------------------------------------------------------- /tests/prompts/langchain_getting_started/multiple_inputs_prompt_demo.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"prompt": ["Tell me a funny joke about chickens."], "model": "text-ada-001", 4 | "temperature": 0.0, "top_p": 1, "frequency_penalty": 0, "presence_penalty": 5 | 0, "n": 1, "logit_bias": {}, "max_tokens": 256}' 6 | headers: {} 7 | method: POST 8 | uri: https://api.openai.com/v1/completions 9 | response: 10 | body: 11 | string: !!binary | 12 | H4sIAAAAAAAAA0RQTU/bQBC9+1dM90wSx0kL8YUDBySgqqioOJQq2uxO7AF7ZzU7DkGI/16tE8hl 13 | Du9r3sx7AWBerQQKjanBPLSUoGePHexQEnEASuAxCjqr6KfwkxqxirDBLQvCjQ2DlTdYnkFVVktQ 14 | Brtj8uApyRA1J/AWEsqOHE7hDq0E6LO1VY2pns1iZ3XL0k85YrA0ddzPPLs0+1xLHJI5y03J55Ku 15 | j93k4l73z4MsCX/HH7fX6fvd1a+Va3aLP4/p5qDmzTM6zQ7Fva4d97HDnHagnWA+ydQwPy/ny6pa 16 | LFYjMd7/aZtYbydlOT96WiaHydTwtwAAeB8nHKTZ8hSewmP7Bp48aIvgWnIvGMAJpzQiwtZfZtkD 17 | Q4OaP5Zh1hYFEnn8Nq4aUyl43Jsayi+k4yYKb3KDMHTdF76lQKldC9rEIRdJytGM7EcB8G9sPyTb 18 | oKmPrU0U7qOulV8w5MCLQ5o5PerEzVdHUlltd8Kr8yJv+Cj+AwAA//8DAPjQPepKAgAA 19 | headers: 20 | CF-Cache-Status: 21 | - DYNAMIC 22 | CF-RAY: 23 | - 82ea3ef42efe5ed4-PDX 24 | Cache-Control: 25 | - no-cache, must-revalidate 26 | Connection: 27 | - keep-alive 28 | Content-Encoding: 29 | - gzip 30 | Content-Type: 31 | - application/json 32 | Date: 33 | - Fri, 01 Dec 2023 09:18:59 GMT 34 | Transfer-Encoding: 35 | - chunked 36 | openai-model: 37 | - text-ada-001 38 | openai-processing-ms: 39 | - '163' 40 | x-ratelimit-limit-tokens_usage_based: 41 | - '250000' 42 | x-ratelimit-remaining-tokens_usage_based: 43 | - '249744' 44 | x-ratelimit-reset-tokens_usage_based: 45 | - 61ms 46 | status: 47 | code: 200 48 | message: OK 49 | version: 1 50 | -------------------------------------------------------------------------------- /tests/prompts/langchain_getting_started/no_inputs_prompt_demo.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - 
request: 3 | body: '{"prompt": ["Tell me a joke."], "model": "text-ada-001", "temperature": 4 | 0.0, "top_p": 1, "frequency_penalty": 0, "presence_penalty": 0, "n": 1, "logit_bias": 5 | {}, "max_tokens": 256}' 6 | headers: {} 7 | method: POST 8 | uri: https://api.openai.com/v1/completions 9 | response: 10 | body: 11 | string: !!binary | 12 | H4sIAAAAAAAAA0RQTU8bQQy9769w50ySzZKWshfEsRSEqNJWCKpoMuPsGnbHI4+TJkL572g2gVx8 13 | eF9+9lsBYP5bCRQaU4OZt5SgZ48dbFAScQBK4DEKOqvox3BHjVhFWOKKBeHGhrWVHczOoCqrGSiD 14 | 3TB58JRkHTUn8AoSyoYcjuEWrQTos7VVjameTGJndcXSjzlisDR23E88uzT5WEsckjnLTcnnkq6P 15 | 3ej7g25f7O95urF6zxfxTv7s7n/++vYo1z/8Qc3LF3SaHYpbXTjuY4c57UA7wXySqWF6UU5nVXV+ 16 | fjkQw/0ftpH1dlSW06OnZXKYTA1PBQDA2zDhIM2W5/Ac/rY78ORBWwTXknvFAE44pQERtv4qy+YM 17 | DWr+WIZZWxRI5PHLsGpIpeBxa2ooP5GOmyi8zA3Cuus+8RUFSu1C0CYOuUhSjmZg9wXAv6H9OtkG 18 | TX1sbaJwH3Wh/IohB349pJnTo07c9PJIKqvtTng1K/KGffEOAAD//wMA3iSTiEoCAAA= 19 | headers: 20 | CF-Cache-Status: 21 | - DYNAMIC 22 | CF-RAY: 23 | - 82ea3ef619528e62-PDX 24 | Cache-Control: 25 | - no-cache, must-revalidate 26 | Connection: 27 | - keep-alive 28 | Content-Encoding: 29 | - gzip 30 | Content-Type: 31 | - application/json 32 | Date: 33 | - Fri, 01 Dec 2023 09:18:59 GMT 34 | Transfer-Encoding: 35 | - chunked 36 | openai-model: 37 | - text-ada-001 38 | openai-processing-ms: 39 | - '164' 40 | x-ratelimit-limit-tokens_usage_based: 41 | - '250000' 42 | x-ratelimit-remaining-tokens_usage_based: 43 | - '249744' 44 | x-ratelimit-reset-tokens_usage_based: 45 | - 61ms 46 | status: 47 | code: 200 48 | message: OK 49 | version: 1 50 | -------------------------------------------------------------------------------- /tests/prompts/langchain_getting_started/one_input_prompt_demo.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"prompt": ["Tell me a funny joke."], "model": "text-ada-001", "temperature": 4 | 0.0, "top_p": 1, "frequency_penalty": 0, "presence_penalty": 0, "n": 1, "logit_bias": 5 | {}, "max_tokens": 256}' 6 | headers: {} 7 | method: POST 8 | uri: https://api.openai.com/v1/completions 9 | response: 10 | body: 11 | string: !!binary | 12 | H4sIAAAAAAAAA0RQTU8jMQy9z68wOdN2Zuiy27lwQCutEBxWIO2BoipN3BlDEkeJW1oh/vsq00Iv 13 | PrwvP/ujAlDvOgUKvepAPQ2UwbNFBztMmTgAZbAYExotaKfwQH3SgrDGDSeEOx22Oh1gfglt3c5B 14 | GPSOyYKlnLZRSgJvIGPakcEp3KNOAXyxDiIxd7NZdFo2nPyUIwZNU8N+Ztnk2dda4pDVZWlKtpQ0 15 | PrrJr7+yf/1Tu54X2t/dXh/e93Xz27v+0T7Go5rXr2ikOAT3sjLso8OSdqRNwnKS6qD5WTfztr26 16 | WozEeP+XbaKtntR1c/IMTAaz6uC5AgD4GCccpcWyDMvwbziAJQsyIJiBzBsGMIlzHpHE2t4U2RND 17 | j1I+VmCWARNksngxrhpTKVjcqw7qb8RxHxOvS4Owde4b31CgPKwS6syhFMnCUY3sZwXwMrbfZt2j 18 | 6k6tVUzso6yE3zCUwOtjmjo/6sw1ixMpLNqd8fZHVTZ8Vv8BAAD//wMA7+kV8EoCAAA= 19 | headers: 20 | CF-Cache-Status: 21 | - DYNAMIC 22 | CF-RAY: 23 | - 82ea3ef80a3b5ed3-PDX 24 | Cache-Control: 25 | - no-cache, must-revalidate 26 | Connection: 27 | - keep-alive 28 | Content-Encoding: 29 | - gzip 30 | Content-Type: 31 | - application/json 32 | Date: 33 | - Fri, 01 Dec 2023 09:19:00 GMT 34 | Transfer-Encoding: 35 | - chunked 36 | openai-model: 37 | - text-ada-001 38 | openai-processing-ms: 39 | - '170' 40 | x-ratelimit-limit-tokens_usage_based: 41 | - '250000' 42 | x-ratelimit-remaining-tokens_usage_based: 43 | - '249744' 44 | x-ratelimit-reset-tokens_usage_based: 45 | - 61ms 46 | status: 47 | code: 200 48 | message: OK 49 | version: 1 50 | -------------------------------------------------------------------------------- /tests/prompts/langchain_getting_started/test_dynamic_prompt.py: 
-------------------------------------------------------------------------------- 1 | import langchain_visualizer # isort:skip # noqa: F401 2 | import asyncio 3 | 4 | import vcr_langchain as vcr 5 | from fvalues import FValue 6 | from langchain import FewShotPromptTemplate, PromptTemplate 7 | from langchain.llms import OpenAI 8 | from langchain.prompts.example_selector import LengthBasedExampleSelector 9 | 10 | # ========================== Start of langchain example code ========================== 11 | # https://langchain.readthedocs.io/en/latest/modules/prompts/getting_started.html 12 | 13 | 14 | # Next, we specify the template to format the examples we have provided. 15 | # We use the `PromptTemplate` class for this. 16 | example_formatter_template = """ 17 | Word: {word} 18 | Antonym: {antonym} 19 | """.strip() 20 | example_prompt = PromptTemplate( 21 | input_variables=["word", "antonym"], 22 | template=example_formatter_template, 23 | ) 24 | 25 | # Here are a number of examples for the pretend task of creating antonyms. 26 | examples = [ 27 | {"word": "happy", "antonym": "sad"}, 28 | {"word": "tall", "antonym": "short"}, 29 | {"word": "energetic", "antonym": "lethargic"}, 30 | {"word": "sunny", "antonym": "gloomy"}, 31 | {"word": "windy", "antonym": "calm"}, 32 | ] 33 | 34 | # We'll use the `LengthBasedExampleSelector` to select the examples. 35 | example_selector = LengthBasedExampleSelector( 36 | examples=examples, example_prompt=example_prompt, max_length=25 37 | ) 38 | 39 | # We can now use the `example_selector` to create a `FewShotPromptTemplate`. 40 | dynamic_prompt = FewShotPromptTemplate( 41 | # We provide an ExampleSelector instead of examples. 42 | example_selector=example_selector, 43 | example_prompt=example_prompt, 44 | prefix="Give the antonym of every input", 45 | suffix="Word: {input}\nAntonym: ", 46 | input_variables=["input"], 47 | example_separator="\n\n", 48 | ) 49 | 50 | # We can now generate a prompt using the `format` method. 51 | long_string = ( 52 | "big and huge and massive and large and gigantic and tall and much much " 53 | "much much much bigger than everything else" 54 | ) 55 | prompt = dynamic_prompt.format(input=long_string) 56 | 57 | 58 | # ================================== Execute example ================================== 59 | 60 | 61 | def test_prompt(): 62 | print([repr(x) for x in prompt.flatten().parts]) 63 | assert prompt.flatten().parts == ( 64 | "Give the antonym of every input", 65 | FValue(source="self.example_separator", value="\n\n", formatted="\n\n"), 66 | "Word: ", 67 | FValue(source="word", value="happy", formatted="happy"), 68 | "\nAntonym: ", 69 | FValue(source="antonym", value="sad", formatted="sad"), 70 | FValue(source="self.example_separator", value="\n\n", formatted="\n\n"), 71 | "Word: ", 72 | FValue( 73 | source="input", 74 | value="big and huge and massive and large and gigantic and tall " 75 | "and much much much much much bigger than everything else", 76 | formatted="big and huge and massive and large and gigantic and " 77 | "tall and much much much much much bigger than everything else", 78 | ), 79 | "\nAntonym: ", 80 | ) 81 | 82 | 83 | @vcr.use_cassette() 84 | async def dynamic_prompt_demo(): 85 | agent = OpenAI(model_name="text-ada-001", temperature=0) 86 | return agent(prompt) 87 | 88 | 89 | def test_llm_usage_succeeds(): 90 | """ 91 | Check that it works like a regular prompt. 92 | Also, record playback for easy visualization. 
93 | """ 94 | result = asyncio.get_event_loop().run_until_complete(dynamic_prompt_demo()) 95 | assert result.strip().startswith("small") 96 | 97 | 98 | if __name__ == "__main__": 99 | from langchain_visualizer import visualize 100 | 101 | visualize(dynamic_prompt_demo) 102 | -------------------------------------------------------------------------------- /tests/prompts/langchain_getting_started/test_few_shot.py: -------------------------------------------------------------------------------- 1 | import langchain_visualizer # isort:skip # noqa: F401 2 | import asyncio 3 | 4 | import vcr_langchain as vcr 5 | from langchain import FewShotPromptTemplate, PromptTemplate 6 | from langchain.llms import OpenAI 7 | 8 | # ========================== Start of langchain example code ========================== 9 | # https://langchain.readthedocs.io/en/latest/modules/prompts/getting_started.html 10 | 11 | 12 | # First, create the list of few shot examples. 13 | examples = [ 14 | {"word": "happy", "antonym": "sad"}, 15 | {"word": "tall", "antonym": "short"}, 16 | ] 17 | 18 | # Next, we specify the template to format the examples we have provided. 19 | # We use the `PromptTemplate` class for this. 20 | example_formatter_template = """ 21 | Word: {word} 22 | Antonym: {antonym} 23 | """.strip() 24 | example_prompt = PromptTemplate( 25 | input_variables=["word", "antonym"], template=example_formatter_template 26 | ) 27 | 28 | few_shot_prompt = FewShotPromptTemplate( 29 | examples=examples, 30 | example_prompt=example_prompt, 31 | prefix="Give the antonym of every input", 32 | suffix="Word: {input}\nAntonym: ", 33 | input_variables=["input"], 34 | example_separator="\n\n", 35 | ) 36 | 37 | # We can now generate a prompt using the `format` method. 38 | prompt = few_shot_prompt.format(input="big") 39 | 40 | 41 | # ================================== Execute example ================================== 42 | 43 | 44 | def test_prompt(): 45 | assert ( 46 | prompt 47 | == """ 48 | Give the antonym of every input 49 | 50 | Word: happy 51 | Antonym: sad 52 | 53 | Word: tall 54 | Antonym: short 55 | 56 | Word: big 57 | Antonym: """.lstrip() 58 | ) 59 | 60 | 61 | @vcr.use_cassette() 62 | async def few_shot_prompt_demo(): 63 | agent = OpenAI(model_name="text-ada-001", temperature=0) 64 | return agent(prompt) 65 | 66 | 67 | def test_llm_usage_succeeds(): 68 | """ 69 | Check that it works like a regular prompt. 70 | Also, record playback for easy visualization. 
71 | """ 72 | result = asyncio.get_event_loop().run_until_complete(few_shot_prompt_demo()) 73 | assert result.strip().startswith("small") 74 | 75 | 76 | if __name__ == "__main__": 77 | from langchain_visualizer import visualize 78 | 79 | visualize(few_shot_prompt_demo) 80 | -------------------------------------------------------------------------------- /tests/prompts/langchain_getting_started/test_multiple_inputs.py: -------------------------------------------------------------------------------- 1 | import langchain_visualizer # isort:skip # noqa: F401 2 | import asyncio 3 | 4 | import vcr_langchain as vcr 5 | from fvalues import FValue 6 | from langchain import PromptTemplate 7 | from langchain.llms import OpenAI 8 | 9 | # ========================== Start of langchain example code ========================== 10 | # https://langchain.readthedocs.io/en/latest/modules/prompts/getting_started.html 11 | 12 | 13 | # An example prompt with multiple input variables 14 | multiple_input_prompt = PromptTemplate( 15 | input_variables=["adjective", "content"], 16 | template="Tell me a {adjective} joke about {content}.", 17 | ) 18 | prompt = multiple_input_prompt.format(adjective="funny", content="chickens") 19 | 20 | 21 | # ================================== Execute example ================================== 22 | 23 | 24 | def test_prompt(): 25 | assert prompt.parts == ( 26 | "Tell me a ", 27 | FValue(source="adjective", value="funny", formatted="funny"), 28 | " joke about ", 29 | FValue(source="content", value="chickens", formatted="chickens"), 30 | ".", 31 | ) 32 | 33 | 34 | @vcr.use_cassette() 35 | async def multiple_inputs_prompt_demo(): 36 | agent = OpenAI(model_name="text-ada-001", temperature=0) 37 | return agent(prompt) 38 | 39 | 40 | def test_llm_usage_succeeds(): 41 | """ 42 | Check that it works like a regular prompt. 43 | Also, record playback for easy visualization. 44 | """ 45 | result = asyncio.get_event_loop().run_until_complete(multiple_inputs_prompt_demo()) 46 | assert result.strip().startswith("Why did the chicken cross the road?") 47 | 48 | 49 | if __name__ == "__main__": 50 | from langchain_visualizer import visualize 51 | 52 | visualize(multiple_inputs_prompt_demo) 53 | -------------------------------------------------------------------------------- /tests/prompts/langchain_getting_started/test_no_inputs.py: -------------------------------------------------------------------------------- 1 | import langchain_visualizer # isort:skip # noqa: F401 2 | import asyncio 3 | 4 | import vcr_langchain as vcr 5 | from langchain import PromptTemplate 6 | from langchain.llms import OpenAI 7 | 8 | # ========================== Start of langchain example code ========================== 9 | # https://langchain.readthedocs.io/en/latest/modules/prompts/getting_started.html 10 | 11 | 12 | # An example prompt with no input variables 13 | no_input_prompt = PromptTemplate(input_variables=[], template="Tell me a joke.") 14 | prompt = no_input_prompt.format() 15 | 16 | 17 | # ================================== Execute example ================================== 18 | 19 | 20 | def test_prompt(): 21 | assert prompt.parts == ("Tell me a joke.",) 22 | 23 | 24 | @vcr.use_cassette() 25 | async def no_inputs_prompt_demo(): 26 | agent = OpenAI(model_name="text-ada-001", temperature=0) 27 | return agent(prompt) 28 | 29 | 30 | def test_llm_usage_succeeds(): 31 | """ 32 | Check that it works like a regular prompt. 33 | Also, record playback for easy visualization. 
34 | """ 35 | result = asyncio.get_event_loop().run_until_complete(no_inputs_prompt_demo()) 36 | assert result.strip().startswith("Why did the chicken cross the road?") 37 | 38 | 39 | if __name__ == "__main__": 40 | from langchain_visualizer import visualize 41 | 42 | visualize(no_inputs_prompt_demo) 43 | -------------------------------------------------------------------------------- /tests/prompts/langchain_getting_started/test_one_input.py: -------------------------------------------------------------------------------- 1 | import langchain_visualizer # isort:skip # noqa: F401 2 | import asyncio 3 | 4 | import vcr_langchain as vcr 5 | from fvalues import FValue 6 | from langchain import PromptTemplate 7 | from langchain.llms import OpenAI 8 | 9 | # ========================== Start of langchain example code ========================== 10 | # https://langchain.readthedocs.io/en/latest/modules/prompts/getting_started.html 11 | 12 | 13 | # An example prompt with one input variable 14 | one_input_prompt = PromptTemplate( 15 | input_variables=["adjective"], template="Tell me a {adjective} joke." 16 | ) 17 | prompt = one_input_prompt.format(adjective="funny") 18 | 19 | 20 | # ================================== Execute example ================================== 21 | 22 | 23 | def test_prompt(): 24 | assert prompt.parts == ( 25 | "Tell me a ", 26 | FValue(source="adjective", value="funny", formatted="funny"), 27 | " joke.", 28 | ) 29 | 30 | 31 | @vcr.use_cassette() 32 | async def one_input_prompt_demo(): 33 | agent = OpenAI(model_name="text-ada-001", temperature=0) 34 | return agent(prompt) 35 | 36 | 37 | def test_llm_usage_succeeds(): 38 | """ 39 | Check that it works like a regular prompt. 40 | Also, record playback for easy visualization. 41 | """ 42 | result = asyncio.get_event_loop().run_until_complete(one_input_prompt_demo()) 43 | assert result.strip().startswith("Why did the chicken cross the road?") 44 | 45 | 46 | if __name__ == "__main__": 47 | from langchain_visualizer import visualize 48 | 49 | visualize(one_input_prompt_demo) 50 | -------------------------------------------------------------------------------- /tests/prompts/partial/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amosjyng/langchain-visualizer/e51a63e5842a02548927532e1926c2b409ba43d1/tests/prompts/partial/__init__.py -------------------------------------------------------------------------------- /tests/prompts/partial/test_partial_with_strings.yaml: -------------------------------------------------------------------------------- 1 | interactions: 2 | - request: 3 | body: '{"prompt": ["Why did the chicken cross the road?"], "model": "text-ada-001", 4 | "temperature": 0.0, "top_p": 1, "frequency_penalty": 0, "presence_penalty": 5 | 0, "n": 1, "logit_bias": {}, "max_tokens": 256}' 6 | headers: {} 7 | method: POST 8 | uri: https://api.openai.com/v1/completions 9 | response: 10 | body: 11 | string: !!binary | 12 | H4sIAAAAAAAAA1RQQW7bMBC8+xUDnm1ZVhwk1QOapEgPRVOgRVMYFLmSWFNclqRs10H+XlB2YvSy 13 | h5md2Zl9mQFiL4MzrhM1xFNvIgbWZLGjEA07mAhNPpCSiXSBz6YLMhEaajkQPkk3yvAX6zmqsloj 14 | MeSOjYY2MYw+ZQduESnsjKICjySDw5ClfUo+1sultzK1HIaCPTlpCsXDUrOKy7ezhl0U85zU6BxS 15 | Dd4ubr+kw/bb9wf943j3eP3H7j8ehvT1/vp4vBua0zY3v0mlrEh0SBvFg7eU3U60CpQriRqrm3K1 16 | rqqrdTkRU/832UJquSjL1VnTs1EURY2fMwB4mSZOq1ny7J7dU09QvVFbclCBYySN1BMCS42GlBwj 17 | wSTsZYRl3hrXoeUACW+lovzDjrHvKUxriker4Tih4zmaMf0PtsZpsKNiSjiFMU7TQdQo3xHLnQ/c 18 | 
5OButPYdb40zsd8EkpFdzh8TezGxrzPg11R6jLIjUZ/LCh948GmTeEsuG96e3MTlvxeu+nAmEydp 19 | L/jVzSxfeJ39AwAA//8DAPCT7VWBAgAA 20 | headers: 21 | CF-Cache-Status: 22 | - DYNAMIC 23 | CF-RAY: 24 | - 82ea3ef9fb7aefe2-PDX 25 | Cache-Control: 26 | - no-cache, must-revalidate 27 | Connection: 28 | - keep-alive 29 | Content-Encoding: 30 | - gzip 31 | Content-Type: 32 | - application/json 33 | Date: 34 | - Fri, 01 Dec 2023 09:19:00 GMT 35 | Transfer-Encoding: 36 | - chunked 37 | openai-model: 38 | - text-ada-001 39 | openai-processing-ms: 40 | - '216' 41 | x-ratelimit-limit-tokens_usage_based: 42 | - '250000' 43 | x-ratelimit-remaining-tokens_usage_based: 44 | - '249744' 45 | x-ratelimit-reset-tokens_usage_based: 46 | - 61ms 47 | status: 48 | code: 200 49 | message: OK 50 | version: 1 51 | -------------------------------------------------------------------------------- /tests/prompts/partial/test_with_strings.py: -------------------------------------------------------------------------------- 1 | import langchain_visualizer # isort:skip # noqa: F401 2 | 3 | import vcr_langchain as vcr 4 | from langchain import PromptTemplate 5 | from langchain.llms import OpenAI 6 | 7 | # ========================== Start of langchain example code ========================== 8 | # https://langchain.readthedocs.io/en/latest/modules/prompts/examples/partial.html 9 | 10 | 11 | @vcr.use_cassette() 12 | async def test_partial_with_strings(): 13 | agent = OpenAI(model_name="text-ada-001", temperature=0) 14 | prompt = PromptTemplate.from_template("Why did the {animal} cross the {surface}?") 15 | partial_prompt = prompt.partial(surface="road") 16 | final_prompt = partial_prompt.format(animal="chicken") 17 | assert final_prompt == "Why did the chicken cross the road?" 18 | return agent(final_prompt) 19 | 20 | 21 | # ================================== Execute example ================================== 22 | 23 | if __name__ == "__main__": 24 | from langchain_visualizer import visualize 25 | 26 | visualize(test_partial_with_strings) 27 | -------------------------------------------------------------------------------- /tests/prompts/test_few_shot_prompt_template_f.py: -------------------------------------------------------------------------------- 1 | import langchain_visualizer # isort:skip # noqa: F401 2 | 3 | from fvalues import FValue 4 | from langchain import FewShotPromptTemplate, PromptTemplate 5 | 6 | 7 | def test_few_shot_f(): 8 | examples = [ 9 | {"word": "happy", "antonym": "sad"}, 10 | {"word": "tall", "antonym": "short"}, 11 | # Should be able to handle extra keys that is not exists in input_variables 12 | {"word": "better", "antonym": "worse", "extra": "extra"}, 13 | ] 14 | 15 | example_prompt = PromptTemplate( 16 | input_variables=["word", "antonym"], 17 | template="w={word},a={antonym}", 18 | ) 19 | 20 | few_shot_prompt = FewShotPromptTemplate( 21 | examples=examples, 22 | example_prompt=example_prompt, 23 | prefix="Give the antonym of every input:", 24 | suffix="w={input},a=", 25 | input_variables=["input"], 26 | example_separator=" ", 27 | ) 28 | 29 | s = few_shot_prompt.format(input="big") 30 | assert s == ( 31 | "Give the antonym of every input: " 32 | "w=happy,a=sad w=tall,a=short w=better,a=worse w=big,a=" 33 | ) 34 | print([repr(x) for x in s.flatten().parts]) 35 | assert s.flatten().parts == ( 36 | "Give the antonym of every input:", 37 | FValue(source="self.example_separator", value=" ", formatted=" "), 38 | "w=", 39 | FValue(source="word", value="happy", formatted="happy"), 40 | ",a=", 41 | 
FValue(source="antonym", value="sad", formatted="sad"), 42 | FValue(source="self.example_separator", value=" ", formatted=" "), 43 | "w=", 44 | FValue(source="word", value="tall", formatted="tall"), 45 | ",a=", 46 | FValue(source="antonym", value="short", formatted="short"), 47 | FValue(source="self.example_separator", value=" ", formatted=" "), 48 | "w=", 49 | FValue(source="word", value="better", formatted="better"), 50 | ",a=", 51 | FValue(source="antonym", value="worse", formatted="worse"), 52 | FValue(source="self.example_separator", value=" ", formatted=" "), 53 | "w=", 54 | FValue(source="input", value="big", formatted="big"), 55 | ",a=", 56 | ) 57 | -------------------------------------------------------------------------------- /tests/prompts/test_prompt_template_f.py: -------------------------------------------------------------------------------- 1 | # ensure compatibility with fvalues by modifying tests from 2 | # https://github.com/oughtinc/fvalues/blob/4baf69e/tests/test_f.py 3 | # langchain does not appear to support numerical format options, so we'll skip those 4 | # tests 5 | 6 | import langchain_visualizer # isort:skip # noqa: F401 7 | 8 | from fvalues import FValue 9 | from langchain import PromptTemplate 10 | 11 | 12 | def test_add_f(): 13 | f1 = PromptTemplate(template="hello {foo}", input_variables=["foo"]).format(foo="3") 14 | f2 = PromptTemplate(template="world {bar}", input_variables=["bar"]).format(bar="7") 15 | f3 = f1 + " " + f2 16 | assert f3 == "hello 3 world 7" 17 | assert f3.parts == ( 18 | FValue(source='f1 + " "', value="hello 3 ", formatted="hello 3 "), 19 | FValue(source="f2", value="world 7", formatted="world 7"), 20 | ) 21 | assert f3.flatten().parts == ( 22 | "hello ", 23 | FValue(source="foo", value="3", formatted="3"), 24 | " ", 25 | "world ", 26 | FValue(source="bar", value="7", formatted="7"), 27 | ) 28 | 29 | 30 | def test_still_node_from_eval(): 31 | # unlike the original fvalues, PromptTemplate should work regardless 32 | s = eval( 33 | 'PromptTemplate(template="hello {foo}", ' 34 | 'input_variables=["foo"]).format(foo="world")' 35 | ) 36 | assert s == "hello world" 37 | assert s.parts == ( 38 | "hello ", 39 | FValue(source="foo", value="world", formatted="world"), 40 | ) 41 | 42 | 43 | def test_strip(): 44 | space = " " 45 | s = PromptTemplate( 46 | template=" {space} hello {space} ", input_variables=["space"] 47 | ).format(space=space) 48 | assert s == " hello " 49 | assert s.parts == ( 50 | " ", 51 | FValue(source="space", value=" ", formatted=" "), 52 | " hello ", 53 | FValue(source="space", value=" ", formatted=" "), 54 | " ", 55 | ) 56 | assert s.strip() == "hello" 57 | assert s.strip(space) == "hello" 58 | assert s.lstrip() == "hello " 59 | assert s.lstrip(space) == "hello " 60 | assert s.rstrip() == " hello" 61 | assert s.rstrip(space) == " hello" 62 | assert s.strip().parts == ("hello",) 63 | assert s.lstrip().parts == ( 64 | "hello ", 65 | FValue(source="space", value=" ", formatted=" "), 66 | " ", 67 | ) 68 | assert s.rstrip().parts == ( 69 | " ", 70 | FValue(source="space", value=" ", formatted=" "), 71 | " hello", 72 | ) 73 | assert s.strip().strip("ho").strip() == "ell" 74 | 75 | 76 | def test_partial_f(): 77 | partial = PromptTemplate.from_template("hello {foo} world {bar}").partial(foo="3") 78 | final = partial.format(bar="7") 79 | assert final == "hello 3 world 7" 80 | assert final.parts == ( 81 | "hello ", 82 | FValue(source="foo", value="3", formatted="3"), 83 | " world ", 84 | FValue(source="bar", value="7", formatted="7"), 
85 | ) 86 | -------------------------------------------------------------------------------- /tests/resources/Chinook.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amosjyng/langchain-visualizer/e51a63e5842a02548927532e1926c2b409ba43d1/tests/resources/Chinook.db -------------------------------------------------------------------------------- /tests/resources/sotu_faiss.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amosjyng/langchain-visualizer/e51a63e5842a02548927532e1926c2b409ba43d1/tests/resources/sotu_faiss.pkl -------------------------------------------------------------------------------- /tests/sotu.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pickle 3 | from io import BufferedWriter 4 | 5 | from langchain.document_loaders import TextLoader 6 | from langchain.embeddings.openai import OpenAIEmbeddings 7 | from langchain.text_splitter import CharacterTextSplitter 8 | from langchain.vectorstores.faiss import FAISS 9 | 10 | FAISS_PATH = "tests/resources/sotu_faiss.pkl" 11 | 12 | 13 | def load_sotu() -> FAISS: 14 | if os.path.isfile(FAISS_PATH): 15 | with open(FAISS_PATH, "rb") as f: 16 | return pickle.load(f) 17 | 18 | loader = TextLoader("tests/resources/state_of_the_union.txt") 19 | state_of_the_union = loader.load() 20 | text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) 21 | docs = text_splitter.split_documents(state_of_the_union) 22 | embeddings = OpenAIEmbeddings() 23 | docsearch = FAISS.from_documents(docs, embeddings) 24 | with open(FAISS_PATH, "wb") as f: # type: ignore 25 | assert isinstance(f, BufferedWriter) # mypy complains otherwise 26 | pickle.dump(docsearch, f) 27 | return docsearch 28 | -------------------------------------------------------------------------------- /tests/test_cli_args.py: -------------------------------------------------------------------------------- 1 | """Test that CLI args do not affect script startup.""" 2 | 3 | import subprocess 4 | 5 | 6 | def test_start_with_args(): 7 | result = subprocess.run( 8 | "python3 tests/dummy_viz.py asdf", 9 | shell=True, 10 | stdout=subprocess.PIPE, 11 | stderr=subprocess.PIPE, 12 | ) 13 | assert "error: unrecognized arguments" not in result.stderr.decode() 14 | assert "Opening trace in browser" in result.stdout.decode() 15 | --------------------------------------------------------------------------------