├── .editorconfig ├── .env.example ├── .gitattributes ├── .github └── workflows │ ├── ci.yml │ └── integration.yml ├── .gitignore ├── .husky └── pre-commit ├── .nvmrc ├── .prettierignore ├── .prettierrc ├── .vscode └── settings.json ├── .watchmanconfig ├── .yarn ├── patches │ └── dpdm-npm-3.12.0-0dfdd8e3b8.patch ├── plugins │ └── @yarnpkg │ │ └── plugin-typescript.cjs └── releases │ └── yarn-3.4.1.cjs ├── .yarnrc.yml ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── docker-compose.yml ├── docs ├── .eslintrc.js ├── .gitignore ├── .prettierignore ├── README.md ├── babel.config.js ├── docs │ ├── ecosystem │ │ ├── databerry.md │ │ ├── helicone.md │ │ └── unstructured.md │ ├── getting-started │ │ ├── guide-chat.mdx │ │ ├── guide-llm.mdx │ │ └── install.md │ ├── index.md │ ├── modules │ │ ├── agents │ │ │ ├── agents │ │ │ │ ├── custom_llm.mdx │ │ │ │ ├── custom_llm_chat.mdx │ │ │ │ ├── examples │ │ │ │ │ ├── chat_mrkl.mdx │ │ │ │ │ ├── conversational_agent.mdx │ │ │ │ │ ├── custom_agent_chat.mdx │ │ │ │ │ ├── index.mdx │ │ │ │ │ └── llm_mrkl.mdx │ │ │ │ └── index.mdx │ │ │ ├── executor │ │ │ │ ├── getting-started.md │ │ │ │ └── index.mdx │ │ │ ├── index.mdx │ │ │ ├── toolkits │ │ │ │ ├── examples │ │ │ │ │ ├── index.mdx │ │ │ │ │ ├── json.md │ │ │ │ │ ├── openapi.md │ │ │ │ │ ├── sql.mdx │ │ │ │ │ └── vectorstore.md │ │ │ │ └── index.mdx │ │ │ └── tools │ │ │ │ ├── agents_with_vectorstores.md │ │ │ │ ├── aiplugin-tool.mdx │ │ │ │ ├── index.mdx │ │ │ │ ├── integrations │ │ │ │ └── index.mdx │ │ │ │ ├── lambda_agent.md │ │ │ │ ├── webbrowser.mdx │ │ │ │ └── zapier_agent.md │ │ ├── chains │ │ │ ├── index.mdx │ │ │ ├── index_related_chains │ │ │ │ ├── conversational_retrieval.mdx │ │ │ │ ├── document_qa.mdx │ │ │ │ ├── index.mdx │ │ │ │ └── retrieval_qa.mdx │ │ │ ├── llmchain │ │ │ │ └── index.mdx │ │ │ ├── other_chains │ │ │ │ ├── analyze_document.mdx │ │ │ │ ├── index.mdx │ │ │ │ ├── sql.mdx │ │ │ │ └── summarization.mdx │ │ │ └── prompt_selectors │ │ │ │ └── index.mdx │ │ 
├── indexes │ │ │ ├── document_loaders │ │ │ │ ├── examples │ │ │ │ │ ├── file_loaders │ │ │ │ │ │ ├── csv.md │ │ │ │ │ │ ├── directory.md │ │ │ │ │ │ ├── docx.md │ │ │ │ │ │ ├── epub.md │ │ │ │ │ │ ├── index.mdx │ │ │ │ │ │ ├── json.md │ │ │ │ │ │ ├── jsonlines.md │ │ │ │ │ │ ├── notion_markdown.mdx │ │ │ │ │ │ ├── pdf.md │ │ │ │ │ │ ├── subtitles.md │ │ │ │ │ │ ├── text.md │ │ │ │ │ │ └── unstructured.mdx │ │ │ │ │ ├── index.mdx │ │ │ │ │ └── web_loaders │ │ │ │ │ │ ├── college_confidential.md │ │ │ │ │ │ ├── gitbook.md │ │ │ │ │ │ ├── github.md │ │ │ │ │ │ ├── hn.md │ │ │ │ │ │ ├── imsdb.md │ │ │ │ │ │ ├── index.mdx │ │ │ │ │ │ ├── s3.mdx │ │ │ │ │ │ ├── web_cheerio.md │ │ │ │ │ │ ├── web_playwright.md │ │ │ │ │ │ └── web_puppeteer.md │ │ │ │ └── index.mdx │ │ │ ├── index.mdx │ │ │ ├── retrievers │ │ │ │ ├── chatgpt-retriever-plugin.mdx │ │ │ │ ├── databerry-retriever.mdx │ │ │ │ ├── index.mdx │ │ │ │ ├── metal-retriever.mdx │ │ │ │ ├── remote-retriever.mdx │ │ │ │ ├── supabase-hybrid.mdx │ │ │ │ └── vectorstore.md │ │ │ ├── text_splitters │ │ │ │ ├── examples │ │ │ │ │ ├── character.mdx │ │ │ │ │ ├── index.mdx │ │ │ │ │ ├── markdown.mdx │ │ │ │ │ ├── recursive_character.mdx │ │ │ │ │ └── token.mdx │ │ │ │ └── index.mdx │ │ │ └── vector_stores │ │ │ │ ├── index.mdx │ │ │ │ └── integrations │ │ │ │ ├── chroma.md │ │ │ │ ├── hnswlib.mdx │ │ │ │ ├── index.mdx │ │ │ │ ├── memory.mdx │ │ │ │ ├── milvus.md │ │ │ │ ├── opensearch.md │ │ │ │ ├── pinecone.md │ │ │ │ ├── prisma.mdx │ │ │ │ ├── supabase.mdx │ │ │ │ └── weaviate.mdx │ │ ├── memory │ │ │ ├── examples │ │ │ │ ├── buffer_memory.md │ │ │ │ ├── buffer_memory_chat.mdx │ │ │ │ ├── buffer_window_memory.md │ │ │ │ ├── conversation_summary.mdx │ │ │ │ ├── index.mdx │ │ │ │ └── motorhead_memory.md │ │ │ └── index.mdx │ │ ├── models │ │ │ ├── chat │ │ │ │ ├── additional_functionality.mdx │ │ │ │ ├── index.mdx │ │ │ │ └── integrations.mdx │ │ │ ├── embeddings │ │ │ │ ├── additional_functionality.mdx │ │ │ │ ├── 
index.mdx │ │ │ │ └── integrations.mdx │ │ │ ├── index.mdx │ │ │ └── llms │ │ │ │ ├── additional_functionality.mdx │ │ │ │ ├── index.mdx │ │ │ │ └── integrations.mdx │ │ ├── prompts │ │ │ ├── example_selectors │ │ │ │ └── index.mdx │ │ │ ├── index.mdx │ │ │ ├── output_parsers │ │ │ │ └── index.mdx │ │ │ └── prompt_templates │ │ │ │ ├── additional_functionality.mdx │ │ │ │ └── index.mdx │ │ └── schema │ │ │ ├── chat-messages.md │ │ │ ├── document.md │ │ │ ├── example.md │ │ │ └── index.mdx │ ├── production │ │ ├── callbacks.mdx │ │ ├── deployment.md │ │ └── tracing.md │ └── use_cases │ │ ├── api.mdx │ │ ├── personal_assistants.mdx │ │ ├── question_answering.mdx │ │ ├── summarization.mdx │ │ └── tabular.mdx ├── docusaurus.config.js ├── package.json ├── sidebars.js ├── src │ ├── css │ │ └── custom.css │ ├── pages │ │ └── index.js │ └── theme │ │ └── SearchBar.js └── static │ ├── .nojekyll │ └── img │ ├── DataberryDashboard.png │ ├── HeliconeDashboard.png │ ├── HeliconeKeys.png │ ├── android-chrome-192x192.png │ ├── android-chrome-512x512.png │ ├── apple-touch-icon.png │ ├── favicon-16x16.png │ ├── favicon-32x32.png │ ├── favicon.ico │ ├── parrot-chainlink-icon.png │ └── parrot-icon.png ├── examples ├── .env.example ├── .eslintrc.cjs ├── .yarn │ └── install-state.gz ├── openai_openapi.yaml ├── package.json ├── src │ ├── README.md │ ├── agents │ │ ├── aiplugin-tool.ts │ │ ├── chat_convo_with_tracing.ts │ │ ├── chat_mrkl.ts │ │ ├── chat_mrkl_with_tracing.ts │ │ ├── concurrent_mrkl.ts │ │ ├── custom_agent.ts │ │ ├── custom_llm_agent.ts │ │ ├── custom_llm_agent_chat.ts │ │ ├── custom_tool.ts │ │ ├── json.ts │ │ ├── load_from_hub.ts │ │ ├── mrkl.ts │ │ ├── mrkl_browser.ts │ │ ├── mrkl_with_tracing.ts │ │ ├── openapi.ts │ │ ├── sql.ts │ │ ├── streaming.ts │ │ ├── vectorstore.ts │ │ └── zapier_mrkl.ts │ ├── callbacks │ │ └── console_handler.ts │ ├── chains │ │ ├── analyze_document_chain_summarize.ts │ │ ├── chat_vector_db_chroma.ts │ │ ├── conversation_chain.ts │ │ ├── 
conversational_qa.ts │ │ ├── llm_chain.ts │ │ ├── llm_chain_stream.ts │ │ ├── load_from_hub.ts │ │ ├── qa_refine.ts │ │ ├── question_answering.ts │ │ ├── question_answering_map_reduce.ts │ │ ├── retrieval_qa.ts │ │ ├── retrieval_qa_with_remote.ts │ │ ├── sql_db.ts │ │ ├── summarization.ts │ │ └── summarization_map_reduce.ts │ ├── chat │ │ ├── agent.ts │ │ ├── llm_chain.ts │ │ ├── memory.ts │ │ └── overview.ts │ ├── customParameters │ │ └── differentBaseUrl.ts │ ├── document_loaders │ │ ├── cheerio_web.ts │ │ ├── college_confidential.ts │ │ ├── example_data │ │ │ ├── Star_Wars_The_Clone_Wars_S06E07_Crisis_at_the_Heart.srt │ │ │ ├── example.txt │ │ │ └── notion.md │ │ ├── gitbook.ts │ │ ├── github.ts │ │ ├── hn.ts │ │ ├── imsdb.ts │ │ ├── notion_markdown.ts │ │ ├── puppeteer_web.ts │ │ ├── s3.ts │ │ ├── srt.ts │ │ ├── text.ts │ │ └── unstructured.ts │ ├── embeddings │ │ ├── cohere.ts │ │ ├── max_concurrency.ts │ │ └── openai.ts │ ├── index.ts │ ├── indexes │ │ ├── recursive_text_splitter.ts │ │ ├── text_splitter.ts │ │ ├── token_text_splitter.ts │ │ └── vector_stores │ │ │ ├── hnswlib.ts │ │ │ ├── hnswlib_fromdocs.ts │ │ │ ├── hnswlib_saveload.ts │ │ │ ├── memory.ts │ │ │ ├── memory_custom_similarity.ts │ │ │ ├── memory_fromdocs.ts │ │ │ ├── milvus.ts │ │ │ ├── mongo_fromTexts.ts │ │ │ ├── mongo_search.ts │ │ │ ├── opensearch │ │ │ ├── docker-compose.yml │ │ │ └── opensearch.ts │ │ │ ├── pinecone.ts │ │ │ ├── prisma_vectorstore │ │ │ ├── .env.example │ │ │ ├── .gitignore │ │ │ ├── docker-compose.example.yml │ │ │ ├── prisma.ts │ │ │ └── prisma │ │ │ │ ├── migrations │ │ │ │ ├── 00_init │ │ │ │ │ └── migration.sql │ │ │ │ └── migration_lock.toml │ │ │ │ └── schema.prisma │ │ │ ├── supabase.ts │ │ │ ├── weaviate_fromTexts.ts │ │ │ └── weaviate_search.ts │ ├── llms │ │ ├── cohere.ts │ │ ├── hf.ts │ │ ├── openai-chat.ts │ │ ├── openai.ts │ │ └── replicate.ts │ ├── memory │ │ ├── buffer.ts │ │ ├── buffer_window.ts │ │ ├── summary_chat.ts │ │ └── summary_llm.ts │ ├── 
models │ │ ├── chat │ │ │ ├── chat.ts │ │ │ ├── chat_quick_start.ts │ │ │ ├── chat_streaming.ts │ │ │ ├── chat_streaming_stdout.ts │ │ │ └── chat_timeout.ts │ │ ├── embeddings │ │ │ ├── cohere.ts │ │ │ ├── openai.ts │ │ │ └── openai_timeout.ts │ │ └── llm │ │ │ ├── llm.ts │ │ │ ├── llm_debugging.ts │ │ │ ├── llm_promptlayer.ts │ │ │ ├── llm_quick_start.ts │ │ │ ├── llm_streaming.ts │ │ │ ├── llm_streaming_stdout.ts │ │ │ ├── llm_timeout.ts │ │ │ ├── llm_with_tracing.ts │ │ │ └── replicate.ts │ ├── prompts │ │ ├── combining_parser.ts │ │ ├── comma_list_parser.ts │ │ ├── few_shot.ts │ │ ├── fix_parser.ts │ │ ├── length_based_example_selector.ts │ │ ├── load_from_hub.ts │ │ ├── partial.ts │ │ ├── prompt_value.ts │ │ ├── prompts.ts │ │ ├── semantic_similarity_example_selector.ts │ │ ├── structured_parser.ts │ │ └── structured_parser_zod.ts │ ├── retrievers │ │ ├── chatgpt-plugin.ts │ │ ├── databerry.ts │ │ ├── metal.ts │ │ └── supabase_hybrid.ts │ └── tools │ │ └── webbrowser.ts ├── state_of_the_union.txt └── tsconfig.json ├── langchain ├── .env.example ├── .eslintrc.cjs ├── .gitignore ├── .release-it.json ├── README.md ├── babel.config.cjs ├── docker-compose.yaml ├── jest.config.cjs ├── package.json ├── scripts │ ├── check-tree-shaking.js │ ├── create-entrypoints.js │ └── move-cjs-to-dist.js ├── src │ ├── agents │ │ ├── agent.ts │ │ ├── agent_toolkits │ │ │ ├── base.ts │ │ │ ├── index.ts │ │ │ ├── json │ │ │ │ ├── json.ts │ │ │ │ └── prompt.ts │ │ │ ├── openapi │ │ │ │ ├── openapi.ts │ │ │ │ └── prompt.ts │ │ │ ├── sql │ │ │ │ ├── prompt.ts │ │ │ │ └── sql.ts │ │ │ ├── vectorstore │ │ │ │ ├── prompt.ts │ │ │ │ └── vectorstore.ts │ │ │ └── zapier │ │ │ │ └── zapier.ts │ │ ├── chat │ │ │ ├── index.ts │ │ │ └── prompt.ts │ │ ├── chat_convo │ │ │ ├── index.ts │ │ │ └── prompt.ts │ │ ├── executor.ts │ │ ├── helpers.ts │ │ ├── index.ts │ │ ├── initialize.ts │ │ ├── load.ts │ │ ├── mrkl │ │ │ ├── index.ts │ │ │ └── prompt.ts │ │ ├── tests │ │ │ ├── agent.int.test.ts │ │ │ 
├── aws_lambda.test.ts │ │ │ ├── calculator.test.ts │ │ │ ├── json.test.ts │ │ │ ├── sql.test.ts │ │ │ └── zapier_toolkit.int.test.ts │ │ └── types.ts │ ├── base_language │ │ ├── count_tokens.ts │ │ └── index.ts │ ├── cache.ts │ ├── callbacks │ │ ├── base.ts │ │ ├── index.ts │ │ ├── stream.ts │ │ ├── tests │ │ │ ├── callbacks.test.ts │ │ │ ├── langchain_tracer.int.test.ts │ │ │ └── tracer.test.ts │ │ ├── tracers.ts │ │ └── utils.ts │ ├── chains │ │ ├── analyze_documents_chain.ts │ │ ├── base.ts │ │ ├── chat_vector_db_chain.ts │ │ ├── combine_docs_chain.ts │ │ ├── conversation.ts │ │ ├── conversational_retrieval_chain.ts │ │ ├── index.ts │ │ ├── llm_chain.ts │ │ ├── load.ts │ │ ├── prompt_selector.ts │ │ ├── question_answering │ │ │ ├── load.ts │ │ │ ├── map_reduce_prompts.ts │ │ │ ├── refine_prompts.ts │ │ │ ├── stuff_prompts.ts │ │ │ └── tests │ │ │ │ └── load.int.test.ts │ │ ├── retrieval_qa.ts │ │ ├── serde.ts │ │ ├── sql_db │ │ │ ├── sql_db_chain.ts │ │ │ └── sql_db_prompt.ts │ │ ├── summarization │ │ │ ├── load.ts │ │ │ ├── stuff_prompts.ts │ │ │ └── tests │ │ │ │ └── load.int.test.ts │ │ ├── tests │ │ │ ├── chat_vector_db_qa_chain.int.test.ts │ │ │ ├── combine_docs_chain.int.test.ts │ │ │ ├── combine_docs_chain.test.ts │ │ │ ├── llm_chain.int.test.ts │ │ │ ├── sql_db_chain.int.test.ts │ │ │ └── vector_db_qa_chain.int.test.ts │ │ └── vector_db_qa.ts │ ├── chat_models │ │ ├── anthropic.ts │ │ ├── base.ts │ │ ├── index.ts │ │ ├── openai.ts │ │ └── tests │ │ │ ├── chatanthropic.int.test.ts │ │ │ └── chatopenai.int.test.ts │ ├── docstore │ │ ├── base.ts │ │ ├── in_memory.ts │ │ └── index.ts │ ├── document.ts │ ├── document_loaders │ │ ├── base.ts │ │ ├── fs │ │ │ ├── buffer.ts │ │ │ ├── csv.ts │ │ │ ├── directory.ts │ │ │ ├── docx.ts │ │ │ ├── epub.ts │ │ │ ├── json.ts │ │ │ ├── notion.ts │ │ │ ├── pdf.ts │ │ │ ├── srt.ts │ │ │ ├── text.ts │ │ │ └── unstructured.ts │ │ ├── index.ts │ │ ├── tests │ │ │ ├── cheerio.test.ts │ │ │ ├── college_confidential.int.test.ts 
│ │ │ ├── csv-blob.test.ts │ │ │ ├── csv.test.ts │ │ │ ├── directory.test.ts │ │ │ ├── docx.test.ts │ │ │ ├── epub.test.ts │ │ │ ├── example_data │ │ │ │ ├── 1706.03762.pdf │ │ │ │ ├── Star_Wars_The_Clone_Wars_S06E07_Crisis_at_the_Heart.csv │ │ │ │ ├── Star_Wars_The_Clone_Wars_S06E07_Crisis_at_the_Heart.json │ │ │ │ ├── Star_Wars_The_Clone_Wars_S06E07_Crisis_at_the_Heart.jsonl │ │ │ │ ├── Star_Wars_The_Clone_Wars_S06E07_Crisis_at_the_Heart.srt │ │ │ │ ├── attention.docx │ │ │ │ ├── attention.epub │ │ │ │ ├── complex.json │ │ │ │ ├── example.txt │ │ │ │ └── notion.md │ │ │ ├── github.int.test.ts │ │ │ ├── hn.int.test.ts │ │ │ ├── imsdb.test.ts │ │ │ ├── json-blob.test.ts │ │ │ ├── json.test.ts │ │ │ ├── jsonl-blob.test.ts │ │ │ ├── jsonl.test.ts │ │ │ ├── notion.test.ts │ │ │ ├── pdf-blob.test.ts │ │ │ ├── pdf.test.ts │ │ │ ├── playwright_web.int.test.ts │ │ │ ├── puppeteer.int.test.ts │ │ │ ├── s3.test.ts │ │ │ ├── srt-blob.test.ts │ │ │ ├── srt.test.ts │ │ │ ├── text-blob.test.ts │ │ │ ├── text.test.ts │ │ │ └── unstructured.test.ts │ │ └── web │ │ │ ├── cheerio.ts │ │ │ ├── college_confidential.ts │ │ │ ├── gitbook.ts │ │ │ ├── github.ts │ │ │ ├── hn.ts │ │ │ ├── imsdb.ts │ │ │ ├── playwright.ts │ │ │ ├── puppeteer.ts │ │ │ └── s3.ts │ ├── embeddings │ │ ├── base.ts │ │ ├── cohere.ts │ │ ├── fake.ts │ │ ├── index.ts │ │ ├── openai.ts │ │ └── tests │ │ │ ├── cohere.int.test.ts │ │ │ └── openai.int.test.ts │ ├── index.ts │ ├── llms │ │ ├── base.ts │ │ ├── cohere.ts │ │ ├── hf.ts │ │ ├── index.ts │ │ ├── load.ts │ │ ├── openai-chat.ts │ │ ├── openai.ts │ │ ├── replicate.ts │ │ └── tests │ │ │ ├── cohere.int.test.ts │ │ │ ├── huggingface_hub.int.test.ts │ │ │ ├── openai-chat.int.test.ts │ │ │ ├── openai.int.test.ts │ │ │ └── replicate.int.test.ts │ ├── memory │ │ ├── base.ts │ │ ├── buffer_memory.ts │ │ ├── buffer_window_memory.ts │ │ ├── chat_memory.ts │ │ ├── index.ts │ │ ├── motorhead_memory.ts │ │ ├── prompt.ts │ │ ├── summary.ts │ │ └── tests │ │ │ ├── 
buffer_memory.test.ts │ │ │ ├── buffer_window_memory.test.ts │ │ │ ├── motorhead_memory.test.ts │ │ │ └── summary.int.test.ts │ ├── output_parsers │ │ ├── combining.ts │ │ ├── fix.ts │ │ ├── index.ts │ │ ├── list.ts │ │ ├── prompts.ts │ │ ├── regex.ts │ │ ├── structured.ts │ │ └── tests │ │ │ ├── list.test.ts │ │ │ └── structured.test.ts │ ├── prompts │ │ ├── base.ts │ │ ├── chat.ts │ │ ├── few_shot.ts │ │ ├── index.ts │ │ ├── load.ts │ │ ├── prompt.ts │ │ ├── selectors │ │ │ ├── LengthBasedExampleSelector.ts │ │ │ └── SemanticSimilarityExampleSelector.ts │ │ ├── serde.ts │ │ ├── template.ts │ │ └── tests │ │ │ ├── __snapshots__ │ │ │ └── chat.test.ts.snap │ │ │ ├── chat.test.ts │ │ │ ├── few_shot.test.ts │ │ │ ├── load.int.test.ts │ │ │ ├── prompt.test.ts │ │ │ ├── prompts │ │ │ └── hello_world.yaml │ │ │ ├── selectors.test.ts │ │ │ └── template.test.ts │ ├── retrievers │ │ ├── databerry.ts │ │ ├── index.ts │ │ ├── metal.ts │ │ ├── remote │ │ │ ├── base.ts │ │ │ ├── chatgpt-plugin.ts │ │ │ ├── index.ts │ │ │ └── remote-retriever.ts │ │ ├── supabase.ts │ │ └── tests │ │ │ ├── metal.int.test.ts │ │ │ └── supabase.int.test.ts │ ├── schema │ │ └── index.ts │ ├── sql_db.ts │ ├── tests │ │ ├── cache.test.ts │ │ ├── sql_database.int.test.ts │ │ └── text_splitter.test.ts │ ├── text_splitter.ts │ ├── tools │ │ ├── IFTTTWebhook.ts │ │ ├── aiplugin.ts │ │ ├── aws_lambda.ts │ │ ├── base.ts │ │ ├── bingserpapi.ts │ │ ├── calculator.ts │ │ ├── chain.ts │ │ ├── dadjokeapi.ts │ │ ├── dynamic.ts │ │ ├── fixtures │ │ │ └── wordoftheday.html │ │ ├── index.ts │ │ ├── json.ts │ │ ├── requests.ts │ │ ├── serpapi.ts │ │ ├── serper.ts │ │ ├── sql.ts │ │ ├── tests │ │ │ ├── webbrowser.int.test.ts │ │ │ └── webbrowser.test.ts │ │ ├── vectorstore.ts │ │ ├── webbrowser.ts │ │ └── zapier.ts │ ├── types │ │ └── pdf-parse.d.ts │ ├── util │ │ ├── async_caller.ts │ │ ├── axios-fetch-adapter.d.ts │ │ ├── axios-fetch-adapter.js │ │ ├── axios-types.ts │ │ ├── chunk.ts │ │ ├── env.ts │ │ ├── 
event-source-parse.ts │ │ ├── extname.ts │ │ ├── hub.ts │ │ ├── load.ts │ │ ├── parse.ts │ │ ├── sql_utils.ts │ │ └── tests │ │ │ ├── async_caller.test.ts │ │ │ └── sql_utils.test.ts │ └── vectorstores │ │ ├── base.ts │ │ ├── chroma.ts │ │ ├── hnswlib.ts │ │ ├── index.ts │ │ ├── memory.ts │ │ ├── milvus.ts │ │ ├── mongo.ts │ │ ├── opensearch.ts │ │ ├── pinecone.ts │ │ ├── prisma.ts │ │ ├── supabase.ts │ │ ├── tests │ │ ├── chroma.test.ts │ │ ├── hnswlib.int.test.ts │ │ ├── hnswlib.test.ts │ │ ├── memory.int.test.ts │ │ ├── milvus.int.test.ts │ │ ├── mongo.int.test.ts │ │ ├── opensearch.int.test.ts │ │ ├── pinecone.int.test.ts │ │ ├── pinecone.test.ts │ │ ├── supabase.int.test.ts │ │ └── weaviate.int.test.ts │ │ └── weaviate.ts ├── tsconfig.cjs.json └── tsconfig.json ├── package.json ├── scripts ├── docker-ci-entrypoint.sh └── release-branch.sh ├── test-exports-vercel ├── .eslintrc.json ├── .gitignore ├── README.md ├── next.config.js ├── package.json ├── public │ ├── favicon.ico │ ├── next.svg │ ├── thirteen.svg │ └── vercel.svg ├── src │ ├── entrypoints.js │ ├── images │ │ ├── howToWork.png │ │ ├── sSLCertified.png │ │ └── safeFromBugs.png │ ├── pages │ │ ├── _app.tsx │ │ ├── _document.tsx │ │ ├── api │ │ │ ├── ask-question.ts │ │ │ ├── hello-edge.ts │ │ │ ├── hello-serverless.ts │ │ │ └── index-codebase.ts │ │ ├── components │ │ │ ├── ChatComponent.tsx │ │ │ └── IndexCodebaseForm.tsx │ │ └── index.tsx │ └── styles │ │ ├── Home.module.css │ │ └── globals.css └── tsconfig.json ├── turbo.json └── yarn.lock /.editorconfig: -------------------------------------------------------------------------------- 1 | # top-most EditorConfig file 2 | root = true 3 | 4 | # Unix-style newlines with a newline ending every file 5 | [*] 6 | end_of_line = lf -------------------------------------------------------------------------------- /.env.example: -------------------------------------------------------------------------------- 1 | NEXT_PUBLIC_OPENAI_API_KEY= 
-------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | * text=auto eol=lf -------------------------------------------------------------------------------- /.github/workflows/integration.yml: -------------------------------------------------------------------------------- 1 | # This workflow will do a clean installation of node dependencies, cache/restore them, build the source code and run tests across different versions of node 2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-nodejs 3 | 4 | name: Node.js Integration Tests 5 | 6 | on: 7 | workflow_dispatch: 8 | 9 | jobs: 10 | build: 11 | strategy: 12 | matrix: 13 | os: [macos-latest, windows-latest, ubuntu-latest] 14 | node-version: [18.x] 15 | # See supported Node.js release schedule at https://nodejs.org/en/about/releases/ 16 | 17 | runs-on: ${{ matrix.os }} 18 | 19 | steps: 20 | - uses: actions/checkout@v3 21 | - name: Use Node.js ${{ matrix.node-version }} 22 | uses: actions/setup-node@v3 23 | with: 24 | node-version: ${{ matrix.node-version }} 25 | cache: "yarn" 26 | - name: Install dependencies 27 | run: yarn install --immutable 28 | - run: yarn run ci 29 | - run: yarn workspace langchain run test:integration 30 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules/ 2 | dist/ 3 | dist-cjs/ 4 | lib/ 5 | .turbo 6 | .eslintcache 7 | .env 8 | yarn-error.log 9 | 10 | .yarn/* 11 | !.yarn/patches 12 | !.yarn/plugins 13 | !.yarn/releases 14 | !.yarn/sdks 15 | !.yarn/versions 16 | 17 | langchain/docs/ 18 | 19 | .idea/ 20 | 21 | .DS_Store 22 | 23 | Chinook.db 24 | Chinook_Sqlite.sql 25 | 26 | .envrc -------------------------------------------------------------------------------- 
/.husky/pre-commit: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | . "$(dirname -- "$0")/_/husky.sh" 3 | 4 | npx turbo run precommit 5 | -------------------------------------------------------------------------------- /.nvmrc: -------------------------------------------------------------------------------- 1 | 18 2 | -------------------------------------------------------------------------------- /.prettierignore: -------------------------------------------------------------------------------- 1 | babel.config.js 2 | jest.config.js 3 | .eslintrc.js 4 | -------------------------------------------------------------------------------- /.prettierrc: -------------------------------------------------------------------------------- 1 | { 2 | "endOfLine": "lf" 3 | } -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "eslint.workingDirectories": [ 3 | "./langchain", 4 | "./examples", 5 | "./docs", 6 | "./test-exports-vercel", 7 | "./test-exports-cra", 8 | ], 9 | "yaml.schemas": { 10 | "https://json.schemastore.org/github-workflow.json": "./.github/workflows/deploy.yml" 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /.watchmanconfig: -------------------------------------------------------------------------------- 1 | { 2 | "ignore_dirs": [ 3 | "langchain/dist", 4 | "langchain/dist-cjs", 5 | "docs/build", 6 | "node_modules", 7 | "langchain/.turbo", 8 | "docs/.turbo", 9 | "test-exports/.turbo", 10 | "test-exports-cjs/.turbo" 11 | ] 12 | } 13 | -------------------------------------------------------------------------------- /.yarnrc.yml: -------------------------------------------------------------------------------- 1 | nodeLinker: node-modules 2 | 3 | plugins: 4 | - path: .yarn/plugins/@yarnpkg/plugin-typescript.cjs 5 | 
spec: "@yarnpkg/plugin-typescript" 6 | 7 | yarnPath: .yarn/releases/yarn-3.4.1.cjs 8 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License 2 | 3 | Copyright (c) Harrison Chase 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 
-------------------------------------------------------------------------------- /docs/.gitignore: -------------------------------------------------------------------------------- 1 | # Dependencies 2 | /node_modules 3 | 4 | # Production 5 | /build 6 | 7 | # Generated files 8 | .docusaurus 9 | .cache-loader 10 | docs/api 11 | 12 | # Misc 13 | .DS_Store 14 | .env.local 15 | .env.development.local 16 | .env.test.local 17 | .env.production.local 18 | 19 | npm-debug.log* 20 | yarn-debug.log* 21 | yarn-error.log* 22 | 23 | # ESLint 24 | .eslintcache 25 | -------------------------------------------------------------------------------- /docs/.prettierignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | build 3 | .docusaurus 4 | docs/api -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | # Website 2 | 3 | This website is built using [Docusaurus 2](https://docusaurus.io/), a modern static website generator. 4 | 5 | ### Installation 6 | 7 | ``` 8 | $ yarn 9 | ``` 10 | 11 | ### Local Development 12 | 13 | ``` 14 | $ yarn start 15 | ``` 16 | 17 | This command starts a local development server and opens up a browser window. Most changes are reflected live without having to restart the server. 18 | 19 | ### Build 20 | 21 | ``` 22 | $ yarn build 23 | ``` 24 | 25 | This command generates static content into the `build` directory and can be served using any static contents hosting service. 26 | 27 | ### Deployment 28 | 29 | Using SSH: 30 | 31 | ``` 32 | $ USE_SSH=true yarn deploy 33 | ``` 34 | 35 | Not using SSH: 36 | 37 | ``` 38 | $ GIT_USER= yarn deploy 39 | ``` 40 | 41 | If you are using GitHub pages for hosting, this command is a convenient way to build the website and push to the `gh-pages` branch. 
42 | 43 | ### Continuous Integration 44 | 45 | Some common defaults for linting/formatting have been set for you. If you integrate your project with an open source Continuous Integration system (e.g. Travis CI, CircleCI), you may check for issues using the following command. 46 | 47 | ``` 48 | $ yarn ci 49 | ``` 50 | -------------------------------------------------------------------------------- /docs/babel.config.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) Meta Platforms, Inc. and affiliates. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 6 | * 7 | * @format 8 | */ 9 | 10 | module.exports = { 11 | presets: [require.resolve("@docusaurus/core/lib/babel/preset")], 12 | }; 13 | -------------------------------------------------------------------------------- /docs/docs/modules/agents/agents/custom_llm.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | sidebar_position: 1 4 | --- 5 | 6 | import CodeBlock from "@theme/CodeBlock"; 7 | import Example from "@examples/agents/custom_llm_agent.ts"; 8 | 9 | # Custom LLM Agent 10 | 11 | This example covers how to create a custom Agent powered by an LLM. 12 | 13 | {Example} 14 | -------------------------------------------------------------------------------- /docs/docs/modules/agents/agents/custom_llm_chat.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | sidebar_position: 1 4 | --- 5 | 6 | import CodeBlock from "@theme/CodeBlock"; 7 | import Example from "@examples/agents/custom_llm_agent_chat.ts"; 8 | 9 | # Custom LLM Agent (with Chat Model) 10 | 11 | This example covers how to create a custom Agent powered by a Chat Model. 
12 | 13 | {Example} 14 | -------------------------------------------------------------------------------- /docs/docs/modules/agents/agents/examples/chat_mrkl.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | sidebar_position: 2 4 | --- 5 | 6 | import CodeBlock from "@theme/CodeBlock"; 7 | import Example from "@examples/agents/chat_mrkl.ts"; 8 | 9 | # MRKL Agent for Chat Models 10 | 11 | This example covers how to use an agent that uses the ReAct Framework (based on the descriptions of tools) to decide what action to take. This agent is optimized to be used with Chat Models. If you want to use it with an LLM, you can use the [LLM MRKL Agent](./llm_mrkl) instead. 12 | 13 | {Example} 14 | -------------------------------------------------------------------------------- /docs/docs/modules/agents/agents/examples/custom_agent_chat.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | --- 4 | 5 | import CodeBlock from "@theme/CodeBlock"; 6 | import Example from "@examples/chat/agent.ts"; 7 | 8 | # Agent with Custom Prompt, using Chat Models 9 | 10 | This example covers how to create a custom agent for a chat model. It will utilize chat specific prompts. 
11 | 12 | {Example} 13 | -------------------------------------------------------------------------------- /docs/docs/modules/agents/agents/examples/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_label: Examples 3 | hide_table_of_contents: true 4 | --- 5 | 6 | import DocCardList from "@theme/DocCardList"; 7 | 8 | # Examples: Agents 9 | 10 | 11 | -------------------------------------------------------------------------------- /docs/docs/modules/agents/agents/examples/llm_mrkl.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | sidebar_position: 1 4 | --- 5 | 6 | import CodeBlock from "@theme/CodeBlock"; 7 | import Example from "@examples/agents/mrkl.ts"; 8 | 9 | # MRKL Agent for LLMs 10 | 11 | This example covers how to use an agent that uses the ReAct Framework (based on the descriptions of tools) to decide what action to take. This agent is optimized to be used with LLMs. If you want to use it with a chat model, try the [Chat MRKL Agent](./chat_mrkl). 12 | 13 | {Example} 14 | -------------------------------------------------------------------------------- /docs/docs/modules/agents/executor/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | --- 4 | 5 | # Agent Executors 6 | 7 | :::info 8 | [Conceptual Guide](https://docs.langchain.com/docs/components/agents/agent-executor) 9 | ::: 10 | 11 | To make agents more powerful we need to make them iterative, i.e. call the model multiple times until they arrive at the final answer. That's the job of the AgentExecutor. 
12 | 13 | ```typescript 14 | class AgentExecutor { 15 | // a simplified implementation 16 | async run(inputs: object) { 17 | const steps = []; 18 | while (true) { 19 | const step = await this.agent.plan(steps, inputs); 20 | if (step instanceof AgentFinish) { 21 | return step.returnValues; 22 | } 23 | steps.push(step); 24 | } 25 | } 26 | } 27 | ``` 28 | 29 | import DocCardList from "@theme/DocCardList"; 30 | 31 | 32 | -------------------------------------------------------------------------------- /docs/docs/modules/agents/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_position: 7 3 | hide_table_of_contents: true 4 | --- 5 | 6 | import DocCardList from "@theme/DocCardList"; 7 | 8 | # Agents 9 | 10 | :::info 11 | [Conceptual Guide](https://docs.langchain.com/docs/components/agents) 12 | ::: 13 | 14 | 15 | -------------------------------------------------------------------------------- /docs/docs/modules/agents/toolkits/examples/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_label: Examples 3 | hide_table_of_contents: true 4 | --- 5 | 6 | # Examples: Toolkits 7 | 8 | import DocCardList from "@theme/DocCardList"; 9 | 10 | 11 | -------------------------------------------------------------------------------- /docs/docs/modules/agents/toolkits/examples/sql.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | --- 4 | 5 | # SQL Agent Toolkit 6 | 7 | This example shows how to load and use an agent with a SQL toolkit. 
8 | 9 | import CodeBlock from "@theme/CodeBlock"; 10 | import Example from "@examples/agents/sql.ts"; 11 | 12 | {Example} 13 | -------------------------------------------------------------------------------- /docs/docs/modules/agents/toolkits/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_label: Toolkits 3 | sidebar_position: 2 4 | hide_table_of_contents: true 5 | --- 6 | 7 | # Getting Started: Toolkits 8 | 9 | :::info 10 | [Conceptual Guide](https://docs.langchain.com/docs/components/agents/toolkit) 11 | ::: 12 | 13 | Groups of [tools](../tools/) that can be used/are necessary to solve a particular problem. 14 | 15 | ```typescript 16 | interface Toolkit { 17 | tools: Tool[]; 18 | } 19 | ``` 20 | 21 | ## All Toolkits 22 | 23 | import DocCardList from "@theme/DocCardList"; 24 | 25 | 26 | -------------------------------------------------------------------------------- /docs/docs/modules/chains/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | sidebar_label: Chains 4 | sidebar_position: 6 5 | --- 6 | 7 | import DocCardList from "@theme/DocCardList"; 8 | 9 | # Getting Started: Chains 10 | 11 | :::info 12 | [Conceptual Guide](https://docs.langchain.com/docs/components/chains) 13 | ::: 14 | 15 | Using a language model in isolation is fine for some applications, but it is often useful to combine language models with other sources of information, third-party APIs, or even other language models. This is where the concept of a chain comes in. 16 | 17 | LangChain provides a standard interface for chains, as well as a number of built-in chains that can be used out of the box. You can also create your own chains. 
18 | 19 | 20 | -------------------------------------------------------------------------------- /docs/docs/modules/chains/index_related_chains/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | sidebar_label: Index Related Chains 4 | --- 5 | 6 | import DocCardList from "@theme/DocCardList"; 7 | 8 | # Index Related Chains 9 | 10 | :::info 11 | [Conceptual Guide](https://docs.langchain.com/docs/components/chains/index_related_chains) 12 | ::: 13 | 14 | Chains related to working with unstructured data stored in indexes. 15 | 16 | 17 | -------------------------------------------------------------------------------- /docs/docs/modules/chains/index_related_chains/retrieval_qa.mdx: -------------------------------------------------------------------------------- 1 | import RetrievalQAExample from "@examples/chains/retrieval_qa.ts"; 2 | import CodeBlock from "@theme/CodeBlock"; 3 | 4 | # `RetrievalQAChain` 5 | 6 | The `RetrievalQAChain` is a chain that combines a `Retriever` and a QA chain (described above). It is used to retrieve documents from a `Retriever` and then use a `QA` chain to answer a question based on the retrieved documents. 7 | 8 | In the below example, we are using a `VectorStore` as the `Retriever`. By default, the `StuffDocumentsChain` is used as the `QA` chain. 
9 | 10 | {RetrievalQAExample} 11 | -------------------------------------------------------------------------------- /docs/docs/modules/chains/llmchain/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | sidebar_label: LLM Chain 4 | --- 5 | 6 | import CodeBlock from "@theme/CodeBlock"; 7 | import Example from "@examples/chains/llm_chain.ts"; 8 | 9 | # Getting Started: LLMChain 10 | 11 | :::info 12 | [Conceptual Guide](https://docs.langchain.com/docs/components/chains/llm-chain) 13 | ::: 14 | 15 | An `LLMChain` is a simple chain that adds some functionality around language models. It is used widely throughout LangChain, including in other chains and agents. 16 | 17 | An `LLMChain` consists of a `PromptTemplate` and a language model (either an LLM or chat model). 18 | 19 | We can construct an LLMChain which takes user input, formats it with a PromptTemplate, and then passes the formatted response to an LLM: 20 | 21 | {Example} 22 | -------------------------------------------------------------------------------- /docs/docs/modules/chains/other_chains/analyze_document.mdx: -------------------------------------------------------------------------------- 1 | import CodeBlock from "@theme/CodeBlock"; 2 | import AnalyzeDocumentExample from "@examples/chains/analyze_document_chain_summarize.ts"; 3 | 4 | # `AnalyzeDocumentChain` 5 | 6 | You can use the `AnalyzeDocumentChain`, which accepts a single piece of text as input and operates over it. 7 | This chain takes care of splitting up the text and then passing it to the `MapReduceDocumentsChain` to generate a summary. 
8 | 9 | {AnalyzeDocumentExample} 10 | -------------------------------------------------------------------------------- /docs/docs/modules/chains/other_chains/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | sidebar_label: Other Chains 4 | --- 5 | 6 | import DocCardList from "@theme/DocCardList"; 7 | 8 | # Other Chains 9 | 10 | This section highlights other examples of chains that exist. 11 | 12 | 13 | -------------------------------------------------------------------------------- /docs/docs/modules/chains/other_chains/sql.mdx: -------------------------------------------------------------------------------- 1 | import CodeBlock from "@theme/CodeBlock"; 2 | import SqlDBExample from "@examples/chains/sql_db.ts"; 3 | 4 | # `SqlDatabaseChain` 5 | 6 | The `SqlDatabaseChain` allows you to answer questions over a SQL database. 7 | This example uses Chinook database, which is a sample database available for SQL Server, Oracle, MySQL, etc. 8 | 9 | ## Set up 10 | 11 | First install `typeorm`: 12 | 13 | ```bash npm2yarn 14 | npm install typeorm 15 | ``` 16 | 17 | Then install the dependencies needed for your database. For example, for SQLite: 18 | 19 | ```bash npm2yarn 20 | npm install sqlite3 21 | ``` 22 | 23 | For other databases see https://typeorm.io/#installation 24 | 25 | Finally follow the instructions on https://database.guide/2-sample-databases-sqlite/ to get the sample database for this example. 26 | 27 | {SqlDBExample} 28 | 29 | You can include or exclude tables when creating the `SqlDatabase` object to help the chain focus on the tables you want. 30 | It can also reduce the number of tokens used in the chain. 
31 | 32 | ```typescript 33 | const db = await SqlDatabase.fromDataSourceParams({ 34 | appDataSource: datasource, 35 | includeTables: ["Track"], 36 | }); 37 | ``` 38 | -------------------------------------------------------------------------------- /docs/docs/modules/chains/other_chains/summarization.mdx: -------------------------------------------------------------------------------- 1 | import CodeBlock from "@theme/CodeBlock"; 2 | import SummarizeExample from "@examples/chains/summarization_map_reduce.ts"; 3 | 4 | # Summarization 5 | 6 | A summarization chain can be used to summarize multiple documents. One way is to input multiple smaller documents, after they have been divided into chunks, and operate over them with a `MapReduceDocumentsChain`. 7 | 8 | {SummarizeExample} 9 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/document_loaders/examples/file_loaders/docx.md: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | --- 4 | 5 | # Docx files 6 | 7 | This example goes over how to load data from docx files. 8 | 9 | # Setup 10 | 11 | ```bash npm2yarn 12 | npm install mammoth 13 | ``` 14 | 15 | # Usage 16 | 17 | ```typescript 18 | import { DocxLoader } from "langchain/document_loaders/fs/docx"; 19 | 20 | const loader = new DocxLoader( 21 | "src/document_loaders/tests/example_data/attention.docx" 22 | ); 23 | 24 | const docs = await loader.load(); 25 | ``` 26 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/document_loaders/examples/file_loaders/epub.md: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | --- 4 | 5 | # EPUB files 6 | 7 | This example goes over how to load data from EPUB files. 
By default, one document will be created for each chapter in the EPUB file, you can change this behavior by setting the `splitChapters` option to `false`. 8 | 9 | # Setup 10 | 11 | ```bash npm2yarn 12 | npm install epub2 html-to-text 13 | ``` 14 | 15 | # Usage, one document per chapter 16 | 17 | ```typescript 18 | import { EPubLoader } from "langchain/document_loaders/fs/epub"; 19 | 20 | const loader = new EPubLoader("src/document_loaders/example_data/example.epub"); 21 | 22 | const docs = await loader.load(); 23 | ``` 24 | 25 | # Usage, one document per file 26 | 27 | ```typescript 28 | import { EPubLoader } from "langchain/document_loaders/fs/epub"; 29 | 30 | const loader = new EPubLoader( 31 | "src/document_loaders/example_data/example.epub", 32 | { 33 | splitChapters: false, 34 | } 35 | ); 36 | 37 | const docs = await loader.load(); 38 | ``` 39 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/document_loaders/examples/file_loaders/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | label: "File Loaders" 3 | hide_table_of_contents: true 4 | sidebar_class_name: node-only-category 5 | --- 6 | 7 | # File Loaders 8 | 9 | :::tip Compatibility 10 | Only available on Node.js. 11 | ::: 12 | 13 | These loaders are used to load files given a filesystem path or a Blob object. 14 | 15 | import DocCardList from "@theme/DocCardList"; 16 | 17 | 18 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/document_loaders/examples/file_loaders/jsonlines.md: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | --- 4 | 5 | # JSONLines files 6 | 7 | This example goes over how to load data from JSONLines or JSONL files. The second argument is a JSONPointer to the property to extract from each JSON object in the file. 
One document will be created for each JSON object in the file. 8 | 9 | Example JSONLines file: 10 | 11 | ```json 12 | {"html": "This is a sentence."} 13 | {"html": "This is another sentence."} 14 | ``` 15 | 16 | Example code: 17 | 18 | ```typescript 19 | import { JSONLinesLoader } from "langchain/document_loaders/fs/json"; 20 | 21 | const loader = new JSONLinesLoader( 22 | "src/document_loaders/example_data/example.jsonl", 23 | "/html" 24 | ); 25 | 26 | const docs = await loader.load(); 27 | /* 28 | [ 29 | Document { 30 | "metadata": { 31 | "blobType": "application/jsonl+json", 32 | "line": 1, 33 | "source": "blob", 34 | }, 35 | "pageContent": "This is a sentence.", 36 | }, 37 | Document { 38 | "metadata": { 39 | "blobType": "application/jsonl+json", 40 | "line": 2, 41 | "source": "blob", 42 | }, 43 | "pageContent": "This is another sentence.", 44 | }, 45 | ] 46 | */ 47 | ``` 48 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/document_loaders/examples/file_loaders/notion_markdown.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | --- 4 | 5 | # Notion markdown export 6 | 7 | This example goes over how to load data from your Notion pages exported from the notion dashboard. 8 | 9 | First, export your notion pages as **Markdown & CSV** as per the official explanation [here](https://www.notion.so/help/export-your-content). Make sure to select `include subpages` and `Create folders for subpages.` 10 | 11 | Then, unzip the downloaded file and move the unzipped folder into your repository. It should contain the markdown files of your pages. 
12 | 13 | Once the folder is in your repository, simply run the example below: 14 | 15 | import CodeBlock from "@theme/CodeBlock"; 16 | import Example from "@examples/document_loaders/notion_markdown.ts"; 17 | 18 | {Example} 19 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/document_loaders/examples/file_loaders/subtitles.md: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | --- 4 | 5 | # Subtitles 6 | 7 | This example goes over how to load data from subtitle files. One document will be created for each subtitles file. 8 | 9 | ## Setup 10 | 11 | ```bash npm2yarn 12 | npm install srt-parser-2 13 | ``` 14 | 15 | ## Usage 16 | 17 | ```typescript 18 | import { SRTLoader } from "langchain/document_loaders/fs/srt"; 19 | 20 | const loader = new SRTLoader( 21 | "src/document_loaders/example_data/Star_Wars_The_Clone_Wars_S06E07_Crisis_at_the_Heart.srt" 22 | ); 23 | 24 | const docs = await loader.load(); 25 | ``` 26 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/document_loaders/examples/file_loaders/text.md: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | --- 4 | 5 | # Text files 6 | 7 | This example goes over how to load data from text files. 
8 | 9 | ```typescript 10 | import { TextLoader } from "langchain/document_loaders/fs/text"; 11 | 12 | const loader = new TextLoader("src/document_loaders/example_data/example.txt"); 13 | 14 | const docs = await loader.load(); 15 | ``` 16 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/document_loaders/examples/file_loaders/unstructured.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | --- 4 | 5 | # Unstructured 6 | 7 | This example covers how to use [Unstructured](https://www.unstructured.io) to load files of many types. Unstructured currently supports loading of text files, powerpoints, html, pdfs, images, and more. 8 | 9 | ## Setup 10 | 11 | You can run Unstructured locally in your computer using Docker. To do so, you need to have Docker installed. You can find the instructions to install Docker [here](https://docs.docker.com/get-docker/). 12 | 13 | ```bash 14 | docker run -p 8000:8000 -d --rm --name unstructured-api quay.io/unstructured-io/unstructured-api:latest --port 8000 --host 0.0.0.0 15 | ``` 16 | 17 | ## Usage 18 | 19 | Once Unstructured is running, you can use it to load files from your computer. You can use the following code to load a file from your computer. 
20 | 21 | import CodeBlock from "@theme/CodeBlock"; 22 | import Example from "@examples/document_loaders/unstructured.ts"; 23 | 24 | {Example} 25 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/document_loaders/examples/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_label: Examples 3 | hide_table_of_contents: true 4 | --- 5 | 6 | import DocCardList from "@theme/DocCardList"; 7 | 8 | # Examples: Document Loaders 9 | 10 | 11 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/document_loaders/examples/web_loaders/college_confidential.md: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | --- 4 | 5 | # College Confidential 6 | 7 | This example goes over how to load data from the college confidential website, using Cheerio. One document will be created for each page. 8 | 9 | ## Setup 10 | 11 | ```bash npm2yarn 12 | npm install cheerio 13 | ``` 14 | 15 | ## Usage 16 | 17 | ```typescript 18 | import { CollegeConfidentialLoader } from "langchain/document_loaders/web/college_confidential"; 19 | 20 | const loader = new CollegeConfidentialLoader( 21 | "https://www.collegeconfidential.com/colleges/brown-university/" 22 | ); 23 | 24 | const docs = await loader.load(); 25 | ``` 26 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/document_loaders/examples/web_loaders/gitbook.md: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | --- 4 | 5 | # GitBook 6 | 7 | This example goes over how to load data from any GitBook, using Cheerio. One document will be created for each page. 
8 | 9 | ## Setup 10 | 11 | ```bash npm2yarn 12 | npm install cheerio 13 | ``` 14 | 15 | ## Load from single GitBook page 16 | 17 | ```typescript 18 | import { GitbookLoader } from "langchain/document_loaders/web/gitbook"; 19 | 20 | const loader = new GitbookLoader( 21 | "https://docs.gitbook.com/product-tour/navigation" 22 | ); 23 | 24 | const docs = await loader.load(); 25 | ``` 26 | 27 | ## Load from all paths in a given GitBook 28 | 29 | For this to work, the GitbookLoader needs to be initialized with the root path (https://docs.gitbook.com in this example) and have `shouldLoadAllPaths` set to `true`. 30 | 31 | ```typescript 32 | import { GitbookLoader } from "langchain/document_loaders/web/gitbook"; 33 | 34 | const loader = new GitbookLoader("https://docs.gitbook.com", { 35 | shouldLoadAllPaths: true, 36 | }); 37 | 38 | const docs = await loader.load(); 39 | ``` 40 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/document_loaders/examples/web_loaders/github.md: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | --- 4 | 5 | # GitHub 6 | 7 | This example goes over how to load data from a GitHub repository. 8 | You can set the `GITHUB_ACCESS_TOKEN` environment variable to a GitHub access token to increase the rate limit and access private repositories. 
9 | 10 | ```typescript 11 | import { GithubRepoLoader } from "langchain/document_loaders/web/github"; 12 | 13 | const loader = new GithubRepoLoader( 14 | "https://github.com/hwchase17/langchainjs", 15 | { branch: "main", recursive: false, unknown: "warn" } 16 | ); 17 | const docs = await loader.load(); 18 | ``` 19 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/document_loaders/examples/web_loaders/hn.md: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | --- 4 | 5 | # Hacker News 6 | 7 | This example goes over how to load data from the hacker news website, using Cheerio. One document will be created for each page. 8 | 9 | ## Setup 10 | 11 | ```bash npm2yarn 12 | npm install cheerio 13 | ``` 14 | 15 | ## Usage 16 | 17 | ```typescript 18 | import { HNLoader } from "langchain/document_loaders/web/hn"; 19 | 20 | const loader = new HNLoader("https://news.ycombinator.com/item?id=34817881"); 21 | 22 | const docs = await loader.load(); 23 | ``` 24 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/document_loaders/examples/web_loaders/imsdb.md: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | --- 4 | 5 | # IMSDB 6 | 7 | This example goes over how to load data from the internet movie script database website, using Cheerio. One document will be created for each page. 
8 | 9 | ## Setup 10 | 11 | ```bash npm2yarn 12 | npm install cheerio 13 | ``` 14 | 15 | ## Usage 16 | 17 | ```typescript 18 | import { IMSDBLoader } from "langchain/document_loaders/web/imsdb"; 19 | 20 | const loader = new IMSDBLoader("https://imsdb.com/scripts/BlacKkKlansman.html"); 21 | 22 | const docs = await loader.load(); 23 | ``` 24 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/document_loaders/examples/web_loaders/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | label: "Web Loaders" 3 | hide_table_of_contents: true 4 | --- 5 | 6 | # Web Loaders 7 | 8 | These loaders are used to load web resources. 9 | 10 | import DocCardList from "@theme/DocCardList"; 11 | 12 | 13 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/document_loaders/examples/web_loaders/s3.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | sidebar_class_name: node-only 4 | --- 5 | 6 | # S3 File 7 | 8 | :::tip Compatibility 9 | Only available on Node.js. 10 | ::: 11 | 12 | This covers how to load document objects from an s3 file object. 13 | 14 | ## Setup 15 | 16 | To run this index you'll need to have Unstructured already set up and ready to use at an available URL endpoint. It can also be configured to run locally. 17 | 18 | See the docs [here](https://js.langchain.com/docs/modules/indexes/document_loaders/examples/file_loaders/unstructured) for information on how to do that. 19 | 20 | ## Usage 21 | 22 | Once Unstructured is configured, you can use the S3 loader to load files and then convert them into a Document. 
23 | 24 | import CodeBlock from "@theme/CodeBlock"; 25 | import Example from "@examples/document_loaders/s3.ts"; 26 | 27 | {Example} 28 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_position: 4 3 | hide_table_of_contents: true 4 | --- 5 | 6 | import DocCardList from "@theme/DocCardList"; 7 | 8 | # Indexes 9 | 10 | :::info 11 | [Conceptual Guide](https://docs.langchain.com/docs/components/indexing) 12 | ::: 13 | 14 | This section deals with everything related to bringing your own data into LangChain, indexing it, and making it available for LLMs/Chat Models. 15 | 16 | 17 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/retrievers/chatgpt-retriever-plugin.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | --- 4 | 5 | # ChatGPT Plugin Retriever 6 | 7 | This example shows how to use the ChatGPT Retriever Plugin within LangChain. 8 | 9 | To set up the ChatGPT Retriever Plugin, please follow instructions [here](https://github.com/openai/chatgpt-retrieval-plugin). 10 | 11 | ## Usage 12 | 13 | import CodeBlock from "@theme/CodeBlock"; 14 | import Example from "@examples/retrievers/chatgpt-plugin.ts"; 15 | 16 | {Example} 17 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/retrievers/databerry-retriever.mdx: -------------------------------------------------------------------------------- 1 | # Databerry Retriever 2 | 3 | This example shows how to use the Databerry Retriever in a `RetrievalQAChain` to retrieve documents from a Databerry.ai datastore. 
4 | 5 | ## Usage 6 | 7 | import CodeBlock from "@theme/CodeBlock"; 8 | import Example from "@examples/retrievers/databerry.ts"; 9 | 10 | {Example} 11 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/retrievers/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | sidebar_position: 4 4 | --- 5 | 6 | import DocCardList from "@theme/DocCardList"; 7 | 8 | # Retrievers 9 | 10 | :::info 11 | [Conceptual Guide](https://docs.langchain.com/docs/components/indexing/retriever) 12 | ::: 13 | 14 | A way of storing data such that it can be queried by a language model. The only interface this object must expose is a `getRelevantDocuments` method which takes in a string query and returns a list of Documents. 15 | 16 | 17 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/retrievers/metal-retriever.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | --- 4 | 5 | # Metal Retriever 6 | 7 | This example shows how to use the Metal Retriever in a `RetrievalQAChain` to retrieve documents from a Metal index. 8 | 9 | ## Setup 10 | 11 | ```bash npm2yarn 12 | npm i @getmetal/metal-sdk 13 | ``` 14 | 15 | ## Usage 16 | 17 | import CodeBlock from "@theme/CodeBlock"; 18 | import Example from "@examples/retrievers/metal.ts"; 19 | 20 | {Example} 21 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/retrievers/remote-retriever.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | --- 4 | 5 | # Remote Retriever 6 | 7 | This example shows how to use a Remote Retriever in a `RetrievalQAChain` to retrieve documents from a remote server. 
8 | 9 | ## Usage 10 | 11 | import CodeBlock from "@theme/CodeBlock"; 12 | import Example from "@examples/chains/retrieval_qa_with_remote.ts"; 13 | 14 | {Example} 15 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/retrievers/vectorstore.md: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | --- 4 | 5 | # Vector Store 6 | 7 | Once you've created a [Vector Store](../vector_stores/), the way to use it as a Retriever is very simple: 8 | 9 | ```typescript 10 | vectorStore = ... 11 | retriever = vectorStore.asRetriever() 12 | ``` 13 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/text_splitters/examples/character.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | --- 4 | 5 | # CharacterTextSplitter 6 | 7 | Besides the `RecursiveCharacterTextSplitter`, there is also the more standard `CharacterTextSplitter`. This splits only on one type of character (defaults to `"\n\n"`). You can use it in the exact same way. 
8 | 9 | ```typescript 10 | import { Document } from "langchain/document"; 11 | import { CharacterTextSplitter } from "langchain/text_splitter"; 12 | 13 | const text = "foo bar baz 123"; 14 | const splitter = new CharacterTextSplitter({ 15 | separator: " ", 16 | chunkSize: 7, 17 | chunkOverlap: 3, 18 | }); 19 | const output = await splitter.createDocuments([text]); 20 | ``` 21 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/text_splitters/examples/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_label: Examples 3 | --- 4 | 5 | import DocCardList from "@theme/DocCardList"; 6 | 7 | # Text Splitters: Examples 8 | 9 | 10 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/text_splitters/examples/token.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | --- 4 | 5 | # TokenTextSplitter 6 | 7 | Finally, `TokenTextSplitter` splits a raw text string by first converting the text into BPE tokens, then split these tokens into chunks and convert the tokens within a single chunk back into text. 
8 | 9 | To utilize the `TokenTextSplitter`, first install the accompanying required library 10 | 11 | ```bash npm2yarn 12 | npm install -S @dqbd/tiktoken 13 | ``` 14 | 15 | Then, you can use it like so: 16 | 17 | ```typescript 18 | import { Document } from "langchain/document"; 19 | import { TokenTextSplitter } from "langchain/text_splitter"; 20 | 21 | const text = "foo bar baz 123"; 22 | 23 | const splitter = new TokenTextSplitter({ 24 | encodingName: "gpt2", 25 | chunkSize: 10, 26 | chunkOverlap: 0, 27 | }); 28 | 29 | const output = await splitter.createDocuments([text]); 30 | ``` 31 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/vector_stores/integrations/hnswlib.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_class_name: node-only 3 | --- 4 | 5 | import CodeBlock from "@theme/CodeBlock"; 6 | 7 | # HNSWLib 8 | 9 | :::tip Compatibility 10 | Only available on Node.js. 11 | ::: 12 | 13 | HNSWLib is an in-memory vectorstore that can be saved to a file. It uses [HNSWLib](https://github.com/nmslib/hnswlib). 
14 | 15 | ## Setup 16 | 17 | You can install it with 18 | 19 | ```bash npm2yarn 20 | npm install hnswlib-node 21 | ``` 22 | 23 | ## Usage 24 | 25 | ### Create a new index from texts 26 | 27 | import ExampleTexts from "@examples/indexes/vector_stores/hnswlib.ts"; 28 | 29 | {ExampleTexts} 30 | 31 | ### Create a new index from a loader 32 | 33 | import ExampleLoader from "@examples/indexes/vector_stores/hnswlib_fromdocs.ts"; 34 | 35 | {ExampleLoader} 36 | 37 | ### Save an index to a file and load it again 38 | 39 | import ExampleSave from "@examples/indexes/vector_stores/hnswlib_saveload.ts"; 40 | 41 | {ExampleSave} 42 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/vector_stores/integrations/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_label: Integrations 3 | --- 4 | 5 | import DocCardList from "@theme/DocCardList"; 6 | 7 | # Vector Stores: Integrations 8 | 9 | 10 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/vector_stores/integrations/memory.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | sidebar_label: Memory 4 | sidebar_position: 1 5 | --- 6 | 7 | import CodeBlock from "@theme/CodeBlock"; 8 | 9 | # `MemoryVectorStore` 10 | 11 | MemoryVectorStore is an in-memory, ephemeral vectorstore that stores embeddings in-memory and does an exact, linear search for the most similar embeddings. The default similarity metric is cosine similarity, but can be changed to any of the similarity metrics supported by [ml-distance](https://mljs.github.io/distance/modules/similarity.html). 
12 | 13 | ## Usage 14 | 15 | ### Create a new index from texts 16 | 17 | import ExampleTexts from "@examples/indexes/vector_stores/memory.ts"; 18 | 19 | {ExampleTexts} 20 | 21 | ### Create a new index from a loader 22 | 23 | import ExampleLoader from "@examples/indexes/vector_stores/memory_fromdocs.ts"; 24 | 25 | {ExampleLoader} 26 | 27 | ### Use a custom similarity metric 28 | 29 | import ExampleCustom from "@examples/indexes/vector_stores/memory_custom_similarity.ts"; 30 | 31 | {ExampleCustom} 32 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/vector_stores/integrations/weaviate.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | --- 4 | 5 | import CodeBlock from "@theme/CodeBlock"; 6 | 7 | # Weaviate 8 | 9 | Weaviate is an open source vector database that stores both objects and vectors, allowing for combining vector search with structured filtering. LangChain connects to Weaviate via the `weaviate-ts-client` package, the official Typescript client for Weaviate. 10 | 11 | LangChain inserts vectors directly to Weaviate, and queries Weaviate for the nearest neighbors of a given vector, so that you can use all the LangChain Embeddings integrations with Weaviate. 12 | 13 | ## Setup 14 | 15 | ```bash npm2yarn 16 | npm install weaviate-ts-client graphql 17 | ``` 18 | 19 | You'll need to run Weaviate either locally or on a server, see [the Weaviate documentation](https://weaviate.io/developers/weaviate/installation) for more information. 
20 | 21 | ## Usage, insert documents 22 | 23 | import InsertExample from "@examples/indexes/vector_stores/weaviate_fromTexts.ts"; 24 | 25 | {InsertExample} 26 | 27 | ## Usage, query documents 28 | 29 | import QueryExample from "@examples/indexes/vector_stores/weaviate_search.ts"; 30 | 31 | {QueryExample} 32 | -------------------------------------------------------------------------------- /docs/docs/modules/memory/examples/buffer_memory_chat.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | --- 4 | 5 | import CodeBlock from "@theme/CodeBlock"; 6 | import Example from "@examples/chat/memory.ts"; 7 | 8 | # Using Buffer Memory with Chat Models 9 | 10 | This example covers how to use chat-specific memory classes with chat models. 11 | The key thing to notice is that setting `returnMessages: true` makes the memory return a list of chat messages instead of a string. 12 | 13 | {Example} 14 | -------------------------------------------------------------------------------- /docs/docs/modules/memory/examples/buffer_window_memory.md: -------------------------------------------------------------------------------- 1 | # Buffer Window Memory 2 | 3 | BufferWindowMemory keeps track of the back-and-forths in conversation, and then uses a window of size `k` to surface the last `k` back-and-forths to use as memory. 4 | 5 | ```typescript 6 | import { OpenAI } from "langchain/llms/openai"; 7 | import { BufferWindowMemory } from "langchain/memory"; 8 | import { ConversationChain } from "langchain/chains"; 9 | 10 | const model = new OpenAI({}); 11 | const memory = new BufferWindowMemory({ k: 1 }); 12 | const chain = new ConversationChain({ llm: model, memory: memory }); 13 | const res1 = await chain.call({ input: "Hi! I'm Jim." }); 14 | console.log({ res1 }); 15 | ``` 16 | 17 | ```shell 18 | {response: " Hi Jim! It's nice to meet you. My name is AI. 
What would you like to talk about?"} 19 | ``` 20 | 21 | ```typescript 22 | const res2 = await chain.call({ input: "What's my name?" }); 23 | console.log({ res2 }); 24 | ``` 25 | 26 | ```shell 27 | {response: ' You said your name is Jim. Is there anything else you would like to talk about?'} 28 | ``` 29 | -------------------------------------------------------------------------------- /docs/docs/modules/memory/examples/conversation_summary.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | sidebar_label: Conversation Summary 4 | --- 5 | 6 | import CodeBlock from "@theme/CodeBlock"; 7 | 8 | # Conversation Summary Memory 9 | 10 | The Conversation Summary Memory summarizes the conversation as it happens and stores the current summary in memory. This memory can then be used to inject the summary of the conversation so far into a prompt/chain. This memory is most useful for longer conversations, where keeping the past message history in the prompt verbatim would take up too many tokens. 
11 | 12 | ## Usage, with an LLM 13 | 14 | import TextExample from "@examples/memory/summary_llm.ts"; 15 | 16 | {TextExample} 17 | 18 | ## Usage, with a Chat Model 19 | 20 | import ChatExample from "@examples/memory/summary_chat.ts"; 21 | 22 | {ChatExample} 23 | -------------------------------------------------------------------------------- /docs/docs/modules/memory/examples/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_label: Examples 3 | --- 4 | 5 | import DocCardList from "@theme/DocCardList"; 6 | 7 | # Examples: Memory 8 | 9 | 10 | -------------------------------------------------------------------------------- /docs/docs/modules/models/chat/integrations.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_position: 3 3 | sidebar_label: Integrations 4 | --- 5 | 6 | # Integrations: Chat Models 7 | 8 | LangChain offers a number of Chat Models implementations that integrate with various model providers. 
These are: 9 | 10 | ## `OpenAI` 11 | 12 | ```typescript 13 | import { ChatOpenAI } from "langchain/chat_models/openai"; 14 | 15 | const model = new ChatOpenAI({ 16 | temperature: 0.9, 17 | openAIApiKey: "YOUR-API-KEY", // In Node.js defaults to process.env.OPENAI_API_KEY 18 | }); 19 | ``` 20 | 21 | ## `Anthropic` 22 | 23 | ```typescript 24 | import { ChatAnthropic } from "langchain/chat_models/anthropic"; 25 | 26 | const model = new ChatAnthropic({ 27 | temperature: 0.9, 28 | apiKey: "YOUR-API-KEY", // In Node.js defaults to process.env.ANTHROPIC_API_KEY 29 | }); 30 | ``` 31 | -------------------------------------------------------------------------------- /docs/docs/modules/models/embeddings/integrations.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_position: 3 3 | sidebar_label: Integrations 4 | --- 5 | 6 | # Integrations: Embeddings 7 | 8 | LangChain offers a number of Embeddings implementations that integrate with various model providers. These are: 9 | 10 | ## `OpenAIEmbeddings` 11 | 12 | The `OpenAIEmbeddings` class uses the OpenAI API to generate embeddings for a given text. By default it strips new line characters from the text, as recommended by OpenAI, but you can disable this by passing `stripNewLines: false` to the constructor. 
13 | 14 | ```typescript 15 | import { OpenAIEmbeddings } from "langchain/embeddings/openai"; 16 | 17 | const embeddings = new OpenAIEmbeddings({ 18 | openAIApiKey: "YOUR-API-KEY", // In Node.js defaults to process.env.OPENAI_API_KEY 19 | }); 20 | ``` 21 | 22 | ## `CohereEmbeddings` 23 | 24 | ```bash npm2yarn 25 | npm install cohere-ai 26 | ``` 27 | 28 | ```typescript 29 | import { CohereEmbeddings } from "langchain/embeddings/cohere"; 30 | 31 | const embeddings = new CohereEmbeddings({ 32 | apiKey: "YOUR-API-KEY", // In Node.js defaults to process.env.COHERE_API_KEY 33 | }); 34 | ``` 35 | -------------------------------------------------------------------------------- /docs/docs/modules/models/llms/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | sidebar_label: LLMs 4 | --- 5 | 6 | import CodeBlock from "@theme/CodeBlock"; 7 | import Example from "@examples/models/llm/llm_quick_start.ts"; 8 | import DocCardList from "@theme/DocCardList"; 9 | 10 | # Getting Started: LLMs 11 | 12 | :::info 13 | [Conceptual Guide](https://docs.langchain.com/docs/components/models/language-model) 14 | ::: 15 | 16 | LangChain provides a standard interface for using a variety of LLMs. 17 | 18 | To get started, simply use the `call` method of an `LLM` implementation, passing in a `string` input. 
In this example, we are using the `OpenAI` implementation: 19 | 20 | {Example} 21 | 22 | ## Dig deeper 23 | 24 | 25 | -------------------------------------------------------------------------------- /docs/docs/modules/prompts/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_position: 3 3 | hide_table_of_contents: true 4 | sidebar_label: Prompts 5 | --- 6 | 7 | import DocCardList from "@theme/DocCardList"; 8 | 9 | # Prompts 10 | 11 | :::info 12 | [Conceptual Guide](https://docs.langchain.com/docs/components/prompts) 13 | ::: 14 | 15 | LangChain provides several utilities to help manage prompts for language models, including chat models. 16 | 17 | 18 | -------------------------------------------------------------------------------- /docs/docs/modules/prompts/prompt_templates/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | sidebar_label: Prompt Templates 4 | sidebar_position: 1 5 | --- 6 | 7 | import CodeBlock from "@theme/CodeBlock"; 8 | import Example from "@examples/prompts/prompts.ts"; 9 | import DocCardList from "@theme/DocCardList"; 10 | 11 | # Prompt Templates 12 | 13 | :::info 14 | [Conceptual Guide](https://docs.langchain.com/docs/components/prompts/prompt-template) 15 | ::: 16 | 17 | A `PromptTemplate` allows you to make use of templating to generate a prompt. This is useful for when you want to use the same prompt outline in multiple places, but with certain values changed. 
18 | Prompt templates are supported for both LLMs and chat models, as shown below: 19 | 20 | {Example} 21 | 22 | ## Dig deeper 23 | 24 | 25 | -------------------------------------------------------------------------------- /docs/docs/modules/schema/chat-messages.md: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | sidebar_position: 1 4 | --- 5 | 6 | # Chat Messages 7 | 8 | The primary interface through which end users interact with LLMs is a chat interface. For this reason, some model providers have started providing access to the underlying API in a way that expects chat messages. These messages have a content field (which is usually text) and are associated with a user (or role). Right now the supported users are System, Human, and AI. 9 | 10 | ## SystemChatMessage 11 | 12 | A chat message representing information that should be instructions to the AI system. 13 | 14 | ```typescript 15 | new SystemChatMessage("You are a nice assistant"); 16 | ``` 17 | 18 | ## HumanChatMessage 19 | 20 | A chat message representing information coming from a human interacting with the AI system. 21 | 22 | ```typescript 23 | new HumanChatMessage("Hello, how are you?"); 24 | ``` 25 | 26 | ## AIChatMessage 27 | 28 | A chat message representing information coming from the AI system. 29 | 30 | ```typescript 31 | new AIChatMessage("I am doing well, thank you!"); 32 | ``` 33 | -------------------------------------------------------------------------------- /docs/docs/modules/schema/document.md: -------------------------------------------------------------------------------- 1 | # Document 2 | 3 | Language models only know information about what they were trained on. In order to get them to answer questions or summarize other information, you have to pass it to the language model. Therefore, it is very important to have a concept of a document. 4 | 5 | A document at its core is fairly simple. 
It consists of a piece of text and optional metadata. The piece of text is what we interact with the language model, while the optional metadata is useful for keeping track of metadata about the document (such as the source). 6 | 7 | ```typescript 8 | interface Document { 9 | pageContent: string; 10 | metadata: Record<string, any>; 11 | } 12 | ``` 13 | 14 | ## Creating a Document 15 | 16 | You can create a document object rather easily in LangChain with: 17 | 18 | ```typescript 19 | import { Document } from "langchain/document"; 20 | 21 | const doc = new Document({ pageContent: "foo" }); 22 | ``` 23 | 24 | You can create one with metadata with: 25 | 26 | ```typescript 27 | import { Document } from "langchain/document"; 28 | 29 | const doc = new Document({ pageContent: "foo", metadata: { source: "1" } }); 30 | ``` 31 | 32 | Also check out [Document Loaders](../indexes/document_loaders/) for a way to load documents from a variety of sources. 33 | -------------------------------------------------------------------------------- /docs/docs/modules/schema/example.md: -------------------------------------------------------------------------------- 1 | --- 2 | --- 3 | 4 | # Examples 5 | 6 | Examples are input/output pairs that represent inputs to a function and then expected output. They can be used in both training and evaluation of models. 7 | 8 | ```typescript 9 | type Example = Record<string, any>; 10 | ``` 11 | 12 | ## Creating an Example 13 | 14 | You can create an Example like this: 15 | 16 | ```typescript 17 | const example = { 18 | input: "foo", 19 | output: "bar", 20 | }; 21 | ``` 22 | -------------------------------------------------------------------------------- /docs/docs/modules/schema/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_position: 1 3 | --- 4 | 5 | import DocCardList from "@theme/DocCardList"; 6 | 7 | # Schema 8 | 9 | This section describes interfaces that are used throughout the rest of the library. 
10 | 11 | 12 | -------------------------------------------------------------------------------- /docs/docs/use_cases/api.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | sidebar_position: 4 4 | --- 5 | 6 | # Interacting with APIs 7 | 8 | :::info 9 | [Conceptual Guide](https://docs.langchain.com/docs/use-cases/apis) 10 | ::: 11 | 12 | Lots of data and information is stored behind APIs. 13 | This page covers all resources available in LangChain for working with APIs. 14 | 15 | ## Chains 16 | 17 | If you are just getting started, and you have relatively simple APIs, you should get started with chains. 18 | Chains are a sequence of predetermined steps, so they are good to get started with as they give you more control and let you 19 | understand what is happening better. 20 | 21 | TODO: add an API chain and then add an example here. 22 | 23 | ## Agents 24 | 25 | Agents are more complex, and involve multiple queries to the LLM to understand what to do. 26 | The downside of agents is that you have less control. The upside is that they are more powerful, 27 | which allows you to use them on larger and more complex schemas. 28 | 29 | - [OpenAPI Agent](../modules/agents/toolkits/examples/openapi.md) 30 | -------------------------------------------------------------------------------- /docs/docs/use_cases/personal_assistants.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | sidebar_position: 1 4 | --- 5 | 6 | # Personal Assistants 7 | 8 | :::info 9 | [Conceptual Guide](https://docs.langchain.com/docs/use-cases/personal-assistants) 10 | ::: 11 | 12 | We use "personal assistant" here in a very broad sense. 
13 | Personal assistants have a few characteristics: 14 | 15 | - They can interact with the outside world 16 | - They have knowledge of your data 17 | - They remember your interactions 18 | 19 | Really all of the functionality in LangChain is relevant for building a personal assistant. 20 | Highlighting specific parts: 21 | 22 | - [Agent Documentation](../modules/agents/index.mdx) (for interacting with the outside world) 23 | - [Index Documentation](../modules/indexes/index.mdx) (for giving them knowledge of your data) 24 | - [Memory](../modules/memory/index.mdx) (for helping them remember interactions) 25 | -------------------------------------------------------------------------------- /docs/docs/use_cases/summarization.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | sidebar_position: 6 4 | --- 5 | 6 | # Summarization 7 | 8 | :::info 9 | [Conceptual Guide](https://docs.langchain.com/docs/use-cases/summarization) 10 | ::: 11 | 12 | A common use case is wanting to summarize long documents. 13 | This naturally runs into the context window limitations. 14 | Unlike in question-answering, you can't just do some semantic search hacks to only select the chunks of text most relevant to the question (because, in this case, there is no particular question - you want to summarize everything). 15 | So what do you do then? 16 | 17 | To get started, we would recommend checking out the summarization chain which attacks this problem in a recursive manner. 
18 | 19 | - [Summarization Chain](../modules/chains/other_chains/summarization) 20 | -------------------------------------------------------------------------------- /docs/docs/use_cases/tabular.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | sidebar_position: 3 4 | --- 5 | 6 | # Tabular Question Answering 7 | 8 | :::info 9 | [Conceptual Guide](https://docs.langchain.com/docs/use-cases/qa-tabular) 10 | ::: 11 | 12 | Lots of data and information is stored in tabular data, whether it be csvs, excel sheets, or SQL tables. 13 | This page covers all resources available in LangChain for working with data in this format. 14 | 15 | ## Chains 16 | 17 | If you are just getting started, and you have relatively small/simple tabular data, you should get started with chains. 18 | Chains are a sequence of predetermined steps, so they are good to get started with as they give you more control and let you 19 | understand what is happening better. 20 | 21 | - [SQL Database Chain](../modules/chains/other_chains/sql) 22 | 23 | ## Agents 24 | 25 | Agents are more complex, and involve multiple queries to the LLM to understand what to do. 26 | The downside of agents is that you have less control. The upside is that they are more powerful, 27 | which allows you to use them on larger databases and more complex schemas. 28 | 29 | - [SQL Agent](../modules/agents/toolkits/examples/sql.mdx) 30 | -------------------------------------------------------------------------------- /docs/src/pages/index.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) Meta Platforms, Inc. and affiliates. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 
6 | * 7 | * @format 8 | */ 9 | 10 | import React from "react"; 11 | import { Redirect } from "@docusaurus/router"; 12 | 13 | export default function Home() { 14 | return ; 15 | } 16 | -------------------------------------------------------------------------------- /docs/src/theme/SearchBar.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) Meta Platforms, Inc. and affiliates. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 6 | * 7 | * @format 8 | */ 9 | import React from "react"; 10 | import { MendableSearchBar } from "@mendable/search"; 11 | import useDocusaurusContext from "@docusaurus/useDocusaurusContext"; 12 | 13 | export default function SearchBarWrapper() { 14 | const { 15 | siteConfig: { customFields }, 16 | } = useDocusaurusContext(); 17 | return ( 18 |
19 | 27 |
28 | ); 29 | } 30 | -------------------------------------------------------------------------------- /docs/static/.nojekyll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kevin-fruitful/chadgpt-ui/6a2250a8469661bdde53a6d1d05a2a929c46a752/docs/static/.nojekyll -------------------------------------------------------------------------------- /docs/static/img/DataberryDashboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kevin-fruitful/chadgpt-ui/6a2250a8469661bdde53a6d1d05a2a929c46a752/docs/static/img/DataberryDashboard.png -------------------------------------------------------------------------------- /docs/static/img/HeliconeDashboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kevin-fruitful/chadgpt-ui/6a2250a8469661bdde53a6d1d05a2a929c46a752/docs/static/img/HeliconeDashboard.png -------------------------------------------------------------------------------- /docs/static/img/HeliconeKeys.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kevin-fruitful/chadgpt-ui/6a2250a8469661bdde53a6d1d05a2a929c46a752/docs/static/img/HeliconeKeys.png -------------------------------------------------------------------------------- /docs/static/img/android-chrome-192x192.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kevin-fruitful/chadgpt-ui/6a2250a8469661bdde53a6d1d05a2a929c46a752/docs/static/img/android-chrome-192x192.png -------------------------------------------------------------------------------- /docs/static/img/android-chrome-512x512.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/kevin-fruitful/chadgpt-ui/6a2250a8469661bdde53a6d1d05a2a929c46a752/docs/static/img/android-chrome-512x512.png -------------------------------------------------------------------------------- /docs/static/img/apple-touch-icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kevin-fruitful/chadgpt-ui/6a2250a8469661bdde53a6d1d05a2a929c46a752/docs/static/img/apple-touch-icon.png -------------------------------------------------------------------------------- /docs/static/img/favicon-16x16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kevin-fruitful/chadgpt-ui/6a2250a8469661bdde53a6d1d05a2a929c46a752/docs/static/img/favicon-16x16.png -------------------------------------------------------------------------------- /docs/static/img/favicon-32x32.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kevin-fruitful/chadgpt-ui/6a2250a8469661bdde53a6d1d05a2a929c46a752/docs/static/img/favicon-32x32.png -------------------------------------------------------------------------------- /docs/static/img/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kevin-fruitful/chadgpt-ui/6a2250a8469661bdde53a6d1d05a2a929c46a752/docs/static/img/favicon.ico -------------------------------------------------------------------------------- /docs/static/img/parrot-chainlink-icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kevin-fruitful/chadgpt-ui/6a2250a8469661bdde53a6d1d05a2a929c46a752/docs/static/img/parrot-chainlink-icon.png -------------------------------------------------------------------------------- /docs/static/img/parrot-icon.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/kevin-fruitful/chadgpt-ui/6a2250a8469661bdde53a6d1d05a2a929c46a752/docs/static/img/parrot-icon.png -------------------------------------------------------------------------------- /examples/.env.example: -------------------------------------------------------------------------------- 1 | ANTHROPIC_API_KEY=ADD_YOURS_HERE # https://www.anthropic.com/ 2 | COHERE_API_KEY=ADD_YOURS_HERE # https://dashboard.cohere.ai/api-keys 3 | HUGGINGFACEHUB_API_KEY=ADD_YOURS_HERE # https://huggingface.co/settings/tokens 4 | OPENAI_API_KEY=ADD_YOURS_HERE # https://platform.openai.com/account/api-keys 5 | OPENSEARCH_URL=ADD_YOURS_HERE # http://127.0.0.1:9200 6 | PINECONE_API_KEY=ADD_YOURS_HERE # https://app.pinecone.io/organizations 7 | PINECONE_ENVIRONMENT=ADD_YOURS_HERE 8 | PINECONE_INDEX=ADD_YOURS_HERE # E.g. "trec-question-classification" when using "Cohere Trec" example index 9 | REPLICATE_API_KEY=ADD_YOURS_HERE # https://replicate.com/account 10 | SERPAPI_API_KEY=ADD_YOURS_HERE # https://serpapi.com/manage-api-key 11 | SERPER_API_KEY=ADD_YOURS_HERE # https://serper.dev/api-key 12 | SUPABASE_PRIVATE_KEY=ADD_YOURS_HERE # https://app.supabase.com/project/YOUR_PROJECT_ID/settings/api 13 | SUPABASE_URL=ADD_YOURS_HERE # # https://app.supabase.com/project/YOUR_PROJECT_ID/settings/api 14 | WEAVIATE_HOST=ADD_YOURS_HERE 15 | WEAVIATE_SCHEME=ADD_YOURS_HERE 16 | WEAVIATE_API_KEY=ADD_YOURS_HERE 17 | -------------------------------------------------------------------------------- /examples/.yarn/install-state.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kevin-fruitful/chadgpt-ui/6a2250a8469661bdde53a6d1d05a2a929c46a752/examples/.yarn/install-state.gz -------------------------------------------------------------------------------- /examples/src/README.md: 
-------------------------------------------------------------------------------- 1 | # langchain-examples 2 | 3 | This folder contains examples of how to use LangChain. 4 | 5 | ## Run an example 6 | 7 | What you'll usually want to do. 8 | 9 | First, build langchain. From the repository root, run: 10 | 11 | ```sh 12 | yarn 13 | yarn build 14 | ``` 15 | 16 | Most examples require API keys. Run `cp .env.example .env`, then edit `.env` with your API keys. 17 | 18 | Then from the `examples/` directory, run: 19 | 20 | `yarn run start ` 21 | 22 | eg. 23 | 24 | `yarn run start ./src/prompts/few_shot.ts` 25 | 26 | ## Run an example with the transpiled JS 27 | 28 | You shouldn't need to do this, but if you want to run an example with the transpiled JS, you can do so with: 29 | 30 | `yarn run start:dist ` 31 | 32 | eg. 33 | 34 | `yarn run start:dist ./dist/prompts/few_shot.js` 35 | -------------------------------------------------------------------------------- /examples/src/agents/aiplugin-tool.ts: -------------------------------------------------------------------------------- 1 | import { ChatOpenAI } from "langchain/chat_models/openai"; 2 | import { initializeAgentExecutor } from "langchain/agents"; 3 | import { 4 | RequestsGetTool, 5 | RequestsPostTool, 6 | AIPluginTool, 7 | } from "langchain/tools"; 8 | 9 | export const run = async () => { 10 | const tools = [ 11 | new RequestsGetTool(), 12 | new RequestsPostTool(), 13 | await AIPluginTool.fromPluginUrl( 14 | "https://www.klarna.com/.well-known/ai-plugin.json" 15 | ), 16 | ]; 17 | const agent = await initializeAgentExecutor( 18 | tools, 19 | new ChatOpenAI({ temperature: 0 }), 20 | "chat-zero-shot-react-description", 21 | true 22 | ); 23 | 24 | const result = await agent.call({ 25 | input: "what t shirts are available in klarna?", 26 | }); 27 | 28 | console.log({ result }); 29 | }; 30 | -------------------------------------------------------------------------------- /examples/src/agents/chat_mrkl.ts: 
-------------------------------------------------------------------------------- 1 | import { ChatOpenAI } from "langchain/chat_models/openai"; 2 | import { initializeAgentExecutor } from "langchain/agents"; 3 | import { SerpAPI } from "langchain/tools"; 4 | import { Calculator } from "langchain/tools/calculator"; 5 | 6 | export const run = async () => { 7 | const model = new ChatOpenAI({ temperature: 0 }); 8 | const tools = [ 9 | new SerpAPI(process.env.SERPAPI_API_KEY, { 10 | location: "Austin,Texas,United States", 11 | hl: "en", 12 | gl: "us", 13 | }), 14 | new Calculator(), 15 | ]; 16 | 17 | const executor = await initializeAgentExecutor( 18 | tools, 19 | model, 20 | "chat-zero-shot-react-description" 21 | ); 22 | console.log("Loaded agent."); 23 | 24 | const input = `Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?`; 25 | 26 | console.log(`Executing with input "${input}"...`); 27 | 28 | const result = await executor.call({ input }); 29 | 30 | console.log(`Got output ${result.output}`); 31 | 32 | console.log( 33 | `Got intermediate steps ${JSON.stringify( 34 | result.intermediateSteps, 35 | null, 36 | 2 37 | )}` 38 | ); 39 | }; 40 | -------------------------------------------------------------------------------- /examples/src/agents/chat_mrkl_with_tracing.ts: -------------------------------------------------------------------------------- 1 | import { ChatOpenAI } from "langchain/chat_models/openai"; 2 | import { initializeAgentExecutor } from "langchain/agents"; 3 | import { SerpAPI } from "langchain/tools"; 4 | import { Calculator } from "langchain/tools/calculator"; 5 | 6 | export const run = async () => { 7 | process.env.LANGCHAIN_HANDLER = "langchain"; 8 | const model = new ChatOpenAI({ temperature: 0 }); 9 | const tools = [ 10 | new SerpAPI(process.env.SERPAPI_API_KEY, { 11 | location: "Austin,Texas,United States", 12 | hl: "en", 13 | gl: "us", 14 | }), 15 | new Calculator(), 16 | ]; 17 | 18 | const executor = await 
initializeAgentExecutor( 19 | tools, 20 | model, 21 | "chat-zero-shot-react-description", 22 | true 23 | ); 24 | console.log("Loaded agent."); 25 | 26 | const input = `Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?`; 27 | 28 | console.log(`Executing with input "${input}"...`); 29 | 30 | const result = await executor.call({ input }); 31 | 32 | console.log(`Got output ${result.output}`); 33 | 34 | console.log( 35 | `Got intermediate steps ${JSON.stringify( 36 | result.intermediateSteps, 37 | null, 38 | 2 39 | )}` 40 | ); 41 | }; 42 | -------------------------------------------------------------------------------- /examples/src/agents/custom_tool.ts: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "langchain/llms/openai"; 2 | import { initializeAgentExecutor } from "langchain/agents"; 3 | import { DynamicTool } from "langchain/tools"; 4 | 5 | export const run = async () => { 6 | const model = new OpenAI({ temperature: 0 }); 7 | const tools = [ 8 | new DynamicTool({ 9 | name: "FOO", 10 | description: 11 | "call this to get the value of foo. input should be an empty string.", 12 | func: () => 13 | new Promise((resolve) => { 14 | resolve("foo"); 15 | }), 16 | }), 17 | new DynamicTool({ 18 | name: "BAR", 19 | description: 20 | "call this to get the value of bar. 
input should be an empty string.", 21 | func: () => 22 | new Promise((resolve) => { 23 | resolve("baz1"); 24 | }), 25 | }), 26 | ]; 27 | 28 | const executor = await initializeAgentExecutor( 29 | tools, 30 | model, 31 | "zero-shot-react-description" 32 | ); 33 | 34 | console.log("Loaded agent."); 35 | 36 | const input = `What is the value of foo?`; 37 | 38 | console.log(`Executing with input "${input}"...`); 39 | 40 | const result = await executor.call({ input }); 41 | 42 | console.log(`Got output ${result.output}`); 43 | }; 44 | -------------------------------------------------------------------------------- /examples/src/agents/json.ts: -------------------------------------------------------------------------------- 1 | import * as fs from "fs"; 2 | import * as yaml from "js-yaml"; 3 | import { OpenAI } from "langchain/llms/openai"; 4 | import { JsonSpec, JsonObject } from "langchain/tools"; 5 | import { JsonToolkit, createJsonAgent } from "langchain/agents"; 6 | 7 | export const run = async () => { 8 | let data: JsonObject; 9 | try { 10 | const yamlFile = fs.readFileSync("openai_openapi.yaml", "utf8"); 11 | data = yaml.load(yamlFile) as JsonObject; 12 | if (!data) { 13 | throw new Error("Failed to load OpenAPI spec"); 14 | } 15 | } catch (e) { 16 | console.error(e); 17 | return; 18 | } 19 | 20 | const toolkit = new JsonToolkit(new JsonSpec(data)); 21 | const model = new OpenAI({ temperature: 0 }); 22 | const executor = createJsonAgent(model, toolkit); 23 | 24 | const input = `What are the required parameters in the request body to the /completions endpoint?`; 25 | 26 | console.log(`Executing with input "${input}"...`); 27 | 28 | const result = await executor.call({ input }); 29 | 30 | console.log(`Got output ${result.output}`); 31 | 32 | console.log( 33 | `Got intermediate steps ${JSON.stringify( 34 | result.intermediateSteps, 35 | null, 36 | 2 37 | )}` 38 | ); 39 | }; 40 | -------------------------------------------------------------------------------- 
/examples/src/agents/load_from_hub.ts: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "langchain/llms/openai"; 2 | import { AgentExecutor } from "langchain/agents"; 3 | import { loadAgent } from "langchain/agents/load"; 4 | import { SerpAPI } from "langchain/tools"; 5 | import { Calculator } from "langchain/tools/calculator"; 6 | 7 | export const run = async () => { 8 | const model = new OpenAI({ temperature: 0 }); 9 | const tools = [ 10 | new SerpAPI(process.env.SERPAPI_API_KEY, { 11 | location: "Austin,Texas,United States", 12 | hl: "en", 13 | gl: "us", 14 | }), 15 | new Calculator(), 16 | ]; 17 | 18 | const agent = await loadAgent( 19 | "lc://agents/zero-shot-react-description/agent.json", 20 | { llm: model, tools } 21 | ); 22 | console.log("Loaded agent from Langchain hub"); 23 | 24 | const executor = AgentExecutor.fromAgentAndTools({ 25 | agent, 26 | tools, 27 | returnIntermediateSteps: true, 28 | }); 29 | 30 | const input = `Who is Olivia Wilde's boyfriend? 
What is his current age raised to the 0.23 power?`; 31 | console.log(`Executing with input "${input}"...`); 32 | 33 | const result = await executor.call({ input }); 34 | 35 | console.log(`Got output ${result.output}`); 36 | }; 37 | -------------------------------------------------------------------------------- /examples/src/agents/mrkl.ts: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "langchain/llms/openai"; 2 | import { initializeAgentExecutor } from "langchain/agents"; 3 | import { SerpAPI } from "langchain/tools"; 4 | import { Calculator } from "langchain/tools/calculator"; 5 | 6 | export const run = async () => { 7 | const model = new OpenAI({ temperature: 0 }); 8 | const tools = [ 9 | new SerpAPI(process.env.SERPAPI_API_KEY, { 10 | location: "Austin,Texas,United States", 11 | hl: "en", 12 | gl: "us", 13 | }), 14 | new Calculator(), 15 | ]; 16 | 17 | const executor = await initializeAgentExecutor( 18 | tools, 19 | model, 20 | "zero-shot-react-description", 21 | true 22 | ); 23 | console.log("Loaded agent."); 24 | 25 | const input = `Who is Olivia Wilde's boyfriend? 
What is his current age raised to the 0.23 power?`; 26 | 27 | console.log(`Executing with input "${input}"...`); 28 | 29 | const result = await executor.call({ input }); 30 | 31 | console.log(`Got output ${result.output}`); 32 | }; 33 | -------------------------------------------------------------------------------- /examples/src/agents/mrkl_with_tracing.ts: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "langchain/llms/openai"; 2 | import { initializeAgentExecutor } from "langchain/agents"; 3 | import { SerpAPI } from "langchain/tools"; 4 | import { Calculator } from "langchain/tools/calculator"; 5 | import process from "process"; 6 | 7 | export const run = async () => { 8 | process.env.LANGCHAIN_HANDLER = "langchain"; 9 | const model = new OpenAI({ temperature: 0 }); 10 | const tools = [ 11 | new SerpAPI(process.env.SERPAPI_API_KEY, { 12 | location: "Austin,Texas,United States", 13 | hl: "en", 14 | gl: "us", 15 | }), 16 | new Calculator(), 17 | ]; 18 | 19 | const executor = await initializeAgentExecutor( 20 | tools, 21 | model, 22 | "zero-shot-react-description", 23 | true 24 | ); 25 | console.log("Loaded agent."); 26 | 27 | const input = `Who is Olivia Wilde's boyfriend? 
What is his current age raised to the 0.23 power?`; 28 | 29 | console.log(`Executing with input "${input}"...`); 30 | 31 | const result = await executor.call({ input }); 32 | 33 | console.log(`Got output ${result.output}`); 34 | }; 35 | -------------------------------------------------------------------------------- /examples/src/agents/zapier_mrkl.ts: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "langchain/llms/openai"; 2 | import { initializeAgentExecutor, ZapierToolKit } from "langchain/agents"; 3 | import { ZapierNLAWrapper } from "langchain/tools"; 4 | 5 | export const run = async () => { 6 | const model = new OpenAI({ temperature: 0 }); 7 | const zapier = new ZapierNLAWrapper(); 8 | const toolkit = await ZapierToolKit.fromZapierNLAWrapper(zapier); 9 | 10 | const executor = await initializeAgentExecutor( 11 | toolkit.tools, 12 | model, 13 | "zero-shot-react-description", 14 | true 15 | ); 16 | console.log("Loaded agent."); 17 | 18 | const input = `Summarize the last email I received regarding Silicon Valley Bank. 
Send the summary to the #test-zapier Slack channel.`; 19 | 20 | console.log(`Executing with input "${input}"...`); 21 | 22 | const result = await executor.call({ input }); 23 | 24 | console.log(`Got output ${result.output}`); 25 | }; 26 | -------------------------------------------------------------------------------- /examples/src/callbacks/console_handler.ts: -------------------------------------------------------------------------------- 1 | import { CallbackManager, ConsoleCallbackHandler } from "langchain/callbacks"; 2 | import { LLMChain } from "langchain/chains"; 3 | import { OpenAI } from "langchain/llms/openai"; 4 | import { PromptTemplate } from "langchain/prompts"; 5 | 6 | export const run = async () => { 7 | const callbackManager = new CallbackManager(); 8 | callbackManager.addHandler(new ConsoleCallbackHandler()); 9 | 10 | const llm = new OpenAI({ temperature: 0, callbackManager }); 11 | const prompt = PromptTemplate.fromTemplate("1 + {number} ="); 12 | const chain = new LLMChain({ prompt, llm, callbackManager }); 13 | 14 | await chain.call({ number: 2 }); 15 | /* 16 | Entering new llm_chain chain... 17 | Finished chain. 18 | */ 19 | }; 20 | -------------------------------------------------------------------------------- /examples/src/chains/conversation_chain.ts: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "langchain/llms/openai"; 2 | import { ConversationChain } from "langchain/chains"; 3 | 4 | export const run = async () => { 5 | const model = new OpenAI({}); 6 | const chain = new ConversationChain({ llm: model }); 7 | const res1 = await chain.call({ input: "Hi! I'm Jim." }); 8 | console.log({ res1 }); 9 | const res2 = await chain.call({ input: "What's my name?" 
}); 10 | console.log({ res2 }); 11 | }; 12 | -------------------------------------------------------------------------------- /examples/src/chains/llm_chain_stream.ts: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "langchain/llms/openai"; 2 | import { PromptTemplate } from "langchain/prompts"; 3 | import { LLMChain } from "langchain/chains"; 4 | import { CallbackManager } from "langchain/callbacks"; 5 | 6 | export const run = async () => { 7 | const manager = CallbackManager.fromHandlers({ 8 | async handleLLMNewToken(token: string) { 9 | console.log({ token }); 10 | }, 11 | }); 12 | 13 | const model = new OpenAI({ 14 | temperature: 0.9, 15 | streaming: true, 16 | callbackManager: manager, 17 | }); 18 | 19 | const template = "What is a good name for a company that makes {product}?"; 20 | const prompt = new PromptTemplate({ template, inputVariables: ["product"] }); 21 | const chain = new LLMChain({ llm: model, prompt }); 22 | const res = await chain.call({ product: "colorful socks" }); 23 | console.log({ res }); 24 | }; 25 | -------------------------------------------------------------------------------- /examples/src/chains/load_from_hub.ts: -------------------------------------------------------------------------------- 1 | import { loadChain } from "langchain/chains/load"; 2 | 3 | export const run = async () => { 4 | const chain = await loadChain("lc://chains/hello-world/chain.json"); 5 | const res = chain.call({ topic: "foo" }); 6 | console.log(res); 7 | }; 8 | -------------------------------------------------------------------------------- /examples/src/chains/question_answering.ts: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "langchain/llms/openai"; 2 | import { loadQAStuffChain, loadQAMapReduceChain } from "langchain/chains"; 3 | import { Document } from "langchain/document"; 4 | 5 | export const run = async () => { 6 | // This first 
example uses the `StuffDocumentsChain`. 7 | const llmA = new OpenAI({}); 8 | const chainA = loadQAStuffChain(llmA); 9 | const docs = [ 10 | new Document({ pageContent: "Harrison went to Harvard." }), 11 | new Document({ pageContent: "Ankush went to Princeton." }), 12 | ]; 13 | const resA = await chainA.call({ 14 | input_documents: docs, 15 | question: "Where did Harrison go to college?", 16 | }); 17 | console.log({ resA }); 18 | // { resA: { text: ' Harrison went to Harvard.' } } 19 | 20 | // This second example uses the `MapReduceChain`. 21 | // Optionally limit the number of concurrent requests to the language model. 22 | const llmB = new OpenAI({ maxConcurrency: 10 }); 23 | const chainB = loadQAMapReduceChain(llmB); 24 | const resB = await chainB.call({ 25 | input_documents: docs, 26 | question: "Where did Harrison go to college?", 27 | }); 28 | console.log({ resB }); 29 | // { resB: { text: ' Harrison went to Harvard.' } } 30 | }; 31 | -------------------------------------------------------------------------------- /examples/src/chains/question_answering_map_reduce.ts: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "langchain/llms/openai"; 2 | import { loadQAMapReduceChain } from "langchain/chains"; 3 | import { Document } from "langchain/document"; 4 | 5 | export const run = async () => { 6 | const model = new OpenAI({ temperature: 0 }); 7 | const chain = loadQAMapReduceChain(model); 8 | const docs = [ 9 | new Document({ pageContent: "harrison went to harvard" }), 10 | new Document({ pageContent: "ankush went to princeton" }), 11 | ]; 12 | const res = await chain.call({ 13 | input_documents: docs, 14 | question: "Where did harrison go to college", 15 | }); 16 | console.log({ res }); 17 | }; 18 | -------------------------------------------------------------------------------- /examples/src/chains/retrieval_qa_with_remote.ts: 
-------------------------------------------------------------------------------- 1 | import { OpenAI } from "langchain/llms/openai"; 2 | import { RetrievalQAChain } from "langchain/chains"; 3 | import { RemoteLangChainRetriever } from "langchain/retrievers/remote"; 4 | 5 | export const run = async () => { 6 | // Initialize the LLM to use to answer the question. 7 | const model = new OpenAI({}); 8 | 9 | // Initialize the remote retriever. 10 | const retriever = new RemoteLangChainRetriever({ 11 | url: "http://0.0.0.0:8080/retrieve", // Replace with your own URL. 12 | auth: { bearer: "foo" }, // Replace with your own auth. 13 | inputKey: "message", 14 | responseKey: "response", 15 | }); 16 | 17 | // Create a chain that uses the OpenAI LLM and remote retriever. 18 | const chain = RetrievalQAChain.fromLLM(model, retriever); 19 | 20 | // Call the chain with a query. 21 | const res = await chain.call({ 22 | query: "What did the president say about Justice Breyer?", 23 | }); 24 | console.log({ res }); 25 | /* 26 | { 27 | res: { 28 | text: 'The president said that Justice Breyer was an Army veteran, Constitutional scholar, 29 | and retiring Justice of the United States Supreme Court and thanked him for his service.' 30 | } 31 | } 32 | */ 33 | }; 34 | -------------------------------------------------------------------------------- /examples/src/chains/sql_db.ts: -------------------------------------------------------------------------------- 1 | import { DataSource } from "typeorm"; 2 | import { OpenAI } from "langchain/llms/openai"; 3 | import { SqlDatabase } from "langchain/sql_db"; 4 | import { SqlDatabaseChain } from "langchain/chains"; 5 | 6 | /** 7 | * This example uses Chinook database, which is a sample database available for SQL Server, Oracle, MySQL, etc. 8 | * To set it up follow the instructions on https://database.guide/2-sample-databases-sqlite/, placing the .db file 9 | * in the examples folder. 
10 | */ 11 | export const run = async () => { 12 | const datasource = new DataSource({ 13 | type: "sqlite", 14 | database: "Chinook.db", 15 | }); 16 | 17 | const db = await SqlDatabase.fromDataSourceParams({ 18 | appDataSource: datasource, 19 | }); 20 | 21 | const chain = new SqlDatabaseChain({ 22 | llm: new OpenAI({ temperature: 0 }), 23 | database: db, 24 | }); 25 | 26 | const res = await chain.run("How many tracks are there?"); 27 | console.log(res); 28 | // There are 3503 tracks. 29 | }; 30 | -------------------------------------------------------------------------------- /examples/src/chains/summarization.ts: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "langchain/llms/openai"; 2 | import { loadSummarizationChain } from "langchain/chains"; 3 | import { Document } from "langchain/document"; 4 | 5 | export const run = async () => { 6 | const model = new OpenAI({}); 7 | const chain = loadSummarizationChain(model, { type: "stuff" }); 8 | const docs = [ 9 | new Document({ pageContent: "harrison went to harvard" }), 10 | new Document({ pageContent: "ankush went to princeton" }), 11 | ]; 12 | const res = await chain.call({ 13 | input_documents: docs, 14 | }); 15 | console.log(res); 16 | }; 17 | -------------------------------------------------------------------------------- /examples/src/chat/llm_chain.ts: -------------------------------------------------------------------------------- 1 | import { LLMChain } from "langchain/chains"; 2 | import { ChatOpenAI } from "langchain/chat_models/openai"; 3 | import { 4 | ChatPromptTemplate, 5 | HumanMessagePromptTemplate, 6 | SystemMessagePromptTemplate, 7 | } from "langchain/prompts"; 8 | 9 | export const run = async () => { 10 | const chat = new ChatOpenAI({ temperature: 0 }); 11 | 12 | const chatPrompt = ChatPromptTemplate.fromPromptMessages([ 13 | SystemMessagePromptTemplate.fromTemplate( 14 | "You are a helpful assistant that translates {input_language} to 
{output_language}." 15 | ), 16 | HumanMessagePromptTemplate.fromTemplate("{text}"), 17 | ]); 18 | 19 | const chain = new LLMChain({ 20 | prompt: chatPrompt, 21 | llm: chat, 22 | }); 23 | 24 | const response = await chain.call({ 25 | input_language: "English", 26 | output_language: "French", 27 | text: "I love programming.", 28 | }); 29 | 30 | console.log(response); 31 | }; 32 | -------------------------------------------------------------------------------- /examples/src/chat/memory.ts: -------------------------------------------------------------------------------- 1 | import { ConversationChain } from "langchain/chains"; 2 | import { ChatOpenAI } from "langchain/chat_models/openai"; 3 | import { 4 | ChatPromptTemplate, 5 | HumanMessagePromptTemplate, 6 | SystemMessagePromptTemplate, 7 | MessagesPlaceholder, 8 | } from "langchain/prompts"; 9 | import { BufferMemory } from "langchain/memory"; 10 | 11 | export const run = async () => { 12 | const chat = new ChatOpenAI({ temperature: 0 }); 13 | 14 | const chatPrompt = ChatPromptTemplate.fromPromptMessages([ 15 | SystemMessagePromptTemplate.fromTemplate( 16 | "The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know." 17 | ), 18 | new MessagesPlaceholder("history"), 19 | HumanMessagePromptTemplate.fromTemplate("{input}"), 20 | ]); 21 | 22 | const chain = new ConversationChain({ 23 | memory: new BufferMemory({ returnMessages: true, memoryKey: "history" }), 24 | prompt: chatPrompt, 25 | llm: chat, 26 | }); 27 | 28 | const response = await chain.call({ 29 | input: "hi! 
whats up?", 30 | }); 31 | 32 | console.log(response); 33 | }; 34 | -------------------------------------------------------------------------------- /examples/src/customParameters/differentBaseUrl.ts: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "langchain/llms/openai"; 2 | 3 | export const run = async () => { 4 | const model = new OpenAI( 5 | { temperature: 0 }, 6 | { 7 | basePath: "https://oai.hconeai.com/v1", 8 | } 9 | ); 10 | const res = await model.call( 11 | "What would be a good company name a company that makes colorful socks?" 12 | ); 13 | console.log(res); 14 | }; 15 | -------------------------------------------------------------------------------- /examples/src/document_loaders/cheerio_web.ts: -------------------------------------------------------------------------------- 1 | import { CheerioWebBaseLoader } from "langchain/document_loaders/web/cheerio"; 2 | 3 | export const run = async () => { 4 | const loader = new CheerioWebBaseLoader( 5 | "https://news.ycombinator.com/item?id=34817881" 6 | ); 7 | const docs = await loader.load(); 8 | console.log({ docs }); 9 | }; 10 | -------------------------------------------------------------------------------- /examples/src/document_loaders/college_confidential.ts: -------------------------------------------------------------------------------- 1 | import { CollegeConfidentialLoader } from "langchain/document_loaders/web/college_confidential"; 2 | 3 | export const run = async () => { 4 | const loader = new CollegeConfidentialLoader( 5 | "https://www.collegeconfidential.com/colleges/brown-university/" 6 | ); 7 | const docs = await loader.load(); 8 | console.log({ docs }); 9 | }; 10 | -------------------------------------------------------------------------------- /examples/src/document_loaders/example_data/example.txt: -------------------------------------------------------------------------------- 1 | Foo 2 | Bar 3 | Baz 4 | 5 | 
-------------------------------------------------------------------------------- /examples/src/document_loaders/gitbook.ts: -------------------------------------------------------------------------------- 1 | import { GitbookLoader } from "langchain/document_loaders/web/gitbook"; 2 | 3 | export const run = async () => { 4 | const loader = new GitbookLoader("https://docs.gitbook.com"); 5 | const docs = await loader.load(); // load single path 6 | console.log(docs); 7 | const allPathsLoader = new GitbookLoader("https://docs.gitbook.com", { 8 | shouldLoadAllPaths: true, 9 | }); 10 | const docsAllPaths = await allPathsLoader.load(); // loads all paths of the given gitbook 11 | console.log(docsAllPaths); 12 | }; 13 | -------------------------------------------------------------------------------- /examples/src/document_loaders/github.ts: -------------------------------------------------------------------------------- 1 | import { GithubRepoLoader } from "langchain/document_loaders/web/github"; 2 | 3 | export const run = async () => { 4 | const loader = new GithubRepoLoader( 5 | "https://github.com/hwchase17/langchainjs", 6 | { branch: "main", recursive: false, unknown: "warn" } 7 | ); 8 | const docs = await loader.load(); 9 | console.log({ docs }); 10 | }; 11 | -------------------------------------------------------------------------------- /examples/src/document_loaders/hn.ts: -------------------------------------------------------------------------------- 1 | import { HNLoader } from "langchain/document_loaders/web/hn"; 2 | 3 | export const run = async () => { 4 | const loader = new HNLoader("https://news.ycombinator.com/item?id=34817881"); 5 | const docs = await loader.load(); 6 | console.log({ docs }); 7 | }; 8 | -------------------------------------------------------------------------------- /examples/src/document_loaders/imsdb.ts: -------------------------------------------------------------------------------- 1 | import { IMSDBLoader } from 
"langchain/document_loaders/web/imsdb"; 2 | 3 | export const run = async () => { 4 | const loader = new IMSDBLoader( 5 | "https://imsdb.com/scripts/BlacKkKlansman.html" 6 | ); 7 | const docs = await loader.load(); 8 | console.log({ docs }); 9 | }; 10 | -------------------------------------------------------------------------------- /examples/src/document_loaders/notion_markdown.ts: -------------------------------------------------------------------------------- 1 | import { NotionLoader } from "langchain/document_loaders/fs/notion"; 2 | 3 | export const run = async () => { 4 | /** Provide the directory path of your notion folder */ 5 | const directoryPath = "Notion_DB"; 6 | const loader = new NotionLoader(directoryPath); 7 | const docs = await loader.load(); 8 | console.log({ docs }); 9 | }; 10 | -------------------------------------------------------------------------------- /examples/src/document_loaders/puppeteer_web.ts: -------------------------------------------------------------------------------- 1 | import { PuppeteerWebBaseLoader } from "langchain/document_loaders/web/puppeteer"; 2 | 3 | export const run = async () => { 4 | const loader = new PuppeteerWebBaseLoader("https://www.tabnews.com.br/"); 5 | 6 | /** Loader use evaluate function ` await page.evaluate(() => document.body.innerHTML);` as default evaluate */ 7 | const docs = await loader.load(); 8 | console.log({ docs }); 9 | 10 | const loaderWithOptions = new PuppeteerWebBaseLoader( 11 | "https://www.tabnews.com.br/", 12 | { 13 | launchOptions: { 14 | headless: true, 15 | }, 16 | gotoOptions: { 17 | waitUntil: "domcontentloaded", 18 | }, 19 | /** Pass custom evaluate , in this case you get page and browser instances */ 20 | async evaluate(page, browser) { 21 | await page.waitForResponse("https://www.tabnews.com.br/va/view"); 22 | 23 | const result = await page.evaluate(() => document.body.innerHTML); 24 | await browser.close(); 25 | return result; 26 | }, 27 | } 28 | ); 29 | const 
docsFromLoaderWithOptions = await loaderWithOptions.load(); 30 | console.log({ docsFromLoaderWithOptions }); 31 | }; 32 | -------------------------------------------------------------------------------- /examples/src/document_loaders/s3.ts: -------------------------------------------------------------------------------- 1 | import { S3Loader } from "langchain/document_loaders/web/s3"; 2 | 3 | const loader = new S3Loader({ 4 | bucket: "my-document-bucket-123", 5 | key: "AccountingOverview.pdf", 6 | unstructuredAPIURL: "http://localhost:8000/general/v0/general", 7 | }); 8 | 9 | const docs = await loader.load(); 10 | 11 | console.log(docs); 12 | -------------------------------------------------------------------------------- /examples/src/document_loaders/srt.ts: -------------------------------------------------------------------------------- 1 | import { SRTLoader } from "langchain/document_loaders/fs/srt"; 2 | 3 | export const run = async () => { 4 | const loader = new SRTLoader( 5 | "src/document_loaders/example_data/Star_Wars_The_Clone_Wars_S06E07_Crisis_at_the_Heart.srt" 6 | ); 7 | const docs = await loader.load(); 8 | console.log({ docs }); 9 | }; 10 | -------------------------------------------------------------------------------- /examples/src/document_loaders/text.ts: -------------------------------------------------------------------------------- 1 | import { TextLoader } from "langchain/document_loaders/fs/text"; 2 | 3 | export const run = async () => { 4 | const loader = new TextLoader( 5 | "src/document_loaders/example_data/example.txt" 6 | ); 7 | const docs = await loader.load(); 8 | console.log({ docs }); 9 | }; 10 | -------------------------------------------------------------------------------- /examples/src/document_loaders/unstructured.ts: -------------------------------------------------------------------------------- 1 | import { UnstructuredLoader } from "langchain/document_loaders/fs/unstructured"; 2 | 3 | export const run = async () => { 4 | 
const loader = new UnstructuredLoader( 5 | "http://localhost:8000/general/v0/general", 6 | "src/document_loaders/example_data/notion.md" 7 | ); 8 | const docs = await loader.load(); 9 | console.log({ docs }); 10 | }; 11 | -------------------------------------------------------------------------------- /examples/src/embeddings/cohere.ts: -------------------------------------------------------------------------------- 1 | import { CohereEmbeddings } from "langchain/embeddings/cohere"; 2 | 3 | export const run = async () => { 4 | const model = new CohereEmbeddings(); 5 | const res = await model.embedQuery( 6 | "What would be a good company name a company that makes colorful socks?" 7 | ); 8 | console.log({ res }); 9 | }; 10 | -------------------------------------------------------------------------------- /examples/src/embeddings/max_concurrency.ts: -------------------------------------------------------------------------------- 1 | import { OpenAIEmbeddings } from "langchain/embeddings/openai"; 2 | 3 | export const run = async () => { 4 | const model = new OpenAIEmbeddings({ 5 | maxConcurrency: 1, 6 | }); 7 | const res = await model.embedQuery( 8 | "What would be a good company name a company that makes colorful socks?" 9 | ); 10 | console.log({ res }); 11 | }; 12 | -------------------------------------------------------------------------------- /examples/src/embeddings/openai.ts: -------------------------------------------------------------------------------- 1 | import { OpenAIEmbeddings } from "langchain/embeddings/openai"; 2 | 3 | export const run = async () => { 4 | const model = new OpenAIEmbeddings(); 5 | const res = await model.embedQuery( 6 | "What would be a good company name a company that makes colorful socks?" 
7 | ); 8 | console.log({ res }); 9 | }; 10 | -------------------------------------------------------------------------------- /examples/src/indexes/recursive_text_splitter.ts: -------------------------------------------------------------------------------- 1 | import { RecursiveCharacterTextSplitter } from "langchain/text_splitter"; 2 | 3 | export const run = async () => { 4 | const text = `Hi.\n\nI'm Harrison.\n\nHow? Are? You?\nOkay then f f f f. 5 | This is a weird text to write, but gotta test the splittingggg some how.\n\n 6 | Bye!\n\n-H.`; 7 | const splitter = new RecursiveCharacterTextSplitter({ 8 | chunkSize: 10, 9 | chunkOverlap: 1, 10 | }); 11 | const output = splitter.createDocuments([text]); 12 | console.log(output); 13 | }; 14 | -------------------------------------------------------------------------------- /examples/src/indexes/text_splitter.ts: -------------------------------------------------------------------------------- 1 | import { Document } from "langchain/document"; 2 | import { CharacterTextSplitter } from "langchain/text_splitter"; 3 | 4 | export const run = async () => { 5 | /* Split text */ 6 | const text = "foo bar baz 123"; 7 | const splitter = new CharacterTextSplitter({ 8 | separator: " ", 9 | chunkSize: 7, 10 | chunkOverlap: 3, 11 | }); 12 | const output = splitter.createDocuments([text]); 13 | console.log({ output }); 14 | /* Split documents */ 15 | const docOutput = splitter.splitDocuments([ 16 | new Document({ pageContent: text }), 17 | ]); 18 | console.log({ docOutput }); 19 | }; 20 | -------------------------------------------------------------------------------- /examples/src/indexes/token_text_splitter.ts: -------------------------------------------------------------------------------- 1 | import { Document } from "langchain/document"; 2 | import { TokenTextSplitter } from "langchain/text_splitter"; 3 | import fs from "fs"; 4 | import path from "path"; 5 | 6 | export const run = async () => { 7 | /* Split text */ 8 | const 
text = fs.readFileSync( 9 | path.resolve(__dirname, "../../state_of_the_union.txt"), 10 | "utf8" 11 | ); 12 | 13 | const splitter = new TokenTextSplitter({ 14 | encodingName: "r50k_base", 15 | chunkSize: 10, 16 | chunkOverlap: 0, 17 | allowedSpecial: ["<|endoftext|>"], 18 | disallowedSpecial: [], 19 | }); 20 | 21 | const output = splitter.createDocuments([text]); 22 | console.log({ output }); 23 | 24 | const docOutput = splitter.splitDocuments([ 25 | new Document({ pageContent: text }), 26 | ]); 27 | 28 | console.log({ docOutput }); 29 | }; 30 | -------------------------------------------------------------------------------- /examples/src/indexes/vector_stores/hnswlib.ts: -------------------------------------------------------------------------------- 1 | import { HNSWLib } from "langchain/vectorstores/hnswlib"; 2 | import { OpenAIEmbeddings } from "langchain/embeddings/openai"; 3 | 4 | export const run = async () => { 5 | const vectorStore = await HNSWLib.fromTexts( 6 | ["Hello world", "Bye bye", "hello nice world"], 7 | [{ id: 2 }, { id: 1 }, { id: 3 }], 8 | new OpenAIEmbeddings() 9 | ); 10 | 11 | const resultOne = await vectorStore.similaritySearch("hello world", 1); 12 | console.log(resultOne); 13 | }; 14 | -------------------------------------------------------------------------------- /examples/src/indexes/vector_stores/hnswlib_fromdocs.ts: -------------------------------------------------------------------------------- 1 | import { HNSWLib } from "langchain/vectorstores/hnswlib"; 2 | import { OpenAIEmbeddings } from "langchain/embeddings/openai"; 3 | import { TextLoader } from "langchain/document_loaders/fs/text"; 4 | 5 | export const run = async () => { 6 | // Create docs with a loader 7 | const loader = new TextLoader( 8 | "src/document_loaders/example_data/example.txt" 9 | ); 10 | const docs = await loader.load(); 11 | 12 | // Load the docs into the vector store 13 | const vectorStore = await HNSWLib.fromDocuments(docs, new OpenAIEmbeddings()); 14 | 15 | 
// Search for the most similar document 16 | const resultOne = await vectorStore.similaritySearch("hello world", 1); 17 | console.log(resultOne); 18 | }; 19 | -------------------------------------------------------------------------------- /examples/src/indexes/vector_stores/hnswlib_saveload.ts: -------------------------------------------------------------------------------- 1 | import { HNSWLib } from "langchain/vectorstores/hnswlib"; 2 | import { OpenAIEmbeddings } from "langchain/embeddings/openai"; 3 | 4 | export const run = async () => { 5 | // Create a vector store through any method, here from texts as an example 6 | const vectorStore = await HNSWLib.fromTexts( 7 | ["Hello world", "Bye bye", "hello nice world"], 8 | [{ id: 2 }, { id: 1 }, { id: 3 }], 9 | new OpenAIEmbeddings() 10 | ); 11 | 12 | // Save the vector store to a directory 13 | const directory = "your/directory/here"; 14 | await vectorStore.save(directory); 15 | 16 | // Load the vector store from the same directory 17 | const loadedVectorStore = await HNSWLib.load( 18 | directory, 19 | new OpenAIEmbeddings() 20 | ); 21 | 22 | // vectorStore and loadedVectorStore are identical 23 | 24 | const result = await loadedVectorStore.similaritySearch("hello world", 1); 25 | console.log(result); 26 | }; 27 | -------------------------------------------------------------------------------- /examples/src/indexes/vector_stores/memory.ts: -------------------------------------------------------------------------------- 1 | import { MemoryVectorStore } from "langchain/vectorstores/memory"; 2 | import { OpenAIEmbeddings } from "langchain/embeddings/openai"; 3 | 4 | export const run = async () => { 5 | const vectorStore = await MemoryVectorStore.fromTexts( 6 | ["Hello world", "Bye bye", "hello nice world"], 7 | [{ id: 2 }, { id: 1 }, { id: 3 }], 8 | new OpenAIEmbeddings() 9 | ); 10 | 11 | const resultOne = await vectorStore.similaritySearch("hello world", 1); 12 | console.log(resultOne); 13 | }; 14 | 
-------------------------------------------------------------------------------- /examples/src/indexes/vector_stores/memory_custom_similarity.ts: -------------------------------------------------------------------------------- 1 | import { MemoryVectorStore } from "langchain/vectorstores/memory"; 2 | import { OpenAIEmbeddings } from "langchain/embeddings/openai"; 3 | import { similarity } from "ml-distance"; 4 | 5 | export const run = async () => { 6 | const vectorStore = await MemoryVectorStore.fromTexts( 7 | ["Hello world", "Bye bye", "hello nice world"], 8 | [{ id: 2 }, { id: 1 }, { id: 3 }], 9 | new OpenAIEmbeddings(), 10 | { similarity: similarity.pearson } 11 | ); 12 | 13 | const resultOne = await vectorStore.similaritySearch("hello world", 1); 14 | console.log(resultOne); 15 | }; 16 | -------------------------------------------------------------------------------- /examples/src/indexes/vector_stores/memory_fromdocs.ts: -------------------------------------------------------------------------------- 1 | import { MemoryVectorStore } from "langchain/vectorstores/memory"; 2 | import { OpenAIEmbeddings } from "langchain/embeddings/openai"; 3 | import { TextLoader } from "langchain/document_loaders/fs/text"; 4 | 5 | export const run = async () => { 6 | // Create docs with a loader 7 | const loader = new TextLoader( 8 | "src/document_loaders/example_data/example.txt" 9 | ); 10 | const docs = await loader.load(); 11 | 12 | // Load the docs into the vector store 13 | const vectorStore = await MemoryVectorStore.fromDocuments( 14 | docs, 15 | new OpenAIEmbeddings() 16 | ); 17 | 18 | // Search for the most similar document 19 | const resultOne = await vectorStore.similaritySearch("hello world", 1); 20 | 21 | console.log(resultOne); 22 | }; 23 | -------------------------------------------------------------------------------- /examples/src/indexes/vector_stores/milvus.ts: -------------------------------------------------------------------------------- 1 | import { Milvus 
} from "langchain/vectorstores/milvus"; 2 | import { OpenAIEmbeddings } from "langchain/embeddings/openai"; 3 | 4 | export const run = async () => { 5 | const vectorStore = await Milvus.fromTexts( 6 | ["Hello world", "Bye bye", "hello nice world"], 7 | [{ id: 2 }, { id: 1 }, { id: 3 }], 8 | new OpenAIEmbeddings() 9 | ); 10 | 11 | const resultOne = await vectorStore.similaritySearch("hello world", 1); 12 | console.log(resultOne); 13 | }; 14 | -------------------------------------------------------------------------------- /examples/src/indexes/vector_stores/mongo_fromTexts.ts: -------------------------------------------------------------------------------- 1 | import { MongoVectorStore } from "langchain/vectorstores/mongo"; 2 | import { CohereEmbeddings } from "langchain/embeddings/cohere"; 3 | import { MongoClient } from "mongodb"; 4 | 5 | export const run = async () => { 6 | const client = new MongoClient(process.env.MONGO_URI || ""); 7 | 8 | const collection = client.db("langchain").collection("test"); 9 | 10 | await MongoVectorStore.fromTexts( 11 | ["Hello world", "Bye bye", "What's this?"], 12 | [{ id: 2 }, { id: 1 }, { id: 3 }], 13 | new CohereEmbeddings(), 14 | { 15 | client, 16 | collection, 17 | // indexName: "default", // make sure that this matches the index name in atlas if not using "default" 18 | } 19 | ); 20 | 21 | // remember to close the client 22 | await client.close(); 23 | }; 24 | -------------------------------------------------------------------------------- /examples/src/indexes/vector_stores/mongo_search.ts: -------------------------------------------------------------------------------- 1 | import { MongoVectorStore } from "langchain/vectorstores/mongo"; 2 | import { CohereEmbeddings } from "langchain/embeddings/cohere"; 3 | import { MongoClient } from "mongodb"; 4 | 5 | export const run = async () => { 6 | const client = new MongoClient(process.env.MONGO_URI || ""); 7 | 8 | const collection = client.db("langchain").collection("test"); 9 | 
10 | const vectorStore = new MongoVectorStore(new CohereEmbeddings(), { 11 | client, 12 | collection, 13 | // indexName: "default", // make sure that this matches the index name in atlas if not using "default" 14 | }); 15 | 16 | const resultOne = await vectorStore.similaritySearch("Hello world", 1); 17 | 18 | console.log(resultOne); 19 | 20 | // remember to close the client 21 | await client.close(); 22 | }; 23 | -------------------------------------------------------------------------------- /examples/src/indexes/vector_stores/opensearch/opensearch.ts: -------------------------------------------------------------------------------- 1 | import { Client } from "@opensearch-project/opensearch"; 2 | import { OpenAIEmbeddings } from "langchain/embeddings/openai"; 3 | import { OpenSearchVectorStore } from "langchain/vectorstores/opensearch"; 4 | 5 | export async function run() { 6 | const client = new Client({ 7 | nodes: [process.env.OPENSEARCH_URL ?? "http://127.0.0.1:9200"], 8 | }); 9 | 10 | const vectorStore = await OpenSearchVectorStore.fromTexts( 11 | ["Hello world", "Bye bye", "What's this?"], 12 | [{ id: 2 }, { id: 1 }, { id: 3 }], 13 | new OpenAIEmbeddings(), 14 | { 15 | client, 16 | indexName: "documents", 17 | } 18 | ); 19 | 20 | const resultOne = await vectorStore.similaritySearch("Hello world", 1); 21 | console.log(resultOne); 22 | } 23 | -------------------------------------------------------------------------------- /examples/src/indexes/vector_stores/prisma_vectorstore/.env.example: -------------------------------------------------------------------------------- 1 | # Add DATABASE_URL to .env file in this directory 2 | DATABASE_URL=postgresql://[USERNAME]:[PASSWORD]@[ADDR]/[DBNAME] -------------------------------------------------------------------------------- /examples/src/indexes/vector_stores/prisma_vectorstore/.gitignore: -------------------------------------------------------------------------------- 1 | data 2 | docker-compose.yml 
-------------------------------------------------------------------------------- /examples/src/indexes/vector_stores/prisma_vectorstore/docker-compose.example.yml: -------------------------------------------------------------------------------- 1 | services: 2 | db: 3 | image: ankane/pgvector 4 | ports: 5 | - 5432:5432 6 | volumes: 7 | - ./data:/var/lib/postgresql/data 8 | environment: 9 | - POSTGRES_PASSWORD= 10 | - POSTGRES_USER= 11 | - POSTGRES_DB= -------------------------------------------------------------------------------- /examples/src/indexes/vector_stores/prisma_vectorstore/prisma.ts: -------------------------------------------------------------------------------- 1 | import { PrismaVectorStore } from "langchain/vectorstores/prisma"; 2 | import { OpenAIEmbeddings } from "langchain/embeddings"; 3 | import { PrismaClient, Prisma, Document } from "@prisma/client"; 4 | 5 | export const run = async () => { 6 | const db = new PrismaClient(); 7 | 8 | const vectorStore = PrismaVectorStore.withModel(db).create( 9 | new OpenAIEmbeddings(), 10 | { 11 | prisma: Prisma, 12 | tableName: "Document", 13 | vectorColumnName: "vector", 14 | columns: { 15 | id: PrismaVectorStore.IdColumn, 16 | content: PrismaVectorStore.ContentColumn, 17 | }, 18 | } 19 | ); 20 | 21 | const texts = ["Hello world", "Bye bye", "What's this?"]; 22 | await vectorStore.addModels( 23 | await db.$transaction( 24 | texts.map((content) => db.document.create({ data: { content } })) 25 | ) 26 | ); 27 | 28 | const resultOne = await vectorStore.similaritySearch("Hello world", 1); 29 | console.log(resultOne.at(0)?.metadata.content); 30 | }; 31 | -------------------------------------------------------------------------------- /examples/src/indexes/vector_stores/prisma_vectorstore/prisma/migrations/00_init/migration.sql: -------------------------------------------------------------------------------- 1 | -- CreateTable 2 | CREATE EXTENSION IF NOT EXISTS vector; 3 | CREATE TABLE "Document" ( 4 | "id" TEXT 
NOT NULL, 5 | "content" TEXT NOT NULL, 6 | "vector" vector, 7 | 8 | CONSTRAINT "Document_pkey" PRIMARY KEY ("id") 9 | ); 10 | -------------------------------------------------------------------------------- /examples/src/indexes/vector_stores/prisma_vectorstore/prisma/migrations/migration_lock.toml: -------------------------------------------------------------------------------- 1 | # Please do not edit this file manually 2 | # It should be added in your version-control system (i.e. Git) 3 | provider = "postgresql" -------------------------------------------------------------------------------- /examples/src/indexes/vector_stores/prisma_vectorstore/prisma/schema.prisma: -------------------------------------------------------------------------------- 1 | // This is your Prisma schema file, 2 | // learn more about it in the docs: https://pris.ly/d/prisma-schema 3 | 4 | generator client { 5 | provider = "prisma-client-js" 6 | } 7 | 8 | datasource db { 9 | provider = "postgresql" 10 | url = env("DATABASE_URL") 11 | } 12 | 13 | model Document { 14 | id String @id @default(cuid()) 15 | content String 16 | vector Unsupported("vector")? 
17 | } 18 | -------------------------------------------------------------------------------- /examples/src/indexes/vector_stores/supabase.ts: -------------------------------------------------------------------------------- 1 | import { SupabaseVectorStore } from "langchain/vectorstores/supabase"; 2 | import { OpenAIEmbeddings } from "langchain/embeddings/openai"; 3 | import { createClient } from "@supabase/supabase-js"; 4 | 5 | // First, follow set-up instructions at 6 | // https://js.langchain.com/docs/modules/indexes/vector_stores/integrations/supabase 7 | 8 | const privateKey = process.env.SUPABASE_PRIVATE_KEY; 9 | if (!privateKey) throw new Error(`Expected env var SUPABASE_PRIVATE_KEY`); 10 | 11 | const url = process.env.SUPABASE_URL; 12 | if (!url) throw new Error(`Expected env var SUPABASE_URL`); 13 | 14 | export const run = async () => { 15 | const client = createClient(url, privateKey); 16 | 17 | const vectorStore = await SupabaseVectorStore.fromTexts( 18 | ["Hello world", "Bye bye", "What's this?"], 19 | [{ id: 2 }, { id: 1 }, { id: 3 }], 20 | new OpenAIEmbeddings(), 21 | { 22 | client, 23 | tableName: "documents", 24 | queryName: "match_documents", 25 | } 26 | ); 27 | 28 | const resultOne = await vectorStore.similaritySearch("Hello world", 1); 29 | 30 | console.log(resultOne); 31 | }; 32 | -------------------------------------------------------------------------------- /examples/src/indexes/vector_stores/weaviate_fromTexts.ts: -------------------------------------------------------------------------------- 1 | /* eslint-disable @typescript-eslint/no-explicit-any */ 2 | import weaviate from "weaviate-ts-client"; 3 | import { WeaviateStore } from "langchain/vectorstores/weaviate"; 4 | import { OpenAIEmbeddings } from "langchain/embeddings/openai"; 5 | 6 | export async function run() { 7 | // Something wrong with the weaviate-ts-client types, so we need to disable 8 | const client = (weaviate as any).client({ 9 | scheme: process.env.WEAVIATE_SCHEME || 
"https", 10 | host: process.env.WEAVIATE_HOST || "localhost", 11 | apiKey: new (weaviate as any).ApiKey( 12 | process.env.WEAVIATE_API_KEY || "default" 13 | ), 14 | }); 15 | 16 | // Create a store and fill it with some texts + metadata 17 | await WeaviateStore.fromTexts( 18 | ["hello world", "hi there", "how are you", "bye now"], 19 | [{ foo: "bar" }, { foo: "baz" }, { foo: "qux" }, { foo: "bar" }], 20 | new OpenAIEmbeddings(), 21 | { 22 | client, 23 | indexName: "Test", 24 | textKey: "text", 25 | metadataKeys: ["foo"], 26 | } 27 | ); 28 | } 29 | -------------------------------------------------------------------------------- /examples/src/llms/cohere.ts: -------------------------------------------------------------------------------- 1 | import { Cohere } from "langchain/llms/cohere"; 2 | 3 | export const run = async () => { 4 | const model = new Cohere({ 5 | temperature: 0.7, 6 | verbose: true, 7 | maxTokens: 20, 8 | maxRetries: 5, 9 | }); 10 | const res = await model.call( 11 | "Question: What would be a good company name a company that makes colorful socks?\nAnswer:" 12 | ); 13 | console.log({ res }); 14 | }; 15 | -------------------------------------------------------------------------------- /examples/src/llms/hf.ts: -------------------------------------------------------------------------------- 1 | import { HuggingFaceInference } from "langchain/llms/hf"; 2 | 3 | export const run = async () => { 4 | const model = new HuggingFaceInference({ 5 | model: "gpt2", 6 | temperature: 0.7, 7 | verbose: true, 8 | maxTokens: 50, 9 | }); 10 | const res = await model.call( 11 | "Question: What would be a good company name a company that makes colorful socks?\nAnswer:" 12 | ); 13 | console.log({ res }); 14 | }; 15 | -------------------------------------------------------------------------------- /examples/src/llms/openai-chat.ts: -------------------------------------------------------------------------------- 1 | import { OpenAIChat } from "langchain/llms/openai"; 2 | 3 | 
export const run = async () => { 4 | const model = new OpenAIChat({ 5 | prefixMessages: [ 6 | { 7 | role: "system", 8 | content: "You are a helpful assistant that answers in pirate language", 9 | }, 10 | ], 11 | maxTokens: 50, 12 | }); 13 | const res = await model.call( 14 | "What would be a good company name a company that makes colorful socks?" 15 | ); 16 | console.log({ res }); 17 | }; 18 | -------------------------------------------------------------------------------- /examples/src/llms/openai.ts: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "langchain/llms/openai"; 2 | 3 | export const run = async () => { 4 | const model = new OpenAI({ 5 | modelName: "gpt-4", 6 | temperature: 0.7, 7 | verbose: true, 8 | maxTokens: 1000, 9 | maxRetries: 5, 10 | }); 11 | const res = await model.call( 12 | "Question: What would be a good company name a company that makes colorful socks?\nAnswer:" 13 | ); 14 | console.log({ res }); 15 | }; 16 | -------------------------------------------------------------------------------- /examples/src/llms/replicate.ts: -------------------------------------------------------------------------------- 1 | import { Replicate } from "langchain/llms/replicate"; 2 | 3 | export const run = async () => { 4 | const model = new Replicate({ 5 | model: 6 | "replicate/flan-t5-xl:3ae0799123a1fe11f8c89fd99632f843fc5f7a761630160521c4253149754523", 7 | }); 8 | const res = await model.call( 9 | "Question: What would be a good company name a company that makes colorful socks?\nAnswer:" 10 | ); 11 | console.log({ res }); 12 | }; 13 | -------------------------------------------------------------------------------- /examples/src/memory/buffer.ts: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "langchain/llms/openai"; 2 | import { BufferMemory } from "langchain/memory"; 3 | import { LLMChain } from "langchain/chains"; 4 | import { 
PromptTemplate } from "langchain/prompts"; 5 | 6 | export const run = async () => { 7 | const memory = new BufferMemory({ memoryKey: "chat_history" }); 8 | const model = new OpenAI({ temperature: 0.9 }); 9 | const template = `The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know. 10 | 11 | Current conversation: 12 | {chat_history} 13 | Human: {input} 14 | AI:`; 15 | 16 | const prompt = PromptTemplate.fromTemplate(template); 17 | const chain = new LLMChain({ llm: model, prompt, memory }); 18 | const res1 = await chain.call({ input: "Hi! I'm Jim." }); 19 | console.log({ res1 }); 20 | const res2 = await chain.call({ input: "What's my name?" }); 21 | console.log({ res2 }); 22 | }; 23 | -------------------------------------------------------------------------------- /examples/src/memory/buffer_window.ts: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "langchain/llms/openai"; 2 | import { BufferWindowMemory } from "langchain/memory"; 3 | import { LLMChain } from "langchain/chains"; 4 | import { PromptTemplate } from "langchain/prompts"; 5 | 6 | export const run = async () => { 7 | const memory = new BufferWindowMemory({ memoryKey: "chat_history", k: 1 }); 8 | const model = new OpenAI({ temperature: 0.9 }); 9 | const template = `The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know. 10 | 11 | Current conversation: 12 | {chat_history} 13 | Human: {input} 14 | AI:`; 15 | 16 | const prompt = PromptTemplate.fromTemplate(template); 17 | const chain = new LLMChain({ llm: model, prompt, memory }); 18 | const res1 = await chain.call({ input: "Hi! I'm Jim." 
}); 19 | console.log({ res1 }); 20 | const res2 = await chain.call({ input: "What's my name?" }); 21 | console.log({ res2 }); 22 | }; 23 | -------------------------------------------------------------------------------- /examples/src/models/chat/chat_quick_start.ts: -------------------------------------------------------------------------------- 1 | import { ChatOpenAI } from "langchain/chat_models/openai"; 2 | import { HumanChatMessage } from "langchain/schema"; 3 | 4 | export const run = async () => { 5 | const chat = new ChatOpenAI(); 6 | // Pass in a list of messages to `call` to start a conversation. In this simple example, we only pass in one message. 7 | const response = await chat.call([ 8 | new HumanChatMessage( 9 | "What is a good name for a company that makes colorful socks?" 10 | ), 11 | ]); 12 | console.log(response); 13 | // AIChatMessage { text: '\n\nRainbow Sox Co.' } 14 | }; 15 | -------------------------------------------------------------------------------- /examples/src/models/chat/chat_streaming.ts: -------------------------------------------------------------------------------- 1 | import { CallbackManager } from "langchain/callbacks"; 2 | import { ChatOpenAI } from "langchain/chat_models/openai"; 3 | import { HumanChatMessage } from "langchain/schema"; 4 | 5 | export const run = async () => { 6 | const chat = new ChatOpenAI({ 7 | maxTokens: 25, 8 | streaming: true, 9 | callbackManager: CallbackManager.fromHandlers({ 10 | async handleLLMNewToken(token: string) { 11 | console.log({ token }); 12 | }, 13 | }), 14 | }); 15 | 16 | const response = await chat.call([new HumanChatMessage("Tell me a joke.")]); 17 | 18 | console.log(response); 19 | // { token: '' } 20 | // { token: '\n\n' } 21 | // { token: 'Why' } 22 | // { token: ' don' } 23 | // { token: "'t" } 24 | // { token: ' scientists' } 25 | // { token: ' trust' } 26 | // { token: ' atoms' } 27 | // { token: '?\n\n' } 28 | // { token: 'Because' } 29 | // { token: ' they' } 30 | // { token: ' 
make' } 31 | // { token: ' up' } 32 | // { token: ' everything' } 33 | // { token: '.' } 34 | // { token: '' } 35 | // AIChatMessage { 36 | // text: "\n\nWhy don't scientists trust atoms?\n\nBecause they make up everything." 37 | // } 38 | }; 39 | -------------------------------------------------------------------------------- /examples/src/models/chat/chat_streaming_stdout.ts: -------------------------------------------------------------------------------- 1 | import { CallbackManager } from "langchain/callbacks"; 2 | import { ChatOpenAI } from "langchain/chat_models"; 3 | import { HumanChatMessage } from "langchain/schema"; 4 | 5 | export const run = async () => { 6 | const chat = new ChatOpenAI({ 7 | streaming: true, 8 | callbackManager: CallbackManager.fromHandlers({ 9 | async handleLLMNewToken(token: string) { 10 | process.stdout.write(token); 11 | }, 12 | }), 13 | }); 14 | 15 | await chat.call([ 16 | new HumanChatMessage("Write me a song about sparkling water."), 17 | ]); 18 | /* 19 | Verse 1: 20 | Bubbles rise, crisp and clear 21 | Refreshing taste that brings us cheer 22 | Sparkling water, so light and pure 23 | Quenches our thirst, it's always secure 24 | 25 | Chorus: 26 | Sparkling water, oh how we love 27 | Its fizzy bubbles and grace above 28 | It's the perfect drink, anytime, anyplace 29 | Refreshing as it gives us a taste 30 | 31 | Verse 2: 32 | From morning brunch to evening feast 33 | It's the perfect drink for a treat 34 | A sip of it brings a smile so bright 35 | Our thirst is quenched in just one sip so light 36 | ... 
37 | */ 38 | }; 39 | -------------------------------------------------------------------------------- /examples/src/models/chat/chat_timeout.ts: -------------------------------------------------------------------------------- 1 | import { ChatOpenAI } from "langchain/chat_models/openai"; 2 | import { HumanChatMessage } from "langchain/schema"; 3 | 4 | export const run = async () => { 5 | const chat = new ChatOpenAI( 6 | { temperature: 1, timeout: 1000 } // 1s timeout 7 | ); 8 | 9 | const response = await chat.call([ 10 | new HumanChatMessage( 11 | "What is a good name for a company that makes colorful socks?" 12 | ), 13 | ]); 14 | console.log(response); 15 | // AIChatMessage { text: '\n\nRainbow Sox Co.' } 16 | }; 17 | -------------------------------------------------------------------------------- /examples/src/models/embeddings/cohere.ts: -------------------------------------------------------------------------------- 1 | import { CohereEmbeddings } from "langchain/embeddings/cohere"; 2 | 3 | export const run = async () => { 4 | /* Embed queries */ 5 | const embeddings = new CohereEmbeddings(); 6 | const res = await embeddings.embedQuery("Hello world"); 7 | console.log(res); 8 | /* Embed documents */ 9 | const documentRes = await embeddings.embedDocuments([ 10 | "Hello world", 11 | "Bye bye", 12 | ]); 13 | console.log({ documentRes }); 14 | }; 15 | -------------------------------------------------------------------------------- /examples/src/models/embeddings/openai.ts: -------------------------------------------------------------------------------- 1 | import { OpenAIEmbeddings } from "langchain/embeddings/openai"; 2 | 3 | export const run = async () => { 4 | /* Embed queries */ 5 | const embeddings = new OpenAIEmbeddings(); 6 | const res = await embeddings.embedQuery("Hello world"); 7 | console.log(res); 8 | /* Embed documents */ 9 | const documentRes = await embeddings.embedDocuments([ 10 | "Hello world", 11 | "Bye bye", 12 | ]); 13 | console.log({ documentRes 
}); 14 | }; 15 | -------------------------------------------------------------------------------- /examples/src/models/embeddings/openai_timeout.ts: -------------------------------------------------------------------------------- 1 | import { OpenAIEmbeddings } from "langchain/embeddings/openai"; 2 | 3 | export const run = async () => { 4 | const embeddings = new OpenAIEmbeddings({ 5 | timeout: 1000, // 1s timeout 6 | }); 7 | /* Embed queries */ 8 | const res = await embeddings.embedQuery("Hello world"); 9 | console.log(res); 10 | /* Embed documents */ 11 | const documentRes = await embeddings.embedDocuments([ 12 | "Hello world", 13 | "Bye bye", 14 | ]); 15 | console.log({ documentRes }); 16 | }; 17 | -------------------------------------------------------------------------------- /examples/src/models/llm/llm_promptlayer.ts: -------------------------------------------------------------------------------- 1 | import { PromptLayerOpenAI } from "langchain/llms/openai"; 2 | 3 | export const run = async () => { 4 | const model = new PromptLayerOpenAI({ temperature: 0.9 }); 5 | const res = await model.call( 6 | "What would be a good company name a company that makes colorful socks?" 7 | ); 8 | console.log({ res }); 9 | }; 10 | -------------------------------------------------------------------------------- /examples/src/models/llm/llm_quick_start.ts: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "langchain/llms/openai"; 2 | 3 | export const run = async () => { 4 | const model = new OpenAI(); 5 | // `call` is a simple string-in, string-out method for interacting with the model. 6 | const resA = await model.call( 7 | "What would be a good company name a company that makes colorful socks?" 
8 | ); 9 | console.log({ resA }); 10 | // { resA: '\n\nSocktastic Colors' } 11 | }; 12 | -------------------------------------------------------------------------------- /examples/src/models/llm/llm_timeout.ts: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "langchain/llms/openai"; 2 | 3 | export const run = async () => { 4 | const model = new OpenAI( 5 | { temperature: 1, timeout: 1000 } // 1s timeout 6 | ); 7 | 8 | const resA = await model.call( 9 | "What would be a good company name a company that makes colorful socks?" 10 | ); 11 | 12 | console.log({ resA }); 13 | // '\n\nSocktastic Colors' } 14 | }; 15 | -------------------------------------------------------------------------------- /examples/src/models/llm/llm_with_tracing.ts: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "langchain/llms/openai"; 2 | import { ChatOpenAI } from "langchain/chat_models/openai"; 3 | import { SystemChatMessage, HumanChatMessage } from "langchain/schema"; 4 | import * as process from "process"; 5 | 6 | export const run = async () => { 7 | process.env.LANGCHAIN_HANDLER = "langchain"; 8 | const model = new OpenAI({ temperature: 0.9 }); 9 | const resA = await model.call( 10 | "What would be a good company name a company that makes colorful socks?" 
11 | ); 12 | console.log({ resA }); 13 | 14 | const chat = new ChatOpenAI({ temperature: 0 }); 15 | const system_message = new SystemChatMessage("You are to chat with a user."); 16 | const message = new HumanChatMessage("Hello!"); 17 | const resB = await chat.call([system_message, message]); 18 | console.log({ resB }); 19 | }; 20 | -------------------------------------------------------------------------------- /examples/src/prompts/load_from_hub.ts: -------------------------------------------------------------------------------- 1 | import { loadPrompt } from "langchain/prompts/load"; 2 | 3 | export const run = async () => { 4 | const prompt = await loadPrompt("lc://prompts/hello-world/prompt.yaml"); 5 | const res = await prompt.format({}); 6 | console.log({ res }); 7 | }; 8 | -------------------------------------------------------------------------------- /examples/src/retrievers/chatgpt-plugin.ts: -------------------------------------------------------------------------------- 1 | import { ChatGPTPluginRetriever } from "langchain/retrievers/remote"; 2 | 3 | export const run = async () => { 4 | const retriever = new ChatGPTPluginRetriever({ 5 | url: "http://0.0.0.0:8000", 6 | auth: { 7 | bearer: "super-secret-jwt-token-with-at-least-32-characters-long", 8 | }, 9 | }); 10 | 11 | const docs = await retriever.getRelevantDocuments("hello world"); 12 | 13 | console.log(docs); 14 | }; 15 | -------------------------------------------------------------------------------- /examples/src/retrievers/databerry.ts: -------------------------------------------------------------------------------- 1 | import { DataberryRetriever } from "langchain/retrievers/databerry"; 2 | 3 | export const run = async () => { 4 | const retriever = new DataberryRetriever({ 5 | datastoreUrl: "https://api.databerry.ai/query/clg1xg2h80000l708dymr0fxc", 6 | apiKey: "DATABERRY_API_KEY", // optional: needed for private datastores 7 | topK: 8, // optional: default value is 3 8 | }); 9 | 10 | const docs = 
await retriever.getRelevantDocuments("hello"); 11 | 12 | console.log(docs); 13 | }; 14 | -------------------------------------------------------------------------------- /examples/src/retrievers/metal.ts: -------------------------------------------------------------------------------- 1 | /* eslint-disable @typescript-eslint/no-non-null-assertion */ 2 | import Metal from "@getmetal/metal-sdk"; 3 | import { MetalRetriever } from "langchain/retrievers/metal"; 4 | 5 | export const run = async () => { 6 | const MetalSDK = Metal.default; 7 | 8 | const client = new MetalSDK( 9 | process.env.METAL_API_KEY!, 10 | process.env.METAL_CLIENT_ID!, 11 | process.env.METAL_APP_ID 12 | ); 13 | const retriever = new MetalRetriever({ client }); 14 | 15 | const docs = await retriever.getRelevantDocuments("hello"); 16 | 17 | console.log(docs); 18 | }; 19 | -------------------------------------------------------------------------------- /examples/src/retrievers/supabase_hybrid.ts: -------------------------------------------------------------------------------- 1 | import { OpenAIEmbeddings } from "langchain/embeddings/openai"; 2 | import { createClient } from "@supabase/supabase-js"; 3 | import { SupabaseHybridSearch } from "langchain/retrievers/supabase"; 4 | 5 | export const run = async () => { 6 | const client = createClient( 7 | process.env.SUPABASE_URL || "", 8 | process.env.SUPABASE_PRIVATE_KEY || "" 9 | ); 10 | 11 | const embeddings = new OpenAIEmbeddings(); 12 | 13 | const retriever = new SupabaseHybridSearch(embeddings, { 14 | client, 15 | // Below are the defaults, expecting that you set up your supabase table and functions according to the guide above. Please change if necessary. 
16 | similarityK: 2, 17 | keywordK: 2, 18 | tableName: "documents", 19 | similarityQueryName: "match_documents", 20 | keywordQueryName: "kw_match_documents", 21 | }); 22 | 23 | const results = await retriever.getRelevantDocuments("hello bye"); 24 | 25 | console.log(results); 26 | }; 27 | -------------------------------------------------------------------------------- /examples/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "@tsconfig/recommended", 3 | "compilerOptions": { 4 | "outDir": "dist", 5 | "lib": [ 6 | "ES2021", 7 | "ES2022.Object", 8 | "DOM" 9 | ], 10 | "target": "ES2021", 11 | "module": "nodenext", 12 | "sourceMap": true, 13 | "allowSyntheticDefaultImports": true, 14 | "baseUrl": "./src", 15 | "declaration": true, 16 | "experimentalDecorators": true, 17 | "noImplicitReturns": true, 18 | "noFallthroughCasesInSwitch": true, 19 | "noUnusedLocals": true, 20 | "noUnusedParameters": true, 21 | "useDefineForClassFields": true, 22 | "strictPropertyInitialization": false 23 | }, 24 | "exclude": [ 25 | "node_modules/", 26 | "dist/", 27 | "tests/" 28 | ], 29 | "include": [ 30 | "./src" 31 | ] 32 | } 33 | -------------------------------------------------------------------------------- /langchain/.env.example: -------------------------------------------------------------------------------- 1 | ANTHROPIC_API_KEY=ADD_YOURS_HERE 2 | COHERE_API_KEY=ADD_YOURS_HERE 3 | HUGGINGFACEHUB_API_KEY=ADD_YOURS_HERE 4 | OPENAI_API_KEY=ADD_YOURS_HERE 5 | OPENSEARCH_URL=http://127.0.0.1:9200 6 | PINECONE_API_KEY=ADD_YOURS_HERE 7 | PINECONE_ENVIRONMENT=ADD_YOURS_HERE 8 | PINECONE_INDEX=ADD_YOURS_HERE 9 | SERPAPI_API_KEY=ADD_YOURS_HERE 10 | SERPER_API_KEY=ADD_YOURS_HERE 11 | SUPABASE_PRIVATE_KEY=ADD_YOURS_HERE 12 | SUPABASE_URL=ADD_YOURS_HERE 13 | ZAPIER_NLA_API_KEY=ADD_YOURS_HERE 14 | ANTHROPIC_API_KEY=ADD_YOURS_HERE 15 | REPLICATE_API_KEY=ADD_YOURS_HERE 16 | MONGO_URI=ADD_YOURS_HERE 17 | MILVUS_URL=ADD_YOURS_HERE 18 
| WEAVIATE_HOST=ADD_YOURS_HERE 19 | WEAVIATE_SCHEME=ADD_YOURS_HERE 20 | WEAVIATE_API_KEY=ADD_YOURS_HERE 21 | -------------------------------------------------------------------------------- /langchain/.release-it.json: -------------------------------------------------------------------------------- 1 | { 2 | "github": { 3 | "release": true, 4 | "autoGenerate": true, 5 | "tokenRef": "GITHUB_TOKEN_RELEASE" 6 | }, 7 | "npm": { 8 | "versionArgs": [ 9 | "--workspaces-update=false" 10 | ] 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /langchain/babel.config.cjs: -------------------------------------------------------------------------------- 1 | // babel.config.js 2 | module.exports = { 3 | presets: [["@babel/preset-env", { targets: { node: true } }]], 4 | }; 5 | -------------------------------------------------------------------------------- /langchain/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | langchain-frontend: 4 | platform: linux/amd64 5 | image: notlangchain/langchainplus-frontend:latest 6 | ports: 7 | - 4173:4173 8 | environment: 9 | - BACKEND_URL=http://langchain-backend:8000 10 | - PUBLIC_BASE_URL=http://localhost:8000 11 | - PUBLIC_DEV_MODE=true 12 | depends_on: 13 | - langchain-backend 14 | langchain-backend: 15 | platform: linux/amd64 16 | image: notlangchain/langchainplus:latest 17 | environment: 18 | - PORT=8000 19 | - LANGCHAIN_ENV=local 20 | ports: 21 | - 8000:8000 22 | depends_on: 23 | - langchain-db 24 | langchain-db: 25 | image: postgres:14.1 26 | environment: 27 | - POSTGRES_PASSWORD=postgres 28 | - POSTGRES_USER=postgres 29 | - POSTGRES_DB=postgres 30 | ports: 31 | - 5432:5432 32 | -------------------------------------------------------------------------------- /langchain/jest.config.cjs: -------------------------------------------------------------------------------- 1 | /** @type 
{import('ts-jest').JestConfigWithTsJest} */ 2 | module.exports = { 3 | preset: "ts-jest/presets/default-esm", 4 | testEnvironment: "node", 5 | modulePathIgnorePatterns: ["dist/", "docs/"], 6 | moduleNameMapper: { 7 | "^(\\.{1,2}/.*)\\.js$": "$1", 8 | }, 9 | transform: { 10 | "^.+\\.m?[tj]sx?$": ["ts-jest", { useESM: true }], 11 | }, 12 | setupFiles: ["dotenv/config"], 13 | testTimeout: 20_000, 14 | }; 15 | -------------------------------------------------------------------------------- /langchain/src/agents/agent_toolkits/base.ts: -------------------------------------------------------------------------------- 1 | import { Tool } from "../../tools/base.js"; 2 | 3 | export abstract class Toolkit { 4 | abstract tools: Tool[]; 5 | } 6 | -------------------------------------------------------------------------------- /langchain/src/agents/agent_toolkits/index.ts: -------------------------------------------------------------------------------- 1 | export { JsonToolkit, createJsonAgent } from "./json/json.js"; 2 | export { SqlToolkit, createSqlAgent } from "./sql/sql.js"; 3 | export { 4 | RequestsToolkit, 5 | OpenApiToolkit, 6 | createOpenApiAgent, 7 | } from "./openapi/openapi.js"; 8 | export { 9 | VectorStoreInfo, 10 | VectorStoreToolkit, 11 | VectorStoreRouterToolkit, 12 | createVectorStoreAgent, 13 | createVectorStoreRouterAgent, 14 | } from "./vectorstore/vectorstore.js"; 15 | export { ZapierToolKit } from "./zapier/zapier.js"; 16 | -------------------------------------------------------------------------------- /langchain/src/agents/agent_toolkits/vectorstore/prompt.ts: -------------------------------------------------------------------------------- 1 | export const VECTOR_PREFIX = `You are an agent designed to answer questions about sets of documents. 2 | You have access to tools for interacting with the documents, and the inputs to the tools are questions. 
3 | Sometimes, you will be asked to provide sources for your questions, in which case you should use the appropriate tool to do so. 4 | If the question does not seem relevant to any of the tools provided, just return "I don't know" as the answer.`; 5 | 6 | export const VECTOR_ROUTER_PREFIX = `You are an agent designed to answer questions. 7 | You have access to tools for interacting with different sources, and the inputs to the tools are questions. 8 | Your main task is to decide which of the tools is relevant for answering question at hand. 9 | For complex questions, you can break the question down into sub questions and use tools to answers the sub questions.`; 10 | -------------------------------------------------------------------------------- /langchain/src/agents/agent_toolkits/zapier/zapier.ts: -------------------------------------------------------------------------------- 1 | import { Toolkit } from "../base.js"; 2 | import { Tool } from "../../../tools/base.js"; 3 | import { ZapierNLARunAction, ZapierNLAWrapper } from "../../../tools/zapier.js"; 4 | 5 | export class ZapierToolKit extends Toolkit { 6 | tools: Tool[] = []; 7 | 8 | static async fromZapierNLAWrapper( 9 | zapierNLAWrapper: ZapierNLAWrapper 10 | ): Promise { 11 | const toolkit = new ZapierToolKit(); 12 | const actions = await zapierNLAWrapper.listActions(); 13 | for (const action of actions) { 14 | const tool = new ZapierNLARunAction( 15 | zapierNLAWrapper, 16 | action.id, 17 | action.description, 18 | action.params 19 | ); 20 | toolkit.tools.push(tool); 21 | } 22 | return toolkit; 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /langchain/src/agents/chat/prompt.ts: -------------------------------------------------------------------------------- 1 | export const PREFIX = `Answer the following questions as best you can. 
You have access to the following tools:`; 2 | export const FORMAT_INSTRUCTIONS = `The way you use the tools is by specifying a json blob, denoted below by $JSON_BLOB 3 | Specifically, this $JSON_BLOB should have a "action" key (with the name of the tool to use) and a "action_input" key (with the input to the tool going here). 4 | The $JSON_BLOB should only contain a SINGLE action, do NOT return a list of multiple actions. Here is an example of a valid $JSON_BLOB: 5 | 6 | \`\`\` 7 | {{ 8 | "action": "calculator", 9 | "action_input": "1 + 2" 10 | }} 11 | \`\`\` 12 | 13 | ALWAYS use the following format: 14 | 15 | Question: the input question you must answer 16 | Thought: you should always think about what to do 17 | Action: 18 | \`\`\` 19 | $JSON_BLOB 20 | \`\`\` 21 | Observation: the result of the action 22 | ... (this Thought/Action/Observation can repeat N times) 23 | Thought: I now know the final answer 24 | Final Answer: the final answer to the original input question`; 25 | export const SUFFIX = `Begin! 
Reminder to always use the exact characters \`Final Answer\` when responding.`; 26 | -------------------------------------------------------------------------------- /langchain/src/agents/helpers.ts: -------------------------------------------------------------------------------- 1 | import type { SerializedAgentT, AgentInput } from "./types.js"; 2 | import { Tool } from "../tools/base.js"; 3 | import { LLMChain } from "../chains/llm_chain.js"; 4 | import { BaseLanguageModel } from "../base_language/index.js"; 5 | 6 | export const deserializeHelper = async < 7 | T extends string, 8 | U extends Record, 9 | V extends AgentInput, 10 | Z 11 | >( 12 | llm: BaseLanguageModel | undefined, 13 | tools: Tool[] | undefined, 14 | data: SerializedAgentT, 15 | fromLLMAndTools: (llm: BaseLanguageModel, tools: Tool[], args: U) => Z, 16 | fromConstructor: (args: V) => Z 17 | ): Promise => { 18 | if (data.load_from_llm_and_tools) { 19 | if (!llm) { 20 | throw new Error("Loading from llm and tools, llm must be provided."); 21 | } 22 | 23 | if (!tools) { 24 | throw new Error("Loading from llm and tools, tools must be provided."); 25 | } 26 | 27 | return fromLLMAndTools(llm, tools, data); 28 | } 29 | if (!data.llm_chain) { 30 | throw new Error("Loading from constructor, llm_chain must be provided."); 31 | } 32 | 33 | const llmChain = await LLMChain.deserialize(data.llm_chain); 34 | return fromConstructor({ ...data, llmChain }); 35 | }; 36 | -------------------------------------------------------------------------------- /langchain/src/agents/index.ts: -------------------------------------------------------------------------------- 1 | export { 2 | StoppingMethod, 3 | SerializedAgentT, 4 | AgentInput, 5 | SerializedZeroShotAgent, 6 | SerializedAgent, 7 | AgentActionOutputParser, 8 | } from "./types.js"; 9 | export { Agent, BaseSingleActionAgent, LLMSingleActionAgent } from "./agent.js"; 10 | export { AgentExecutor } from "./executor.js"; 11 | export { ZeroShotAgent } from 
"./mrkl/index.js"; 12 | export { ChatAgent } from "./chat/index.js"; 13 | export { 14 | ChatConversationalAgent, 15 | ChatConversationalAgentOutputParser, 16 | ChatConversationalAgentInput, 17 | } from "./chat_convo/index.js"; 18 | export { Tool } from "../tools/base.js"; 19 | export { initializeAgentExecutor } from "./initialize.js"; 20 | 21 | export { 22 | SqlToolkit, 23 | JsonToolkit, 24 | RequestsToolkit, 25 | OpenApiToolkit, 26 | VectorStoreInfo, 27 | VectorStoreToolkit, 28 | VectorStoreRouterToolkit, 29 | ZapierToolKit, 30 | createSqlAgent, 31 | createJsonAgent, 32 | createOpenApiAgent, 33 | createVectorStoreAgent, 34 | } from "./agent_toolkits/index.js"; 35 | -------------------------------------------------------------------------------- /langchain/src/agents/load.ts: -------------------------------------------------------------------------------- 1 | import { Agent } from "./agent.js"; 2 | import { Tool } from "../tools/base.js"; 3 | import { BaseLanguageModel } from "../base_language/index.js"; 4 | import { loadFromHub } from "../util/hub.js"; 5 | import { FileLoader, loadFromFile } from "../util/load.js"; 6 | import { parseFileConfig } from "../util/parse.js"; 7 | 8 | const loadAgentFromFile: FileLoader = async ( 9 | file: string, 10 | path: string, 11 | llmAndTools?: { llm?: BaseLanguageModel; tools?: Tool[] } 12 | ) => { 13 | const serialized = parseFileConfig(file, path); 14 | return Agent.deserialize({ ...serialized, ...llmAndTools }); 15 | }; 16 | 17 | export const loadAgent = async ( 18 | uri: string, 19 | llmAndTools?: { llm?: BaseLanguageModel; tools?: Tool[] } 20 | ): Promise => { 21 | const hubResult = await loadFromHub( 22 | uri, 23 | loadAgentFromFile, 24 | "agents", 25 | new Set(["json", "yaml"]), 26 | llmAndTools 27 | ); 28 | if (hubResult) { 29 | return hubResult; 30 | } 31 | 32 | return loadFromFile(uri, loadAgentFromFile, llmAndTools); 33 | }; 34 | -------------------------------------------------------------------------------- 
/langchain/src/agents/mrkl/prompt.ts: -------------------------------------------------------------------------------- 1 | export const PREFIX = `Answer the following questions as best you can. You have access to the following tools:`; 2 | export const formatInstructions = ( 3 | toolNames: string 4 | ) => `Use the following format: 5 | 6 | Question: the input question you must answer 7 | Thought: you should always think about what to do 8 | Action: the action to take, should be one of [${toolNames}] 9 | Action Input: the input to the action 10 | Observation: the result of the action 11 | ... (this Thought/Action/Action Input/Observation can repeat N times) 12 | Thought: I now know the final answer 13 | Final Answer: the final answer to the original input question`; 14 | export const SUFFIX = `Begin! 15 | 16 | Question: {input} 17 | Thought:{agent_scratchpad}`; 18 | -------------------------------------------------------------------------------- /langchain/src/agents/tests/calculator.test.ts: -------------------------------------------------------------------------------- 1 | import { test, expect } from "@jest/globals"; 2 | import { Calculator } from "../../tools/calculator.js"; 3 | 4 | test("Calculator tool, sum", async () => { 5 | const calculator = new Calculator(); 6 | const result = await calculator.call("1 + 1"); 7 | expect(result).toBe("2"); 8 | }); 9 | 10 | test("Calculator tool, product", async () => { 11 | const calculator = new Calculator(); 12 | const result = await calculator.call("2 * 3"); 13 | expect(result).toBe("6"); 14 | }); 15 | 16 | test("Calculator tool, division", async () => { 17 | const calculator = new Calculator(); 18 | const result = await calculator.call("7 /2"); 19 | expect(result).toBe("3.5"); 20 | }); 21 | 22 | test("Calculator tool, exponentiation", async () => { 23 | const calculator = new Calculator(); 24 | const result = await calculator.call("2 ^ 8"); 25 | expect(result).toBe("256"); 26 | }); 27 | 28 | test("Calculator tool, 
complicated expression", async () => { 29 | const calculator = new Calculator(); 30 | const result = await calculator.call("((2 + 3) * 4) / 2"); 31 | expect(result).toBe("10"); 32 | }); 33 | -------------------------------------------------------------------------------- /langchain/src/callbacks/index.ts: -------------------------------------------------------------------------------- 1 | export { 2 | CallbackManager, 3 | ConsoleCallbackHandler, 4 | BaseCallbackHandler, 5 | } from "./base.js"; 6 | 7 | export { LangChainTracer } from "./tracers.js"; 8 | 9 | export { getCallbackManager, setTracerSession } from "./utils.js"; 10 | -------------------------------------------------------------------------------- /langchain/src/callbacks/stream.ts: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kevin-fruitful/chadgpt-ui/6a2250a8469661bdde53a6d1d05a2a929c46a752/langchain/src/callbacks/stream.ts -------------------------------------------------------------------------------- /langchain/src/chains/index.ts: -------------------------------------------------------------------------------- 1 | export { BaseChain, ChainInputs } from "./base.js"; 2 | export { LLMChain, ConversationChain } from "./llm_chain.js"; 3 | export { 4 | StuffDocumentsChain, 5 | MapReduceDocumentsChain, 6 | RefineDocumentsChain, 7 | } from "./combine_docs_chain.js"; 8 | export { ChatVectorDBQAChain } from "./chat_vector_db_chain.js"; 9 | export { AnalyzeDocumentChain } from "./analyze_documents_chain.js"; 10 | export { VectorDBQAChain } from "./vector_db_qa.js"; 11 | export { 12 | loadQAChain, 13 | loadQAStuffChain, 14 | loadQAMapReduceChain, 15 | loadQARefineChain, 16 | } from "./question_answering/load.js"; 17 | export { loadSummarizationChain } from "./summarization/load.js"; 18 | export { SqlDatabaseChain } from "./sql_db/sql_db_chain.js"; 19 | export { ConversationalRetrievalQAChain } from "./conversational_retrieval_chain.js"; 20 | 
export { RetrievalQAChain } from "./retrieval_qa.js"; 21 | export { 22 | SerializedLLMChain, 23 | SerializedSqlDatabaseChain, 24 | SerializedAnalyzeDocumentChain, 25 | SerializedBaseChain, 26 | SerializedChatVectorDBQAChain, 27 | SerializedMapReduceDocumentsChain, 28 | SerializedStuffDocumentsChain, 29 | SerializedVectorDBQAChain, 30 | SerializedRefineDocumentsChain, 31 | } from "./serde.js"; 32 | -------------------------------------------------------------------------------- /langchain/src/chains/summarization/stuff_prompts.ts: -------------------------------------------------------------------------------- 1 | /* eslint-disable tree-shaking/no-side-effects-in-initialization */ 2 | /* eslint-disable spaced-comment */ 3 | import { PromptTemplate } from "../../prompts/prompt.js"; 4 | 5 | const template = `Write a concise summary of the following: 6 | 7 | 8 | "{text}" 9 | 10 | 11 | CONCISE SUMMARY:`; 12 | 13 | export const DEFAULT_PROMPT = /*#__PURE__*/ new PromptTemplate({ 14 | template, 15 | inputVariables: ["text"], 16 | }); 17 | -------------------------------------------------------------------------------- /langchain/src/chains/summarization/tests/load.int.test.ts: -------------------------------------------------------------------------------- 1 | import { test } from "@jest/globals"; 2 | import { OpenAI } from "../../../llms/openai.js"; 3 | import { loadSummarizationChain } from "../load.js"; 4 | import { Document } from "../../../document.js"; 5 | 6 | test("Test loadSummzationChain", async () => { 7 | const model = new OpenAI({ modelName: "text-ada-001" }); 8 | const chain = loadSummarizationChain(model); 9 | const docs = [ 10 | new Document({ pageContent: "foo" }), 11 | new Document({ pageContent: "bar" }), 12 | new Document({ pageContent: "baz" }), 13 | ]; 14 | const res = await chain.call({ input_documents: docs, question: "Whats up" }); 15 | console.log({ res }); 16 | }); 17 | 18 | test("Test loadSummarizationChain map_reduce", async () => { 19 | const 
model = new OpenAI({ modelName: "text-ada-001" }); 20 | const chain = loadSummarizationChain(model, { type: "map_reduce" }); 21 | const docs = [ 22 | new Document({ pageContent: "foo" }), 23 | new Document({ pageContent: "bar" }), 24 | new Document({ pageContent: "baz" }), 25 | ]; 26 | const res = await chain.call({ input_documents: docs, question: "Whats up" }); 27 | console.log({ res }); 28 | }); 29 | -------------------------------------------------------------------------------- /langchain/src/chat_models/index.ts: -------------------------------------------------------------------------------- 1 | // eslint-disable-next-line tree-shaking/no-side-effects-in-initialization 2 | /* #__PURE__ */ console.error( 3 | "[WARN] Importing from 'langchain/chat_models' is deprecated. Import from eg. 'langchain/chat_models/openai' instead. See https://js.langchain.com/docs/getting-started/install#updating-from-0052 for upgrade instructions." 4 | ); 5 | 6 | export { BaseChatModel, BaseChatModelParams, SimpleChatModel } from "./base.js"; 7 | export { ChatOpenAI } from "./openai.js"; 8 | export { ChatAnthropic } from "./anthropic.js"; 9 | -------------------------------------------------------------------------------- /langchain/src/docstore/base.ts: -------------------------------------------------------------------------------- 1 | import { Document } from "../document.js"; 2 | 3 | export abstract class Docstore { 4 | abstract search(search: string): Document | string; 5 | 6 | abstract add(texts: Record): void; 7 | } 8 | -------------------------------------------------------------------------------- /langchain/src/docstore/in_memory.ts: -------------------------------------------------------------------------------- 1 | import { Document } from "../document.js"; 2 | import { Docstore } from "./base.js"; 3 | 4 | export class InMemoryDocstore extends Docstore { 5 | _docs: Map; 6 | 7 | constructor(docs?: Map) { 8 | super(); 9 | this._docs = docs ?? 
new Map(); 10 | } 11 | 12 | /** Method for getting count of documents in _docs */ 13 | get count() { 14 | return this._docs.size; 15 | } 16 | 17 | search(search: string): Document | string { 18 | return this._docs.get(search) ?? `ID ${search} not found.`; 19 | } 20 | 21 | add(texts: Record): void { 22 | const keys = [...this._docs.keys()]; 23 | const overlapping = Object.keys(texts).filter((x) => keys.includes(x)); 24 | 25 | if (overlapping.length > 0) { 26 | throw new Error(`Tried to add ids that already exist: ${overlapping}`); 27 | } 28 | 29 | for (const [key, value] of Object.entries(texts)) { 30 | this._docs.set(key, value); 31 | } 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /langchain/src/docstore/index.ts: -------------------------------------------------------------------------------- 1 | export { Document } from "../document.js"; 2 | export { Docstore } from "./base.js"; 3 | export { InMemoryDocstore } from "./in_memory.js"; 4 | -------------------------------------------------------------------------------- /langchain/src/document.ts: -------------------------------------------------------------------------------- 1 | export interface DocumentParams< 2 | // eslint-disable-next-line @typescript-eslint/no-explicit-any 3 | Metadata extends Record = Record 4 | > { 5 | pageContent: string; 6 | 7 | metadata: Metadata; 8 | } 9 | 10 | /** 11 | * Interface for interacting with a document. 12 | */ 13 | export class Document< 14 | // eslint-disable-next-line @typescript-eslint/no-explicit-any 15 | Metadata extends Record = Record 16 | > implements DocumentParams 17 | { 18 | pageContent: string; 19 | 20 | metadata: Metadata; 21 | 22 | constructor(fields?: Partial>) { 23 | this.pageContent = fields?.pageContent 24 | ? fields.pageContent.toString() 25 | : this.pageContent; 26 | this.metadata = fields?.metadata ?? 
({} as Metadata); 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /langchain/src/document_loaders/base.ts: -------------------------------------------------------------------------------- 1 | import { 2 | RecursiveCharacterTextSplitter, 3 | TextSplitter, 4 | } from "../text_splitter.js"; 5 | import { Document } from "../document.js"; 6 | 7 | export interface DocumentLoader { 8 | load(): Promise; 9 | loadAndSplit(textSplitter?: TextSplitter): Promise; 10 | } 11 | 12 | export abstract class BaseDocumentLoader implements DocumentLoader { 13 | abstract load(): Promise; 14 | 15 | async loadAndSplit( 16 | splitter: TextSplitter = new RecursiveCharacterTextSplitter() 17 | ): Promise { 18 | const docs = await this.load(); 19 | return splitter.splitDocuments(docs); 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /langchain/src/document_loaders/fs/docx.ts: -------------------------------------------------------------------------------- 1 | import { Document } from "../../document.js"; 2 | import { BufferLoader } from "./buffer.js"; 3 | 4 | export class DocxLoader extends BufferLoader { 5 | constructor(filePathOrBlob: string | Blob) { 6 | super(filePathOrBlob); 7 | } 8 | 9 | public async parse( 10 | raw: Buffer, 11 | metadata: Document["metadata"] 12 | ): Promise { 13 | const { extractRawText } = await DocxLoaderImports(); 14 | const docx = await extractRawText({ 15 | buffer: raw, 16 | }); 17 | return [ 18 | new Document({ 19 | pageContent: docx.value, 20 | metadata, 21 | }), 22 | ]; 23 | } 24 | } 25 | 26 | async function DocxLoaderImports() { 27 | try { 28 | const { default: mod } = await import("mammoth"); 29 | const { extractRawText } = mod; 30 | return { extractRawText }; 31 | } catch (e) { 32 | console.error(e); 33 | throw new Error( 34 | "Failed to load mammoth. Please install it with eg. `npm install mammoth`." 
35 | ); 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /langchain/src/document_loaders/fs/notion.ts: -------------------------------------------------------------------------------- 1 | import { DirectoryLoader, UnknownHandling } from "./directory.js"; 2 | import { TextLoader } from "./text.js"; 3 | 4 | export class NotionLoader extends DirectoryLoader { 5 | constructor(directoryPath: string) { 6 | super( 7 | directoryPath, 8 | { 9 | ".md": (filePath) => new TextLoader(filePath), 10 | }, 11 | true, 12 | UnknownHandling.Ignore 13 | ); 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /langchain/src/document_loaders/fs/srt.ts: -------------------------------------------------------------------------------- 1 | import type SRTParserT from "srt-parser-2"; 2 | import { TextLoader } from "./text.js"; 3 | 4 | export class SRTLoader extends TextLoader { 5 | constructor(filePathOrBlob: string | Blob) { 6 | super(filePathOrBlob); 7 | } 8 | 9 | protected async parse(raw: string): Promise { 10 | const { SRTParser2 } = await SRTLoaderImports(); 11 | const parser = new SRTParser2(); 12 | const srts = parser.fromSrt(raw); 13 | return [srts.map((srt) => srt.text).join(" ")]; 14 | } 15 | } 16 | 17 | async function SRTLoaderImports(): Promise<{ 18 | SRTParser2: typeof SRTParserT.default; 19 | }> { 20 | try { 21 | const SRTParser2 = (await import("srt-parser-2")).default.default; 22 | return { SRTParser2 }; 23 | } catch (e) { 24 | throw new Error( 25 | "Please install srt-parser-2 as a dependency with, e.g. 
`yarn add srt-parser-2`" 26 | ); 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /langchain/src/document_loaders/tests/cheerio.test.ts: -------------------------------------------------------------------------------- 1 | import { expect, test } from "@jest/globals"; 2 | import { CheerioWebBaseLoader } from "../web/cheerio.js"; 3 | 4 | test("Test cheerio web scraper loader", async () => { 5 | const loader = new CheerioWebBaseLoader( 6 | "https://news.ycombinator.com/item?id=34817881" 7 | ); 8 | await loader.load(); 9 | }); 10 | 11 | test("Test cheerio web scraper loader with selector", async () => { 12 | const selectH1 = "h1"; 13 | const loader = new CheerioWebBaseLoader("https://about.google/commitments/", { 14 | selector: selectH1, 15 | }); 16 | 17 | const doc = await loader.load(); 18 | expect(doc[0].pageContent.trim()).toBe( 19 | "Committed to significantly improving the lives of as many people as possible." 20 | ); 21 | }); 22 | -------------------------------------------------------------------------------- /langchain/src/document_loaders/tests/college_confidential.int.test.ts: -------------------------------------------------------------------------------- 1 | import { test } from "@jest/globals"; 2 | import { CollegeConfidentialLoader } from "../web/college_confidential.js"; 3 | 4 | test("Test College confidential loader", async () => { 5 | const loader = new CollegeConfidentialLoader( 6 | "https://www.collegeconfidential.com/colleges/brown-university/" 7 | ); 8 | await loader.load(); 9 | }); 10 | -------------------------------------------------------------------------------- /langchain/src/document_loaders/tests/docx.test.ts: -------------------------------------------------------------------------------- 1 | import { test, expect } from "@jest/globals"; 2 | import * as url from "node:url"; 3 | import * as path from "node:path"; 4 | import { DocxLoader } from "../fs/docx.js"; 5 | 6 | test("Test Word doc 
loader from file", async () => { 7 | const filePath = path.resolve( 8 | path.dirname(url.fileURLToPath(import.meta.url)), 9 | "./example_data/attention.docx" 10 | ); 11 | const loader = new DocxLoader(filePath); 12 | const docs = await loader.load(); 13 | 14 | expect(docs.length).toBe(1); // not much text in the example 15 | expect(docs[0].pageContent).toContain("an interesting activity"); 16 | }); 17 | -------------------------------------------------------------------------------- /langchain/src/document_loaders/tests/epub.test.ts: -------------------------------------------------------------------------------- 1 | import { test, expect } from "@jest/globals"; 2 | import * as url from "node:url"; 3 | import * as path from "node:path"; 4 | import { EPubLoader } from "../fs/epub.js"; 5 | 6 | test("Test EPub loader from file", async () => { 7 | const filePath = path.resolve( 8 | path.dirname(url.fileURLToPath(import.meta.url)), 9 | "./example_data/attention.epub" 10 | ); 11 | const loader = new EPubLoader(filePath); 12 | const docs = await loader.load(); 13 | 14 | expect(docs.length).toBe(3); 15 | expect(docs[0].pageContent).toContain("Attention Is All You Need"); 16 | }); 17 | 18 | test("Test EPub loader from file to single document", async () => { 19 | const filePath = path.resolve( 20 | path.dirname(url.fileURLToPath(import.meta.url)), 21 | "./example_data/attention.epub" 22 | ); 23 | const loader = new EPubLoader(filePath, { splitChapters: false }); 24 | const docs = await loader.load(); 25 | 26 | expect(docs.length).toBe(1); 27 | expect(docs[0].pageContent).toContain("Attention Is All You Need"); 28 | }); 29 | -------------------------------------------------------------------------------- /langchain/src/document_loaders/tests/example_data/1706.03762.pdf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/kevin-fruitful/chadgpt-ui/6a2250a8469661bdde53a6d1d05a2a929c46a752/langchain/src/document_loaders/tests/example_data/1706.03762.pdf -------------------------------------------------------------------------------- /langchain/src/document_loaders/tests/example_data/attention.docx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kevin-fruitful/chadgpt-ui/6a2250a8469661bdde53a6d1d05a2a929c46a752/langchain/src/document_loaders/tests/example_data/attention.docx -------------------------------------------------------------------------------- /langchain/src/document_loaders/tests/example_data/attention.epub: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kevin-fruitful/chadgpt-ui/6a2250a8469661bdde53a6d1d05a2a929c46a752/langchain/src/document_loaders/tests/example_data/attention.epub -------------------------------------------------------------------------------- /langchain/src/document_loaders/tests/example_data/complex.json: -------------------------------------------------------------------------------- 1 | { 2 | "1": { 3 | "body": "BD 2023 SUMMER", 4 | "from": "LinkedIn Job", 5 | "labels": ["IMPORTANT", "CATEGORY_UPDATES", "INBOX"] 6 | }, 7 | "2": { 8 | "body": "Intern, Treasury and other roles are available", 9 | "from": "LinkedIn Job2", 10 | "labels": ["IMPORTANT"], 11 | "other": { 12 | "name": "plop", 13 | "surname": "bob" 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /langchain/src/document_loaders/tests/example_data/example.txt: -------------------------------------------------------------------------------- 1 | Foo 2 | Bar 3 | Baz 4 | 5 | -------------------------------------------------------------------------------- /langchain/src/document_loaders/tests/github.int.test.ts: 
-------------------------------------------------------------------------------- 1 | import { test } from "@jest/globals"; 2 | import { GithubRepoLoader } from "../web/github.js"; 3 | 4 | test("Test GithubRepoLoader", async () => { 5 | const loader = new GithubRepoLoader( 6 | "https://github.com/hwchase17/langchainjs", 7 | { branch: "main", recursive: false, unknown: "warn" } 8 | ); 9 | const documents = await loader.load(); 10 | console.log(documents[0].pageContent); 11 | }); 12 | -------------------------------------------------------------------------------- /langchain/src/document_loaders/tests/hn.int.test.ts: -------------------------------------------------------------------------------- 1 | import { test } from "@jest/globals"; 2 | import { HNLoader } from "../web/hn.js"; 3 | 4 | test("Test Hacker News loader", async () => { 5 | const loader = new HNLoader("https://news.ycombinator.com/item?id=34817881"); 6 | await loader.load(); 7 | }); 8 | -------------------------------------------------------------------------------- /langchain/src/document_loaders/tests/imsdb.test.ts: -------------------------------------------------------------------------------- 1 | import { test } from "@jest/globals"; 2 | import { IMSDBLoader } from "../web/imsdb.js"; 3 | 4 | test("Test IMSDB loader", async () => { 5 | const loader = new IMSDBLoader( 6 | "https://imsdb.com/scripts/BlacKkKlansman.html" 7 | ); 8 | await loader.load(); 9 | }); 10 | -------------------------------------------------------------------------------- /langchain/src/document_loaders/tests/jsonl.test.ts: -------------------------------------------------------------------------------- 1 | import * as url from "node:url"; 2 | import * as path from "node:path"; 3 | import { test, expect } from "@jest/globals"; 4 | import { JSONLinesLoader } from "../fs/json.js"; 5 | import { Document } from "../../document.js"; 6 | 7 | test("Test JSON loader from file", async () => { 8 | const filePath = path.resolve( 9 | 
path.dirname(url.fileURLToPath(import.meta.url)), 10 | "./example_data/Star_Wars_The_Clone_Wars_S06E07_Crisis_at_the_Heart.jsonl" 11 | ); 12 | const loader = new JSONLinesLoader(filePath, "/html"); 13 | const docs = await loader.load(); 14 | expect(docs.length).toBe(32); 15 | expect(docs[0]).toEqual( 16 | new Document({ 17 | metadata: { source: filePath, line: 1 }, 18 | pageContent: 19 | "Corruption discovered at the core of the Banking Clan!", 20 | }) 21 | ); 22 | }); 23 | -------------------------------------------------------------------------------- /langchain/src/document_loaders/tests/notion.test.ts: -------------------------------------------------------------------------------- 1 | import { test, expect } from "@jest/globals"; 2 | import * as url from "node:url"; 3 | import * as path from "node:path"; 4 | import { NotionLoader } from "../fs/notion.js"; 5 | 6 | test("Test Notion Loader", async () => { 7 | const directoryPath = path.resolve( 8 | path.dirname(url.fileURLToPath(import.meta.url)), 9 | "./example_data" 10 | ); 11 | const loader = new NotionLoader(directoryPath); 12 | const docs = await loader.load(); 13 | 14 | expect(docs.length).toBe(1); 15 | expect(docs[0].pageContent).toContain("Testing the notion markdownloader"); 16 | }); 17 | -------------------------------------------------------------------------------- /langchain/src/document_loaders/tests/playwright_web.int.test.ts: -------------------------------------------------------------------------------- 1 | import { expect, test } from "@jest/globals"; 2 | import { PlaywrightWebBaseLoader } from "../web/playwright.js"; 3 | 4 | test("Test playwright web scraper loader", async () => { 5 | const loader = new PlaywrightWebBaseLoader("https://www.google.com/"); 6 | const result = await loader.load(); 7 | 8 | expect(result).toBeDefined(); 9 | expect(result.length).toBe(1); 10 | }, 20_000); 11 | 12 | test("Test playwright web scraper loader with evaluate options", async () => { 13 | let nrTimesCalled 
= 0; 14 | const loader = new PlaywrightWebBaseLoader("https://www.google.com/", { 15 | launchOptions: { 16 | headless: true, 17 | }, 18 | gotoOptions: { 19 | waitUntil: "domcontentloaded", 20 | }, 21 | async evaluate(page) { 22 | nrTimesCalled += 1; 23 | return page.content(); 24 | }, 25 | }); 26 | const result = await loader.load(); 27 | 28 | expect(nrTimesCalled).toBe(1); 29 | expect(result).toBeDefined(); 30 | expect(result.length).toBe(1); 31 | }, 20_000); 32 | -------------------------------------------------------------------------------- /langchain/src/document_loaders/tests/puppeteer.int.test.ts: -------------------------------------------------------------------------------- 1 | import { expect, test } from "@jest/globals"; 2 | import { PuppeteerWebBaseLoader } from "../web/puppeteer.js"; 3 | 4 | test.skip("Test puppeteer web scraper loader", async () => { 5 | const loader = new PuppeteerWebBaseLoader("https://www.google.com/"); 6 | const result = await loader.load(); 7 | 8 | expect(result).toBeDefined(); 9 | expect(result.length).toBe(1); 10 | }, 20_000); 11 | 12 | test.skip("Test puppeteer web scraper loader with evaluate options", async () => { 13 | let nrTimesCalled = 0; 14 | const loader = new PuppeteerWebBaseLoader("https://www.google.com/", { 15 | launchOptions: { 16 | headless: true, 17 | ignoreDefaultArgs: ["--disable-extensions"], 18 | }, 19 | gotoOptions: { 20 | waitUntil: "domcontentloaded", 21 | }, 22 | async evaluate(page) { 23 | nrTimesCalled += 1; 24 | return page.evaluate(() => document.body.innerHTML); 25 | }, 26 | }); 27 | const result = await loader.load(); 28 | 29 | expect(nrTimesCalled).toBe(1); 30 | expect(result).toBeDefined(); 31 | expect(result.length).toBe(1); 32 | }, 20_000); 33 | -------------------------------------------------------------------------------- /langchain/src/document_loaders/tests/srt-blob.test.ts: -------------------------------------------------------------------------------- 1 | import * as url from 
"node:url"; 2 | import * as path from "node:path"; 3 | import * as fs from "node:fs/promises"; 4 | import { test, expect } from "@jest/globals"; 5 | import { SRTLoader } from "../fs/srt.js"; 6 | 7 | test("Test SRT loader from blob", async () => { 8 | const filePath = path.resolve( 9 | path.dirname(url.fileURLToPath(import.meta.url)), 10 | "./example_data/Star_Wars_The_Clone_Wars_S06E07_Crisis_at_the_Heart.srt" 11 | ); 12 | const loader = new SRTLoader( 13 | new Blob([await fs.readFile(filePath)], { type: "application/x-subrip" }) 14 | ); 15 | const docs = await loader.load(); 16 | expect(docs.length).toBe(1); 17 | expect(docs[0].metadata).toMatchInlineSnapshot(` 18 | { 19 | "blobType": "application/x-subrip", 20 | "source": "blob", 21 | } 22 | `); 23 | expect(docs[0].pageContent).toContain("Corruption discovered"); 24 | }); 25 | -------------------------------------------------------------------------------- /langchain/src/document_loaders/tests/srt.test.ts: -------------------------------------------------------------------------------- 1 | import * as url from "node:url"; 2 | import * as path from "node:path"; 3 | import { test, expect } from "@jest/globals"; 4 | import { SRTLoader } from "../fs/srt.js"; 5 | 6 | test("Test SRT loader from file", async () => { 7 | const filePath = path.resolve( 8 | path.dirname(url.fileURLToPath(import.meta.url)), 9 | "./example_data/Star_Wars_The_Clone_Wars_S06E07_Crisis_at_the_Heart.srt" 10 | ); 11 | const loader = new SRTLoader(filePath); 12 | const docs = await loader.load(); 13 | expect(docs.length).toBe(1); 14 | expect(docs[0].metadata).toMatchInlineSnapshot(` 15 | { 16 | "source": "${filePath}", 17 | } 18 | `); 19 | expect(docs[0].pageContent).toContain("Corruption discovered"); 20 | }); 21 | -------------------------------------------------------------------------------- /langchain/src/document_loaders/tests/text-blob.test.ts: -------------------------------------------------------------------------------- 1 | import { 
test, expect } from "@jest/globals"; 2 | import { TextLoader } from "../fs/text.js"; 3 | 4 | test("Test Text loader from blob", async () => { 5 | const loader = new TextLoader( 6 | new Blob(["Hello, world!"], { type: "text/plain" }) 7 | ); 8 | const docs = await loader.load(); 9 | 10 | expect(docs.length).toBe(1); 11 | expect(docs[0].pageContent).toBe("Hello, world!"); 12 | expect(docs[0].metadata).toMatchInlineSnapshot(` 13 | { 14 | "blobType": "text/plain", 15 | "source": "blob", 16 | } 17 | `); 18 | }); 19 | -------------------------------------------------------------------------------- /langchain/src/document_loaders/tests/text.test.ts: -------------------------------------------------------------------------------- 1 | import { test, expect } from "@jest/globals"; 2 | import { TextLoader } from "../fs/text.js"; 3 | 4 | test("Test Text loader from file", async () => { 5 | const loader = new TextLoader( 6 | "../examples/src/document_loaders/example_data/example.txt" 7 | ); 8 | const docs = await loader.load(); 9 | 10 | expect(docs.length).toBe(1); 11 | expect(docs[0].pageContent).toMatchInlineSnapshot(` 12 | "Foo 13 | Bar 14 | Baz 15 | 16 | " 17 | `); 18 | expect(docs[0].metadata).toMatchInlineSnapshot(` 19 | { 20 | "source": "../examples/src/document_loaders/example_data/example.txt", 21 | } 22 | `); 23 | }); 24 | -------------------------------------------------------------------------------- /langchain/src/document_loaders/tests/unstructured.test.ts: -------------------------------------------------------------------------------- 1 | import { test, expect } from "@jest/globals"; 2 | import { UnstructuredLoader } from "../fs/unstructured.js"; 3 | 4 | test.skip("Test Unstructured base loader", async () => { 5 | const loader = new UnstructuredLoader( 6 | "http://127.0.0.1:8000/general/v0.0.4/general", 7 | "langchain/src/document_loaders/tests/example_data/example.txt" 8 | ); 9 | const docs = await loader.load(); 10 | 11 | expect(docs.length).toBe(3); 12 | for 
(const doc of docs) { 13 | expect(typeof doc.pageContent).toBe("string"); 14 | } 15 | }); 16 | -------------------------------------------------------------------------------- /langchain/src/document_loaders/web/college_confidential.ts: -------------------------------------------------------------------------------- 1 | import { Document } from "../../document.js"; 2 | import { CheerioWebBaseLoader } from "./cheerio.js"; 3 | 4 | export class CollegeConfidentialLoader extends CheerioWebBaseLoader { 5 | constructor(webPath: string) { 6 | super(webPath); 7 | } 8 | 9 | public async load(): Promise { 10 | const $ = await this.scrape(); 11 | const text = $("main[class='skin-handler']").text(); 12 | const metadata = { source: this.webPath }; 13 | return [new Document({ pageContent: text, metadata })]; 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /langchain/src/document_loaders/web/imsdb.ts: -------------------------------------------------------------------------------- 1 | import { Document } from "../../document.js"; 2 | import { CheerioWebBaseLoader } from "./cheerio.js"; 3 | 4 | export class IMSDBLoader extends CheerioWebBaseLoader { 5 | constructor(public webPath: string) { 6 | super(webPath); 7 | } 8 | 9 | public async load(): Promise { 10 | const $ = await this.scrape(); 11 | const text = $("td[class='scrtext']").text().trim(); 12 | const metadata = { source: this.webPath }; 13 | return [new Document({ pageContent: text, metadata })]; 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /langchain/src/embeddings/base.ts: -------------------------------------------------------------------------------- 1 | import { AsyncCaller, AsyncCallerParams } from "../util/async_caller.js"; 2 | 3 | export type EmbeddingsParams = AsyncCallerParams; 4 | 5 | export abstract class Embeddings { 6 | /** 7 | * The async caller should be used by subclasses to make any async calls, 8 | * 
which will thus benefit from the concurrency and retry logic. 9 | */ 10 | caller: AsyncCaller; 11 | 12 | constructor(params: EmbeddingsParams) { 13 | this.caller = new AsyncCaller(params ?? {}); 14 | } 15 | 16 | abstract embedDocuments(documents: string[]): Promise; 17 | 18 | abstract embedQuery(document: string): Promise; 19 | } 20 | -------------------------------------------------------------------------------- /langchain/src/embeddings/fake.ts: -------------------------------------------------------------------------------- 1 | import { Embeddings, EmbeddingsParams } from "./base.js"; 2 | 3 | export class FakeEmbeddings extends Embeddings { 4 | constructor(params?: EmbeddingsParams) { 5 | super(params ?? {}); 6 | } 7 | 8 | embedDocuments(documents: string[]): Promise { 9 | return Promise.resolve(documents.map(() => [0.1, 0.2, 0.3, 0.4])); 10 | } 11 | 12 | embedQuery(_: string): Promise { 13 | return Promise.resolve([0.1, 0.2, 0.3, 0.4]); 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /langchain/src/embeddings/index.ts: -------------------------------------------------------------------------------- 1 | // eslint-disable-next-line tree-shaking/no-side-effects-in-initialization 2 | /* #__PURE__ */ console.error( 3 | "[WARN] Importing from 'langchain/embeddings' is deprecated. Import from eg. 'langchain/embeddings/openai' instead. See https://js.langchain.com/docs/getting-started/install#updating-from-0052 for upgrade instructions." 
4 | ); 5 | 6 | export { OpenAIEmbeddings } from "./openai.js"; 7 | export { CohereEmbeddings } from "./cohere.js"; 8 | export { Embeddings } from "./base.js"; 9 | export { FakeEmbeddings } from "./fake.js"; 10 | -------------------------------------------------------------------------------- /langchain/src/embeddings/tests/cohere.int.test.ts: -------------------------------------------------------------------------------- 1 | import { test, expect } from "@jest/globals"; 2 | import { CohereEmbeddings } from "../cohere.js"; 3 | 4 | test("Test CohereEmbeddings.embedQuery", async () => { 5 | const embeddings = new CohereEmbeddings(); 6 | const res = await embeddings.embedQuery("Hello world"); 7 | expect(typeof res[0]).toBe("number"); 8 | }); 9 | 10 | test("Test CohereEmbeddings.embedDocuments", async () => { 11 | const embeddings = new CohereEmbeddings(); 12 | const res = await embeddings.embedDocuments(["Hello world", "Bye bye"]); 13 | expect(res).toHaveLength(2); 14 | expect(typeof res[0][0]).toBe("number"); 15 | expect(typeof res[1][0]).toBe("number"); 16 | }); 17 | -------------------------------------------------------------------------------- /langchain/src/embeddings/tests/openai.int.test.ts: -------------------------------------------------------------------------------- 1 | import { test, expect } from "@jest/globals"; 2 | import { OpenAIEmbeddings } from "../openai.js"; 3 | 4 | test("Test OpenAIEmbeddings.embedQuery", async () => { 5 | const embeddings = new OpenAIEmbeddings(); 6 | const res = await embeddings.embedQuery("Hello world"); 7 | expect(typeof res[0]).toBe("number"); 8 | }); 9 | 10 | test("Test OpenAIEmbeddings.embedDocuments", async () => { 11 | const embeddings = new OpenAIEmbeddings(); 12 | const res = await embeddings.embedDocuments(["Hello world", "Bye bye"]); 13 | expect(res).toHaveLength(2); 14 | expect(typeof res[0][0]).toBe("number"); 15 | expect(typeof res[1][0]).toBe("number"); 16 | }); 17 | 
-------------------------------------------------------------------------------- /langchain/src/index.ts: -------------------------------------------------------------------------------- 1 | // eslint-disable-next-line tree-shaking/no-side-effects-in-initialization 2 | /* #__PURE__ */ console.error( 3 | "[WARN] Importing from 'langchain' is deprecated. See https://js.langchain.com/docs/getting-started/install#updating-from-0052 for upgrade instructions." 4 | ); 5 | 6 | export { 7 | PromptTemplate, 8 | BasePromptTemplate, 9 | FewShotPromptTemplate, 10 | } from "./prompts/index.js"; 11 | export { LLMChain } from "./chains/llm_chain.js"; 12 | export { OpenAI } from "./llms/openai.js"; 13 | -------------------------------------------------------------------------------- /langchain/src/llms/index.ts: -------------------------------------------------------------------------------- 1 | // eslint-disable-next-line tree-shaking/no-side-effects-in-initialization 2 | /* #__PURE__ */ console.error( 3 | "[WARN] Importing from 'langchain/llms' is deprecated. Import from eg. 'langchain/llms/openai' instead. See https://js.langchain.com/docs/getting-started/install#updating-from-0052 for upgrade instructions." 4 | ); 5 | 6 | export { BaseLLM, BaseLLMParams, LLM, SerializedLLM } from "./base.js"; 7 | export { OpenAI, PromptLayerOpenAI } from "./openai.js"; 8 | export { OpenAIChat } from "./openai-chat.js"; 9 | export { Cohere } from "./cohere.js"; 10 | export { HuggingFaceInference } from "./hf.js"; 11 | export { Replicate } from "./replicate.js"; 12 | -------------------------------------------------------------------------------- /langchain/src/llms/load.ts: -------------------------------------------------------------------------------- 1 | import { FileLoader, loadFromFile } from "../util/load.js"; 2 | import { BaseLanguageModel } from "../base_language/index.js"; 3 | import { parseFileConfig } from "../util/parse.js"; 4 | 5 | /** 6 | * Load an LLM from a local file. 
7 | * 8 | * @example 9 | * ```ts 10 | * import { loadLLM } from "langchain/llms/load"; 11 | * const model = await loadLLM("/path/to/llm.json"); 12 | * ``` 13 | */ 14 | const loader: FileLoader = (file: string, path: string) => 15 | BaseLanguageModel.deserialize(parseFileConfig(file, path)); 16 | 17 | export const loadLLM = (uri: string): Promise => 18 | loadFromFile(uri, loader); 19 | -------------------------------------------------------------------------------- /langchain/src/llms/tests/cohere.int.test.ts: -------------------------------------------------------------------------------- 1 | import { test } from "@jest/globals"; 2 | import { Cohere } from "../cohere.js"; 3 | 4 | test("Test Cohere", async () => { 5 | const model = new Cohere({ maxTokens: 20 }); 6 | const res = await model.call("1 + 1 ="); 7 | console.log(res); 8 | }, 50000); 9 | -------------------------------------------------------------------------------- /langchain/src/llms/tests/huggingface_hub.int.test.ts: -------------------------------------------------------------------------------- 1 | import { test } from "@jest/globals"; 2 | import { HuggingFaceInference } from "../hf.js"; 3 | 4 | test("Test HuggingFace", async () => { 5 | const model = new HuggingFaceInference({ temperature: 0.1, topP: 0.5 }); 6 | const res = await model.call("1 + 1 ="); 7 | console.log(res); 8 | }, 50000); 9 | -------------------------------------------------------------------------------- /langchain/src/llms/tests/replicate.int.test.ts: -------------------------------------------------------------------------------- 1 | import { test, expect } from "@jest/globals"; 2 | import { Replicate } from "../replicate.js"; 3 | 4 | // Test skipped because Replicate appears to be timing out often when called 5 | test.skip("Test Replicate", async () => { 6 | const model = new Replicate({ 7 | model: 8 | "daanelson/flan-t5:04e422a9b85baed86a4f24981d7f9953e20c5fd82f6103b74ebc431588e1cec8", 9 | input: { 10 | max_length: 10, 11 | }, 
12 | }); 13 | 14 | const res = await model.call("Hello, my name is "); 15 | 16 | expect(typeof res).toBe("string"); 17 | }); 18 | -------------------------------------------------------------------------------- /langchain/src/memory/index.ts: -------------------------------------------------------------------------------- 1 | export { BufferMemory } from "./buffer_memory.js"; 2 | export { BaseMemory } from "./base.js"; 3 | export { ConversationSummaryMemory } from "./summary.js"; 4 | export { BufferWindowMemory } from "./buffer_window_memory.js"; 5 | export { BaseChatMemory, ChatMessageHistory } from "./chat_memory.js"; 6 | export { MotorheadMemory } from "./motorhead_memory.js"; 7 | -------------------------------------------------------------------------------- /langchain/src/memory/prompt.ts: -------------------------------------------------------------------------------- 1 | // eslint-disable-next-line tree-shaking/no-side-effects-in-initialization 2 | import { PromptTemplate } from "../prompts/prompt.js"; 3 | 4 | const _DEFAULT_SUMMARIZER_TEMPLATE = `Progressively summarize the lines of conversation provided, adding onto the previous summary returning a new summary. 5 | 6 | EXAMPLE 7 | Current summary: 8 | The human asks what the AI thinks of artificial intelligence. The AI thinks artificial intelligence is a force for good. 9 | 10 | New lines of conversation: 11 | Human: Why do you think artificial intelligence is a force for good? 12 | AI: Because artificial intelligence will help humans reach their full potential. 13 | 14 | New summary: 15 | The human asks what the AI thinks of artificial intelligence. The AI thinks artificial intelligence is a force for good because it will help humans reach their full potential. 
16 | END OF EXAMPLE 17 | 18 | Current summary: 19 | {summary} 20 | 21 | New lines of conversation: 22 | {new_lines} 23 | 24 | New summary:`; 25 | 26 | // eslint-disable-next-line spaced-comment 27 | export const SUMMARY_PROMPT = /*#__PURE__*/ new PromptTemplate({ 28 | inputVariables: ["summary", "new_lines"], 29 | template: _DEFAULT_SUMMARIZER_TEMPLATE, 30 | }); 31 | -------------------------------------------------------------------------------- /langchain/src/output_parsers/combining.ts: -------------------------------------------------------------------------------- 1 | import { BaseOutputParser } from "../schema/index.js"; 2 | 3 | // eslint-disable-next-line @typescript-eslint/no-explicit-any 4 | export type CombinedOutput = Record; 5 | 6 | /** 7 | * Class to combine multiple output parsers 8 | * @augments BaseOutputParser 9 | */ 10 | export class CombiningOutputParser extends BaseOutputParser { 11 | parsers: BaseOutputParser[]; 12 | 13 | constructor(...parsers: BaseOutputParser[]) { 14 | super(); 15 | this.parsers = parsers; 16 | } 17 | 18 | async parse(input: string): Promise { 19 | const ret: CombinedOutput = {}; 20 | for (const p of this.parsers) { 21 | Object.assign(ret, await p.parse(input)); 22 | } 23 | return ret; 24 | } 25 | 26 | getFormatInstructions(): string { 27 | const initial = `For your first output: ${this?.parsers?.[0]?.getFormatInstructions()}`; 28 | const subsequent = this.parsers 29 | .slice(1) 30 | .map( 31 | (p) => 32 | `Complete that output fully. 
Then produce another output: ${p.getFormatInstructions()}` 33 | ) 34 | .join("\n"); 35 | return `${initial}\n${subsequent}`; 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /langchain/src/output_parsers/index.ts: -------------------------------------------------------------------------------- 1 | export { ListOutputParser, CommaSeparatedListOutputParser } from "./list.js"; 2 | export { RegexParser } from "./regex.js"; 3 | export { StructuredOutputParser } from "./structured.js"; 4 | export { OutputFixingParser } from "./fix.js"; 5 | export { CombiningOutputParser } from "./combining.js"; 6 | -------------------------------------------------------------------------------- /langchain/src/output_parsers/list.ts: -------------------------------------------------------------------------------- 1 | import { BaseOutputParser, OutputParserException } from "../schema/index.js"; 2 | 3 | /** 4 | * Class to parse the output of an LLM call to a list. 5 | * @augments BaseOutputParser 6 | */ 7 | export abstract class ListOutputParser extends BaseOutputParser { 8 | abstract parse(text: string): Promise; 9 | } 10 | 11 | /** 12 | * Class to parse the output of an LLM call as a comma-separated list. 
13 | * @augments ListOutputParser 14 | */ 15 | export class CommaSeparatedListOutputParser extends ListOutputParser { 16 | async parse(text: string): Promise { 17 | try { 18 | return text 19 | .trim() 20 | .split(",") 21 | .map((s) => s.trim()); 22 | } catch (e) { 23 | throw new OutputParserException(`Could not parse output: ${text}`); 24 | } 25 | } 26 | 27 | getFormatInstructions(): string { 28 | return `Your response should be a list of comma separated values, eg: \`foo, bar, baz\``; 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /langchain/src/output_parsers/prompts.ts: -------------------------------------------------------------------------------- 1 | /* eslint-disable tree-shaking/no-side-effects-in-initialization */ 2 | import { PromptTemplate } from "../prompts/prompt.js"; 3 | 4 | export const NAIVE_FIX_TEMPLATE = `Instructions: 5 | -------------- 6 | {instructions} 7 | -------------- 8 | Completion: 9 | -------------- 10 | {completion} 11 | -------------- 12 | 13 | Above, the Completion did not satisfy the constraints given in the Instructions. 14 | Error: 15 | -------------- 16 | {error} 17 | -------------- 18 | 19 | Please try again. 
Please only respond with an answer that satisfies the constraints laid out in the Instructions:`; 20 | 21 | export const NAIVE_FIX_PROMPT = 22 | /* #__PURE__ */ PromptTemplate.fromTemplate(NAIVE_FIX_TEMPLATE); 23 | -------------------------------------------------------------------------------- /langchain/src/output_parsers/tests/list.test.ts: -------------------------------------------------------------------------------- 1 | import { test, expect } from "@jest/globals"; 2 | 3 | import { CommaSeparatedListOutputParser } from "../list.js"; 4 | 5 | test("CommaSeparatedListOutputParser", async () => { 6 | const parser = new CommaSeparatedListOutputParser(); 7 | 8 | expect(await parser.parse("hello, bye")).toEqual(["hello", "bye"]); 9 | 10 | expect(await parser.parse("hello,bye")).toEqual(["hello", "bye"]); 11 | 12 | expect(await parser.parse("hello")).toEqual(["hello"]); 13 | }); 14 | -------------------------------------------------------------------------------- /langchain/src/prompts/index.ts: -------------------------------------------------------------------------------- 1 | export { 2 | BaseExampleSelector, 3 | BasePromptTemplate, 4 | BasePromptTemplateInput, 5 | StringPromptValue, 6 | BaseStringPromptTemplate, 7 | } from "./base.js"; 8 | export { PromptTemplate, PromptTemplateInput } from "./prompt.js"; 9 | export { LengthBasedExampleSelector } from "./selectors/LengthBasedExampleSelector.js"; 10 | export { SemanticSimilarityExampleSelector } from "./selectors/SemanticSimilarityExampleSelector.js"; 11 | export { 12 | FewShotPromptTemplate, 13 | FewShotPromptTemplateInput, 14 | } from "./few_shot.js"; 15 | export { 16 | ChatPromptTemplate, 17 | HumanMessagePromptTemplate, 18 | AIMessagePromptTemplate, 19 | SystemMessagePromptTemplate, 20 | ChatMessagePromptTemplate, 21 | MessagesPlaceholder, 22 | BaseChatPromptTemplate, 23 | } from "./chat.js"; 24 | export { 25 | SerializedPromptTemplate, 26 | SerializedBasePromptTemplate, 27 | SerializedFewShotTemplate, 28 | 
SerializedMessagePromptTemplate, 29 | SerializedChatPromptTemplate, 30 | } from "./serde.js"; 31 | export { parseTemplate, renderTemplate } from "./template.js"; 32 | -------------------------------------------------------------------------------- /langchain/src/prompts/serde.ts: -------------------------------------------------------------------------------- 1 | import type { Example } from "../schema/index.js"; 2 | import type { TemplateFormat } from "./template.js"; 3 | 4 | export type SerializedPromptTemplate = { 5 | _type?: "prompt"; 6 | input_variables: string[]; 7 | template_format?: TemplateFormat; 8 | template?: string; 9 | }; 10 | 11 | export type SerializedFewShotTemplate = { 12 | _type: "few_shot"; 13 | input_variables: string[]; 14 | examples: string | Example[]; 15 | example_prompt?: SerializedPromptTemplate; 16 | example_separator: string; 17 | prefix?: string; 18 | suffix?: string; 19 | template_format: TemplateFormat; 20 | }; 21 | 22 | export type SerializedMessagePromptTemplate = { 23 | _type: "message"; 24 | input_variables: string[]; 25 | [key: string]: unknown; 26 | }; 27 | 28 | /** Serialized Chat prompt template */ 29 | export type SerializedChatPromptTemplate = { 30 | _type?: "chat_prompt"; 31 | input_variables: string[]; 32 | template_format?: TemplateFormat; 33 | prompt_messages: SerializedMessagePromptTemplate[]; 34 | }; 35 | 36 | export type SerializedBasePromptTemplate = 37 | | SerializedFewShotTemplate 38 | | SerializedPromptTemplate 39 | | SerializedChatPromptTemplate; 40 | -------------------------------------------------------------------------------- /langchain/src/prompts/tests/load.int.test.ts: -------------------------------------------------------------------------------- 1 | import { expect, test } from "@jest/globals"; 2 | import * as path from "node:path"; 3 | import { fileURLToPath } from "node:url"; 4 | import { loadPrompt } from "../load.js"; 5 | 6 | test("Load Hello World Prompt", async () => { 7 | const helloWorld = 
path.join( 8 | path.join(path.dirname(fileURLToPath(import.meta.url)), "prompts"), 9 | "hello_world.yaml" 10 | ); 11 | const prompt = await loadPrompt(helloWorld); 12 | expect(prompt._getPromptType()).toBe("prompt"); 13 | expect(await prompt.format({})).toBe("Say hello world."); 14 | }); 15 | 16 | test("Load hub prompt", async () => { 17 | const prompt = await loadPrompt( 18 | "lc@abb92d8://prompts/hello-world/prompt.yaml" 19 | ); 20 | expect(prompt._getPromptType()).toBe("prompt"); 21 | expect(await prompt.format({})).toBe("Say hello world."); 22 | }); 23 | -------------------------------------------------------------------------------- /langchain/src/prompts/tests/prompts/hello_world.yaml: -------------------------------------------------------------------------------- 1 | input_variables: [] 2 | output_parser: null 3 | template: "Say hello world." 4 | template_format: f-string 5 | -------------------------------------------------------------------------------- /langchain/src/prompts/tests/template.test.ts: -------------------------------------------------------------------------------- 1 | /* eslint-disable tree-shaking/no-side-effects-in-initialization */ 2 | import { expect, test, describe } from "@jest/globals"; 3 | import { interpolateFString } from "../template.js"; 4 | 5 | describe.each([ 6 | ["{foo}", { foo: "bar" }, "bar"], 7 | ["pre{foo}post", { foo: "bar" }, "prebarpost"], 8 | ["{{pre{foo}post}}", { foo: "bar" }, "{prebarpost}"], 9 | ["text", {}, "text"], 10 | ["}}{{", {}, "}{"], 11 | ["{first}_{second}", { first: "foo", second: "bar" }, "foo_bar"], 12 | ])("Valid f-string", (template, variables, result) => { 13 | test(`Interpolation works: ${template}`, () => { 14 | expect(interpolateFString(template, variables)).toBe(result); 15 | }); 16 | }); 17 | 18 | describe.each([ 19 | ["{", {}], 20 | ["}", {}], 21 | ["{foo", {}], 22 | ["foo}", {}], 23 | ])("Invalid f-string", (template, variables) => { 24 | test(`Interpolation throws: ${template}`, () => { 25 | 
expect(() => interpolateFString(template, variables)).toThrow(); 26 | }); 27 | }); 28 | -------------------------------------------------------------------------------- /langchain/src/retrievers/index.ts: -------------------------------------------------------------------------------- 1 | // eslint-disable-next-line tree-shaking/no-side-effects-in-initialization 2 | /* #__PURE__ */ console.error( 3 | "[WARN] Importing from 'langchain/retrievers' is deprecated. Import from eg. 'langchain/retrievers/remote' instead. See https://js.langchain.com/docs/getting-started/install#updating-from-0052 for upgrade instructions." 4 | ); 5 | 6 | export { RemoteRetriever } from "./remote/base.js"; 7 | export { ChatGPTPluginRetriever } from "./remote/chatgpt-plugin.js"; 8 | export { 9 | SupabaseHybridSearch, 10 | SupabaseHybridSearchParams, 11 | } from "./supabase.js"; 12 | export { RemoteLangChainRetriever } from "./remote/remote-retriever.js"; 13 | export { MetalRetriever } from "./metal.js"; 14 | -------------------------------------------------------------------------------- /langchain/src/retrievers/metal.ts: -------------------------------------------------------------------------------- 1 | import { BaseRetriever } from "../schema/index.js"; 2 | import { Document } from "../document.js"; 3 | 4 | export interface MetalRetrieverFields { 5 | client: import("@getmetal/metal-sdk").default; 6 | } 7 | 8 | interface ResponseItem { 9 | text: string; 10 | [key: string]: unknown; 11 | } 12 | 13 | export class MetalRetriever extends BaseRetriever { 14 | private client: import("@getmetal/metal-sdk").default; 15 | 16 | constructor(fields: MetalRetrieverFields) { 17 | super(); 18 | 19 | this.client = fields.client; 20 | } 21 | 22 | async getRelevantDocuments(query: string): Promise { 23 | const res = await this.client.search({ text: query }); 24 | 25 | const items = ("data" in res ? 
res.data : res) as ResponseItem[]; 26 | return items.map( 27 | ({ text, metadata }) => 28 | new Document({ 29 | pageContent: text, 30 | metadata: metadata as Record, 31 | }) 32 | ); 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /langchain/src/retrievers/remote/index.ts: -------------------------------------------------------------------------------- 1 | export { RemoteRetriever } from "./base.js"; 2 | export { ChatGPTPluginRetriever } from "./chatgpt-plugin.js"; 3 | export { RemoteLangChainRetriever } from "./remote-retriever.js"; 4 | -------------------------------------------------------------------------------- /langchain/src/retrievers/tests/metal.int.test.ts: -------------------------------------------------------------------------------- 1 | /* eslint-disable no-process-env */ 2 | /* eslint-disable @typescript-eslint/no-non-null-assertion */ 3 | import { test, expect } from "@jest/globals"; 4 | import Metal from "@getmetal/metal-sdk"; 5 | 6 | import { MetalRetriever } from "../metal.js"; 7 | 8 | test("MetalRetriever", async () => { 9 | const MetalSDK = Metal.default; 10 | const client = new MetalSDK( 11 | process.env.METAL_API_KEY!, 12 | process.env.METAL_CLIENT_ID!, 13 | process.env.METAL_APP_ID 14 | ); 15 | const retriever = new MetalRetriever({ client }); 16 | 17 | const docs = await retriever.getRelevantDocuments("hello"); 18 | 19 | expect(docs.length).toBeGreaterThan(0); 20 | 21 | console.log(docs); 22 | }); 23 | -------------------------------------------------------------------------------- /langchain/src/retrievers/tests/supabase.int.test.ts: -------------------------------------------------------------------------------- 1 | /* eslint-disable no-process-env */ 2 | /* eslint-disable @typescript-eslint/no-non-null-assertion */ 3 | import { test, expect } from "@jest/globals"; 4 | import { createClient } from "@supabase/supabase-js"; 5 | import { OpenAIEmbeddings } from "../../embeddings/openai.js"; 
6 | import { SupabaseHybridSearch } from "../supabase.js"; 7 | 8 | test("Supabase hybrid keyword search", async () => { 9 | const client = createClient( 10 | process.env.SUPABASE_URL!, 11 | process.env.SUPABASE_PRIVATE_KEY! 12 | ); 13 | 14 | const embeddings = new OpenAIEmbeddings(); 15 | 16 | const retriever = new SupabaseHybridSearch(embeddings, { 17 | client, 18 | similarityK: 2, 19 | keywordK: 2, 20 | }); 21 | 22 | expect(retriever).toBeDefined(); 23 | 24 | const results = await retriever.getRelevantDocuments("hello bye"); 25 | 26 | expect(results.length).toBeGreaterThan(0); 27 | }); 28 | -------------------------------------------------------------------------------- /langchain/src/tests/cache.test.ts: -------------------------------------------------------------------------------- 1 | import hash from "object-hash"; 2 | import { test, expect, jest } from "@jest/globals"; 3 | 4 | import { InMemoryCache, RedisCache } from "../cache.js"; 5 | 6 | const sha256 = (str: string) => hash(str); 7 | 8 | test("InMemoryCache", async () => { 9 | const cache = new InMemoryCache(); 10 | await cache.update("foo", "bar", [{ text: "baz" }]); 11 | expect(await cache.lookup("foo", "bar")).toEqual([{ text: "baz" }]); 12 | }); 13 | 14 | test("RedisCache", async () => { 15 | const redis = { 16 | get: jest.fn(async (key: string) => { 17 | if (key === sha256("foo_bar_0")) { 18 | return "baz"; 19 | } 20 | return null; 21 | }), 22 | }; 23 | // eslint-disable-next-line @typescript-eslint/no-explicit-any 24 | const cache = new RedisCache(redis as any); 25 | expect(await cache.lookup("foo", "bar")).toEqual([{ text: "baz" }]); 26 | }); 27 | -------------------------------------------------------------------------------- /langchain/src/tools/calculator.ts: -------------------------------------------------------------------------------- 1 | import { Parser } from "expr-eval"; 2 | 3 | import { Tool } from "./base.js"; 4 | 5 | export class Calculator extends Tool { 6 | name = "calculator"; 7 | 
8 | async _call(input: string) { 9 | try { 10 | return Parser.evaluate(input).toString(); 11 | } catch (error) { 12 | return "I don't know how to do that."; 13 | } 14 | } 15 | 16 | description = `Useful for getting the result of a math expression. The input to this tool should be a valid mathematical expression that could be executed by a simple calculator.`; 17 | } 18 | -------------------------------------------------------------------------------- /langchain/src/tools/chain.ts: -------------------------------------------------------------------------------- 1 | import { Tool } from "./base.js"; 2 | import { BaseChain } from "../chains/base.js"; 3 | 4 | export class ChainTool extends Tool { 5 | name: string; 6 | 7 | description: string; 8 | 9 | chain: BaseChain; 10 | 11 | returnDirect: boolean; 12 | 13 | constructor(fields: { 14 | name: string; 15 | description: string; 16 | chain: BaseChain; 17 | returnDirect?: boolean; 18 | }) { 19 | super(); 20 | this.name = fields.name; 21 | this.description = fields.description; 22 | this.chain = fields.chain; 23 | this.returnDirect = fields.returnDirect ?? this.returnDirect; 24 | } 25 | 26 | async _call(input: string): Promise { 27 | return this.chain.run(input); 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /langchain/src/tools/dadjokeapi.ts: -------------------------------------------------------------------------------- 1 | import { Tool } from "./base.js"; 2 | 3 | class DadJokeAPI extends Tool { 4 | name: string; 5 | 6 | description: string; 7 | 8 | constructor() { 9 | super(); 10 | this.name = "dadjoke"; 11 | this.description = 12 | "a dad joke generator. get a dad joke about a specific topic. 
input should be a search term."; 13 | } 14 | 15 | async _call(input: string): Promise { 16 | const headers = { Accept: "application/json" }; 17 | const searchUrl = `https://icanhazdadjoke.com/search?term=${input}`; 18 | 19 | const response = await fetch(searchUrl, { headers }); 20 | 21 | if (!response.ok) { 22 | throw new Error(`HTTP error ${response.status}`); 23 | } 24 | 25 | const data = await response.json(); 26 | const jokes = data.results; 27 | 28 | if (jokes.length === 0) { 29 | return `No dad jokes found about ${input}`; 30 | } 31 | 32 | const randomIndex = Math.floor(Math.random() * jokes.length); 33 | const randomJoke = jokes[randomIndex].joke; 34 | 35 | return randomJoke; 36 | } 37 | } 38 | 39 | export { DadJokeAPI }; 40 | -------------------------------------------------------------------------------- /langchain/src/tools/dynamic.ts: -------------------------------------------------------------------------------- 1 | import { Tool } from "./base.js"; 2 | 3 | export interface DynamicToolParams { 4 | name: string; 5 | description: string; 6 | func: (arg1: string) => Promise; 7 | returnDirect?: boolean; 8 | } 9 | 10 | export class DynamicTool extends Tool { 11 | name: string; 12 | 13 | description: string; 14 | 15 | func: (arg1: string) => Promise; 16 | 17 | constructor(fields: DynamicToolParams) { 18 | super(); 19 | this.name = fields.name; 20 | this.description = fields.description; 21 | this.func = fields.func; 22 | this.returnDirect = fields.returnDirect ?? 
this.returnDirect; 23 | } 24 | 25 | async _call(input: string): Promise { 26 | return this.func(input); 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /langchain/src/tools/index.ts: -------------------------------------------------------------------------------- 1 | export { SerpAPI } from "./serpapi.js"; 2 | export { DadJokeAPI } from "./dadjokeapi.js"; 3 | export { BingSerpAPI } from "./bingserpapi.js"; 4 | export { Tool, ToolParams } from "./base.js"; 5 | export { DynamicTool } from "./dynamic.js"; 6 | export { IFTTTWebhook } from "./IFTTTWebhook.js"; 7 | export { ChainTool } from "./chain.js"; 8 | export { 9 | QuerySqlTool, 10 | InfoSqlTool, 11 | ListTablesSqlTool, 12 | QueryCheckerTool, 13 | } from "./sql.js"; 14 | export { 15 | JsonSpec, 16 | JsonListKeysTool, 17 | JsonGetValueTool, 18 | JsonObject, 19 | Json, 20 | } from "./json.js"; 21 | export { RequestsGetTool, RequestsPostTool } from "./requests.js"; 22 | export { VectorStoreQATool } from "./vectorstore.js"; 23 | export { ZapierNLARunAction, ZapierNLAWrapper } from "./zapier.js"; 24 | export { Serper } from "./serper.js"; 25 | export { AIPluginTool } from "./aiplugin.js"; 26 | -------------------------------------------------------------------------------- /langchain/src/tools/tests/webbrowser.test.ts: -------------------------------------------------------------------------------- 1 | import { test, expect, describe } from "@jest/globals"; 2 | import { readFileSync } from "fs"; 3 | import { getText } from "../webbrowser.js"; 4 | 5 | describe("webbrowser Test suite", () => { 6 | const html = readFileSync("./src/tools/fixtures/wordoftheday.html", "utf8"); 7 | 8 | test("parse html to text and links", async () => { 9 | const baseUrl = "https://www.merriam-webster.com/word-of-the-day"; 10 | const text = getText(html, baseUrl, false); 11 | expect(text).toContain("Word of the Day: Foible"); 12 | }); 13 | }); 14 | 
-------------------------------------------------------------------------------- /langchain/src/types/pdf-parse.d.ts: -------------------------------------------------------------------------------- 1 | declare module "pdf-parse/lib/pdf-parse.js" { 2 | import pdf from "pdf-parse"; 3 | 4 | export default pdf; 5 | } 6 | -------------------------------------------------------------------------------- /langchain/src/util/axios-fetch-adapter.d.ts: -------------------------------------------------------------------------------- 1 | // eslint-disable-next-line import/no-extraneous-dependencies 2 | import { AxiosRequestConfig, AxiosPromise } from "axios"; 3 | 4 | export default function fetchAdapter(config: AxiosRequestConfig): AxiosPromise; 5 | -------------------------------------------------------------------------------- /langchain/src/util/axios-types.ts: -------------------------------------------------------------------------------- 1 | import type { EventSourceMessage } from "./event-source-parse.js"; 2 | 3 | export interface StreamingAxiosConfiguration { 4 | responseType: "stream"; 5 | 6 | /** 7 | * Called when a message is received. NOTE: Unlike the default browser 8 | * EventSource.onmessage, this callback is called for _all_ events, 9 | * even ones with a custom `event` field. 
10 | */ 11 | onmessage?: (ev: EventSourceMessage) => void; 12 | } 13 | -------------------------------------------------------------------------------- /langchain/src/util/chunk.ts: -------------------------------------------------------------------------------- 1 | export const chunkArray = (arr: T[], chunkSize: number) => 2 | arr.reduce((chunks, elem, index) => { 3 | const chunkIndex = Math.floor(index / chunkSize); 4 | const chunk = chunks[chunkIndex] || []; 5 | // eslint-disable-next-line no-param-reassign 6 | chunks[chunkIndex] = chunk.concat([elem]); 7 | return chunks; 8 | }, [] as T[][]); 9 | -------------------------------------------------------------------------------- /langchain/src/util/env.ts: -------------------------------------------------------------------------------- 1 | import { 2 | isBrowser, 3 | isNode, 4 | isWebWorker, 5 | isJsDom, 6 | isDeno, 7 | } from "browser-or-node"; 8 | 9 | export const getEnv = () => { 10 | let env: string; 11 | if (isBrowser) { 12 | env = "browser"; 13 | } else if (isNode) { 14 | env = "node"; 15 | } else if (isWebWorker) { 16 | env = "webworker"; 17 | } else if (isJsDom) { 18 | env = "jsdom"; 19 | } else if (isDeno) { 20 | env = "deno"; 21 | } else { 22 | env = "other"; 23 | } 24 | 25 | return env; 26 | }; 27 | -------------------------------------------------------------------------------- /langchain/src/util/extname.ts: -------------------------------------------------------------------------------- 1 | export const extname = (path: string) => `.${path.split(".").pop()}`; 2 | -------------------------------------------------------------------------------- /langchain/src/util/load.ts: -------------------------------------------------------------------------------- 1 | // eslint-disable-next-line @typescript-eslint/no-explicit-any 2 | export type LoadValues = Record; 3 | 4 | export type FileLoader = ( 5 | text: string, 6 | filePath: string, 7 | values: LoadValues 8 | ) => Promise; 9 | 10 | export const loadFromFile 
= async ( 11 | uri: string, 12 | loader: FileLoader, 13 | values: LoadValues = {} 14 | ): Promise => { 15 | try { 16 | const fs = await import("node:fs/promises"); 17 | return loader(await fs.readFile(uri, { encoding: "utf-8" }), uri, values); 18 | } catch (e) { 19 | console.error(e); 20 | throw new Error(`Could not load file at ${uri}`); 21 | } 22 | }; 23 | -------------------------------------------------------------------------------- /langchain/src/util/parse.ts: -------------------------------------------------------------------------------- 1 | import * as yaml from "yaml"; 2 | import { extname } from "./extname.js"; 3 | 4 | export const loadFileContents = (contents: string, format: string) => { 5 | switch (format) { 6 | case ".json": 7 | return JSON.parse(contents); 8 | case ".yml": 9 | case ".yaml": 10 | return yaml.parse(contents); 11 | default: 12 | throw new Error(`Unsupported filetype ${format}`); 13 | } 14 | }; 15 | 16 | export const parseFileConfig = ( 17 | text: string, 18 | path: string, 19 | supportedTypes?: string[] 20 | ) => { 21 | const suffix = extname(path); 22 | 23 | if ( 24 | ![".json", ".yaml"].includes(suffix) || 25 | (supportedTypes && !supportedTypes.includes(suffix)) 26 | ) { 27 | throw new Error(`Unsupported filetype ${suffix}`); 28 | } 29 | 30 | return loadFileContents(text, suffix); 31 | }; 32 | -------------------------------------------------------------------------------- /langchain/src/vectorstores/index.ts: -------------------------------------------------------------------------------- 1 | // eslint-disable-next-line tree-shaking/no-side-effects-in-initialization 2 | /* #__PURE__ */ console.error( 3 | "[WARN] Importing from 'langchain/vectorstores' is deprecated. Import from eg. 'langchain/vectorstores/pinecone' instead. See https://js.langchain.com/docs/getting-started/install#updating-from-0052 for upgrade instructions." 
4 | ); 5 | 6 | export { HNSWLib } from "./hnswlib.js"; 7 | export { Chroma } from "./chroma.js"; 8 | export { PineconeStore } from "./pinecone.js"; 9 | export { VectorStore, SaveableVectorStore } from "./base.js"; 10 | export { SupabaseVectorStore } from "./supabase.js"; 11 | export { PrismaVectorStore } from "./prisma.js"; 12 | -------------------------------------------------------------------------------- /langchain/src/vectorstores/tests/chroma.test.ts: -------------------------------------------------------------------------------- 1 | import { test, expect } from "@jest/globals"; 2 | 3 | import { Chroma } from "../chroma.js"; 4 | 5 | // We'd want a much more thorough test here, 6 | // but sadly Chroma isn't very easy to test locally at the moment. 7 | test("Chroma imports correctly", async () => { 8 | const { ChromaClient } = await Chroma.imports(); 9 | 10 | expect(ChromaClient).toBeDefined(); 11 | }); 12 | -------------------------------------------------------------------------------- /langchain/src/vectorstores/tests/memory.int.test.ts: -------------------------------------------------------------------------------- 1 | import { test, expect } from "@jest/globals"; 2 | 3 | import { OpenAIEmbeddings } from "../../embeddings/openai.js"; 4 | import { Document } from "../../document.js"; 5 | import { MemoryVectorStore } from "../memory.js"; 6 | 7 | test("MemoryVectorStore with external ids", async () => { 8 | const embeddings = new OpenAIEmbeddings(); 9 | 10 | const store = new MemoryVectorStore(embeddings); 11 | 12 | expect(store).toBeDefined(); 13 | 14 | await store.addDocuments([ 15 | { pageContent: "hello", metadata: { a: 1 } }, 16 | { pageContent: "hi", metadata: { a: 1 } }, 17 | { pageContent: "bye", metadata: { a: 1 } }, 18 | { pageContent: "what's this", metadata: { a: 1 } }, 19 | ]); 20 | 21 | const results = await store.similaritySearch("hello", 1); 22 | 23 | expect(results).toHaveLength(1); 24 | 25 | expect(results).toEqual([ 26 | new Document({ 
metadata: { a: 1 }, pageContent: "hello" }), 27 | ]); 28 | }); 29 | -------------------------------------------------------------------------------- /langchain/src/vectorstores/tests/supabase.int.test.ts: -------------------------------------------------------------------------------- 1 | /* eslint-disable no-process-env */ 2 | /* eslint-disable @typescript-eslint/no-non-null-assertion */ 3 | import { test, expect } from "@jest/globals"; 4 | import { createClient } from "@supabase/supabase-js"; 5 | 6 | import { OpenAIEmbeddings } from "../../embeddings/openai.js"; 7 | import { Document } from "../../document.js"; 8 | import { SupabaseVectorStore } from "../supabase.js"; 9 | 10 | test("SupabaseVectorStore with external ids", async () => { 11 | const client = createClient( 12 | process.env.SUPABASE_URL!, 13 | process.env.SUPABASE_PRIVATE_KEY! 14 | ); 15 | 16 | const embeddings = new OpenAIEmbeddings(); 17 | 18 | const store = new SupabaseVectorStore(embeddings, { client }); 19 | 20 | expect(store).toBeDefined(); 21 | 22 | await store.addDocuments([ 23 | { pageContent: "hello", metadata: { a: 1 } }, 24 | { pageContent: "hi", metadata: { a: 1 } }, 25 | { pageContent: "bye", metadata: { a: 1 } }, 26 | { pageContent: "what's this", metadata: { a: 1 } }, 27 | ]); 28 | 29 | const results = await store.similaritySearch("hello", 1); 30 | 31 | expect(results).toHaveLength(1); 32 | 33 | expect(results).toEqual([ 34 | new Document({ metadata: { a: 1 }, pageContent: "hello" }), 35 | ]); 36 | }); 37 | -------------------------------------------------------------------------------- /langchain/tsconfig.cjs.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "./tsconfig.json", 3 | "compilerOptions": { 4 | "module": "commonjs", 5 | "declaration": false 6 | }, 7 | "exclude": [ 8 | "node_modules", 9 | "dist", 10 | "docs", 11 | "**/tests" 12 | ] 13 | } -------------------------------------------------------------------------------- 
/scripts/docker-ci-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euxo pipefail 4 | 5 | export CI=true 6 | 7 | cp -r ../package/* . 8 | 9 | # Replace the workspace dependency with the local copy, and install all others 10 | yarn add ../langchain 11 | 12 | # Check the build command completes successfully 13 | yarn build 14 | 15 | # Check the test command completes successfully 16 | yarn test 17 | -------------------------------------------------------------------------------- /scripts/release-branch.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | if [[ $(git branch --show-current) == "main" ]]; then 4 | git checkout -B release 5 | git push -u origin release 6 | fi 7 | -------------------------------------------------------------------------------- /test-exports-vercel/.eslintrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "next/core-web-vitals" 3 | } 4 | -------------------------------------------------------------------------------- /test-exports-vercel/.gitignore: -------------------------------------------------------------------------------- 1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files. 
2 | 3 | # dependencies 4 | /node_modules 5 | /.pnp 6 | .pnp.js 7 | 8 | # testing 9 | /coverage 10 | 11 | # next.js 12 | /.next/ 13 | /out/ 14 | 15 | # production 16 | /build 17 | 18 | # misc 19 | .DS_Store 20 | *.pem 21 | 22 | # debug 23 | npm-debug.log* 24 | yarn-debug.log* 25 | yarn-error.log* 26 | .pnpm-debug.log* 27 | 28 | # local env files 29 | .env*.local 30 | 31 | # vercel 32 | .vercel 33 | 34 | # typescript 35 | *.tsbuildinfo 36 | next-env.d.ts 37 | -------------------------------------------------------------------------------- /test-exports-vercel/next.config.js: -------------------------------------------------------------------------------- 1 | /** @type {import('next').NextConfig} */ 2 | const nextConfig = { 3 | reactStrictMode: true, 4 | webpack(config) { 5 | config.experiments = { 6 | asyncWebAssembly: true, 7 | layers: true, 8 | }; 9 | 10 | return config; 11 | }, 12 | }; 13 | 14 | module.exports = nextConfig; 15 | -------------------------------------------------------------------------------- /test-exports-vercel/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "test-exports-vercel", 3 | "version": "0.1.0", 4 | "private": true, 5 | "scripts": { 6 | "dev": "dotenv -e ../.env next dev", 7 | "build": "dotenv -e ../.env next build", 8 | "start": "next start", 9 | "test": "next lint" 10 | }, 11 | "dependencies": { 12 | "@types/node": "18.15.11", 13 | "@types/react": "18.0.33", 14 | "@types/react-dom": "18.0.11", 15 | "axios": "^1.3.5", 16 | "eslint": "8.37.0", 17 | "eslint-config-next": "13.2.4", 18 | "langchain": "workspace:*", 19 | "next": "13.2.4", 20 | "react": "18.2.0", 21 | "react-dom": "18.2.0", 22 | "typescript": "5.0.3" 23 | }, 24 | "devDependencies": { 25 | "dotenv-cli": "latest" 26 | }, 27 | "proxy": "http://127.0.0.1:8000" 28 | } 29 | -------------------------------------------------------------------------------- /test-exports-vercel/public/favicon.ico: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/kevin-fruitful/chadgpt-ui/6a2250a8469661bdde53a6d1d05a2a929c46a752/test-exports-vercel/public/favicon.ico -------------------------------------------------------------------------------- /test-exports-vercel/public/thirteen.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /test-exports-vercel/public/vercel.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /test-exports-vercel/src/entrypoints.js: -------------------------------------------------------------------------------- 1 | export * from "langchain/agents"; 2 | export * from "langchain/base_language"; 3 | export * from "langchain/tools"; 4 | export * from "langchain/chains"; 5 | export * from "langchain/embeddings/base"; 6 | export * from "langchain/embeddings/fake"; 7 | export * from "langchain/embeddings/openai"; 8 | export * from "langchain/llms/base"; 9 | export * from "langchain/llms/openai"; 10 | export * from "langchain/prompts"; 11 | export * from "langchain/vectorstores/base"; 12 | export * from "langchain/vectorstores/memory"; 13 | export * from "langchain/vectorstores/prisma"; 14 | export * from "langchain/text_splitter"; 15 | export * from "langchain/memory"; 16 | export * from "langchain/document"; 17 | export * from "langchain/docstore"; 18 | export * from "langchain/document_loaders/base"; 19 | export * from "langchain/chat_models/base"; 20 | export * from "langchain/chat_models/openai"; 21 | export * from "langchain/chat_models/anthropic"; 22 | export * from "langchain/schema"; 23 | export * from "langchain/callbacks"; 24 | export * from "langchain/output_parsers"; 25 | export * from "langchain/retrievers/remote"; 26 | export * from 
"langchain/retrievers/databerry"; 27 | export * from "langchain/cache"; 28 | -------------------------------------------------------------------------------- /test-exports-vercel/src/images/howToWork.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kevin-fruitful/chadgpt-ui/6a2250a8469661bdde53a6d1d05a2a929c46a752/test-exports-vercel/src/images/howToWork.png -------------------------------------------------------------------------------- /test-exports-vercel/src/images/sSLCertified.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kevin-fruitful/chadgpt-ui/6a2250a8469661bdde53a6d1d05a2a929c46a752/test-exports-vercel/src/images/sSLCertified.png -------------------------------------------------------------------------------- /test-exports-vercel/src/images/safeFromBugs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kevin-fruitful/chadgpt-ui/6a2250a8469661bdde53a6d1d05a2a929c46a752/test-exports-vercel/src/images/safeFromBugs.png -------------------------------------------------------------------------------- /test-exports-vercel/src/pages/_app.tsx: -------------------------------------------------------------------------------- 1 | import '@/styles/globals.css' 2 | import type { AppProps } from 'next/app' 3 | 4 | export default function App({ Component, pageProps }: AppProps) { 5 | return 6 | } 7 | -------------------------------------------------------------------------------- /test-exports-vercel/src/pages/_document.tsx: -------------------------------------------------------------------------------- 1 | import { Html, Head, Main, NextScript } from 'next/document' 2 | 3 | export default function Document() { 4 | return ( 5 | 6 | 7 | 8 |
9 | 10 | 11 | 12 | ) 13 | } 14 | -------------------------------------------------------------------------------- /test-exports-vercel/src/pages/api/ask-question.ts: -------------------------------------------------------------------------------- 1 | import axios from "axios"; 2 | 3 | const API_URL = process.env.NEXT_PUBLIC_BACKEND_API_URL; 4 | 5 | interface SendQuestionResponse { 6 | answer: string; 7 | } 8 | export interface ChatHistoryEntry { 9 | question: string; 10 | answer: string; 11 | } 12 | export async function sendQuestion( 13 | question: string, 14 | chatHistory: ChatHistoryEntry[] 15 | ): Promise { 16 | try { 17 | const response = await axios.post( 18 | `${API_URL}/api/chat`, 19 | { 20 | question, 21 | chat_history: chatHistory, 22 | } 23 | ); 24 | return response.data; 25 | } catch (error) { 26 | console.error("Error while sending the question:", error); 27 | throw error; 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /test-exports-vercel/src/pages/api/index-codebase.ts: -------------------------------------------------------------------------------- 1 | // utils/api.ts 2 | import axios from "axios"; 3 | 4 | interface IndexCodebaseResponse { 5 | // Define the shape of your API response here 6 | success: boolean; 7 | message: string; 8 | } 9 | export async function indexCodebase( 10 | gitUrl: string 11 | ): Promise { 12 | try { 13 | const normalizedGitUrl = gitUrl.endsWith(".git") ? 
gitUrl : `${gitUrl}.git`; 14 | const response = await axios.post( 15 | `${process.env.NEXT_PUBLIC_BACKEND_API_URL}/api/index_codebase`, 16 | { git_url: normalizedGitUrl } 17 | ); 18 | return response.data; 19 | } catch (error) { 20 | console.error("Error while indexing the codebase:", error); 21 | throw error; 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /test-exports-vercel/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "es5", 4 | "lib": ["dom", "dom.iterable", "esnext"], 5 | "allowJs": true, 6 | "skipLibCheck": true, 7 | "strict": true, 8 | "forceConsistentCasingInFileNames": true, 9 | "noEmit": true, 10 | "esModuleInterop": true, 11 | "module": "esnext", 12 | "moduleResolution": "node", 13 | "resolveJsonModule": true, 14 | "isolatedModules": true, 15 | "jsx": "preserve", 16 | "incremental": true, 17 | "paths": { 18 | "@/*": ["./src/*"] 19 | } 20 | }, 21 | "include": ["next-env.d.ts", "**/*.ts", "**/*.tsx"], 22 | "exclude": ["node_modules"] 23 | } 24 | -------------------------------------------------------------------------------- /turbo.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://turbo.build/schema.json", 3 | "globalDependencies": [ 4 | "**/.env" 5 | ], 6 | "pipeline": { 7 | "build": { 8 | "dependsOn": [ 9 | "^build" 10 | ], 11 | "outputs": [ 12 | "dist/**", 13 | "dist-cjs/**", 14 | "*.js", 15 | "*.cjs", 16 | "*.d.ts" 17 | ], 18 | "inputs": [ 19 | "src/**", 20 | "scripts/**", 21 | "package.json", 22 | "tsconfig.json" 23 | ] 24 | }, 25 | "lint": { 26 | "outputs": [] 27 | }, 28 | "format": { 29 | "outputs": [] 30 | }, 31 | "format:check": { 32 | "outputs": [] 33 | }, 34 | "test": { 35 | "outputs": [], 36 | "dependsOn": [ 37 | "^build" 38 | ] 39 | }, 40 | "test:integration": { 41 | "outputs": [], 42 | "dependsOn": [ 43 | "^build" 44 | ] 45 | }, 
46 | "precommit": {}, 47 | "start": { 48 | "cache": false 49 | } 50 | } 51 | } 52 | --------------------------------------------------------------------------------