├── .editorconfig ├── .gitattributes ├── .github └── workflows │ ├── ci.yml │ └── integration.yml ├── .gitignore ├── .husky └── pre-commit ├── .nvmrc ├── .prettierignore ├── .prettierrc ├── .vscode └── settings.json ├── .watchmanconfig ├── .yarn ├── patches │ ├── dpdm-npm-3.12.0-0dfdd8e3b8.patch │ └── typedoc-plugin-markdown-npm-4.0.0-next.6-96b4b47746.patch ├── plugins │ └── @yarnpkg │ │ └── plugin-typescript.cjs └── releases │ └── yarn-3.4.1.cjs ├── .yarnrc.yml ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── docker-compose.yml ├── docs ├── .eslintrc.js ├── .gitignore ├── .prettierignore ├── README.md ├── babel.config.js ├── code-block-loader.js ├── docs │ ├── ecosystem │ │ ├── databerry.md │ │ ├── helicone.md │ │ └── unstructured.mdx │ ├── getting-started │ │ ├── guide-chat.mdx │ │ ├── guide-llm.mdx │ │ └── install.md │ ├── index.md │ ├── modules │ │ ├── agents │ │ │ ├── agents │ │ │ │ ├── custom_llm.mdx │ │ │ │ ├── custom_llm_chat.mdx │ │ │ │ ├── examples │ │ │ │ │ ├── chat_mrkl.mdx │ │ │ │ │ ├── conversational_agent.mdx │ │ │ │ │ ├── custom_agent_chat.mdx │ │ │ │ │ ├── index.mdx │ │ │ │ │ └── llm_mrkl.mdx │ │ │ │ └── index.mdx │ │ │ ├── executor │ │ │ │ ├── getting-started.md │ │ │ │ └── index.mdx │ │ │ ├── index.mdx │ │ │ ├── toolkits │ │ │ │ ├── index.mdx │ │ │ │ ├── json.md │ │ │ │ ├── openapi.md │ │ │ │ ├── sql.mdx │ │ │ │ └── vectorstore.md │ │ │ └── tools │ │ │ │ ├── agents_with_vectorstores.md │ │ │ │ ├── aiplugin-tool.mdx │ │ │ │ ├── index.mdx │ │ │ │ ├── integrations │ │ │ │ └── index.mdx │ │ │ │ ├── lambda_agent.md │ │ │ │ ├── webbrowser.mdx │ │ │ │ └── zapier_agent.mdx │ │ ├── chains │ │ │ ├── index.mdx │ │ │ ├── index_related_chains │ │ │ │ ├── conversational_retrieval.mdx │ │ │ │ ├── document_qa.mdx │ │ │ │ ├── index.mdx │ │ │ │ └── retrieval_qa.mdx │ │ │ ├── llmchain.mdx │ │ │ ├── other_chains │ │ │ │ ├── analyze_document.mdx │ │ │ │ ├── constitutional_chain.mdx │ │ │ │ ├── index.mdx │ │ │ │ ├── moderation_chain.mdx │ │ │ │ ├── multi_prompt_chain.mdx │ │ │ │ ├── multi_retrieval_qa_chain.mdx │ │ │ │ ├── sql.mdx │ │ │ │ └── summarization.mdx │ │ │ ├── prompt_selectors │ │ │ │ └── index.mdx │ │ │ └── sequential_chain.mdx │ │ ├── indexes │ │ │ ├── document_loaders │ │ │ │ ├── examples │ │ │ │ │ ├── file_loaders │ │ │ │ │ │ ├── csv.md │ │ │ │ │ │ ├── directory.md │ │ │ │ │ │ ├── docx.md │ │ │ │ │ │ ├── epub.md │ │ │ │ │ │ ├── index.mdx │ │ │ │ │ │ ├── json.md │ │ │ │ │ │ ├── jsonlines.md │ │ │ │ │ │ ├── notion_markdown.mdx │ │ │ │ │ │ ├── pdf.md │ │ │ │ │ │ ├── subtitles.md │ │ │ │ │ │ ├── text.md │ │ │ │ │ │ └── unstructured.mdx │ │ │ │ │ ├── index.mdx │ │ │ │ │ └── web_loaders │ │ │ │ │ │ ├── college_confidential.md │ │ │ │ │ │ ├── confluence.mdx │ │ │ │ │ │ ├── gitbook.md │ │ │ │ │ │ ├── github.md │ │ │ │ │ │ ├── hn.md │ │ │ │ │ │ ├── imsdb.md │ │ │ │ │ │ ├── index.mdx │ │ │ │ │ │ ├── s3.mdx │ │ │ │ │ │ ├── web_cheerio.md │ │ │ │ │ │ ├── web_playwright.md │ │ │ │ │ │ └── web_puppeteer.md │ │ │ │ └── index.mdx │ │ │ ├── index.mdx │ │ │ ├── retrievers │ │ │ │ ├── chatgpt-retriever-plugin.mdx │ │ │ │ ├── contextual-compression-retriever.mdx │ │ │ │ ├── databerry-retriever.mdx │ │ │ │ ├── hyde.mdx │ │ │ │ ├── index.mdx │ │ │ │ ├── metal-retriever.mdx │ │ │ │ ├── remote-retriever.mdx │ │ │ │ ├── supabase-hybrid.mdx │ │ │ │ ├── time-weighted-retriever.mdx │ │ │ │ └── vectorstore.md │ │ │ ├── text_splitters │ │ │ │ ├── examples │ │ │ │ │ ├── character.mdx │ │ │ │ │ ├── index.mdx │ │ │ │ │ ├── markdown.mdx │ │ │ │ │ ├── recursive_character.mdx │ │ │ │ │ └── token.mdx │ │ │ │ └── 
index.mdx │ │ │ └── vector_stores │ │ │ │ ├── index.mdx │ │ │ │ └── integrations │ │ │ │ ├── chroma.mdx │ │ │ │ ├── hnswlib.mdx │ │ │ │ ├── index.mdx │ │ │ │ ├── memory.mdx │ │ │ │ ├── milvus.md │ │ │ │ ├── myscale.mdx │ │ │ │ ├── opensearch.md │ │ │ │ ├── pinecone.md │ │ │ │ ├── prisma.mdx │ │ │ │ ├── supabase.mdx │ │ │ │ └── weaviate.mdx │ │ ├── memory │ │ │ ├── examples │ │ │ │ ├── buffer_memory.md │ │ │ │ ├── buffer_memory_chat.mdx │ │ │ │ ├── buffer_window_memory.md │ │ │ │ ├── conversation_summary.mdx │ │ │ │ ├── dynamodb.mdx │ │ │ │ ├── index.mdx │ │ │ │ ├── motorhead_memory.md │ │ │ │ └── vector_store_memory.mdx │ │ │ └── index.mdx │ │ ├── models │ │ │ ├── chat │ │ │ │ ├── additional_functionality.mdx │ │ │ │ ├── index.mdx │ │ │ │ └── integrations.mdx │ │ │ ├── embeddings │ │ │ │ ├── additional_functionality.mdx │ │ │ │ ├── index.mdx │ │ │ │ └── integrations.mdx │ │ │ ├── index.mdx │ │ │ └── llms │ │ │ │ ├── additional_functionality.mdx │ │ │ │ ├── index.mdx │ │ │ │ └── integrations.mdx │ │ ├── prompts │ │ │ ├── example_selectors │ │ │ │ └── index.mdx │ │ │ ├── index.mdx │ │ │ ├── output_parsers │ │ │ │ └── index.mdx │ │ │ └── prompt_templates │ │ │ │ ├── additional_functionality.mdx │ │ │ │ └── index.mdx │ │ └── schema │ │ │ ├── chat-messages.md │ │ │ ├── document.md │ │ │ ├── example.md │ │ │ └── index.mdx │ ├── production │ │ ├── callbacks │ │ │ ├── create-handlers.mdx │ │ │ ├── creating-subclasses.mdx │ │ │ └── index.mdx │ │ ├── deployment.md │ │ └── tracing.md │ └── use_cases │ │ ├── api.mdx │ │ ├── autonomous_agents │ │ ├── auto_gpt.mdx │ │ ├── baby_agi.mdx │ │ └── index.mdx │ │ ├── personal_assistants.mdx │ │ ├── question_answering.mdx │ │ ├── summarization.mdx │ │ └── tabular.mdx ├── docusaurus.config.js ├── package.json ├── sidebars.js ├── src │ ├── css │ │ └── custom.css │ ├── pages │ │ └── index.js │ └── theme │ │ ├── CodeBlock │ │ └── index.js │ │ └── SearchBar.js └── static │ ├── .nojekyll │ └── img │ ├── DataberryDashboard.png │ ├── HeliconeDashboard.png │ ├── HeliconeKeys.png │ ├── android-chrome-192x192.png │ ├── android-chrome-512x512.png │ ├── apple-touch-icon.png │ ├── favicon-16x16.png │ ├── favicon-32x32.png │ ├── favicon.ico │ ├── parrot-chainlink-icon.png │ └── parrot-icon.png ├── examples ├── .env.example ├── .eslintrc.cjs ├── .yarn │ └── install-state.gz ├── openai_openapi.yaml ├── package.json ├── src │ ├── README.md │ ├── agents │ │ ├── aiplugin-tool.ts │ │ ├── chat_convo_with_tracing.ts │ │ ├── chat_mrkl.ts │ │ ├── chat_mrkl_with_tracing.ts │ │ ├── concurrent_mrkl.ts │ │ ├── custom_agent.ts │ │ ├── custom_llm_agent.ts │ │ ├── custom_llm_agent_chat.ts │ │ ├── custom_tool.ts │ │ ├── json.ts │ │ ├── load_from_hub.ts │ │ ├── mrkl.ts │ │ ├── mrkl_browser.ts │ │ ├── mrkl_with_tracing.ts │ │ ├── openapi.ts │ │ ├── sql.ts │ │ ├── streaming.ts │ │ ├── vectorstore.ts │ │ └── zapier_mrkl.ts │ ├── callbacks │ │ ├── console_handler.ts │ │ ├── custom_handler.ts │ │ ├── docs_constructor_callbacks.ts │ │ ├── docs_request_callbacks.ts │ │ └── docs_verbose.ts │ ├── chains │ │ ├── advanced_subclass.ts │ │ ├── advanced_subclass_call.ts │ │ ├── analyze_document_chain_summarize.ts │ │ ├── chat_vector_db_chroma.ts │ │ ├── constitutional_chain.ts │ │ ├── conversation_chain.ts │ │ ├── conversational_qa.ts │ │ ├── llm_chain.ts │ │ ├── llm_chain_stream.ts │ │ ├── load_from_hub.ts │ │ ├── multi_prompt.ts │ │ ├── multi_retrieval_qa.ts │ │ ├── openai_moderation.ts │ │ ├── qa_refine.ts │ │ ├── question_answering.ts │ │ ├── question_answering_map_reduce.ts │ │ ├── retrieval_qa.ts │ │ 
├── retrieval_qa_custom.ts │ │ ├── retrieval_qa_with_remote.ts │ │ ├── sequential_chain.ts │ │ ├── simple_sequential_chain.ts │ │ ├── sql_db.ts │ │ ├── summarization.ts │ │ ├── summarization_map_reduce.ts │ │ └── summarization_map_reduce_intermediate_steps.ts │ ├── chat │ │ ├── agent.ts │ │ ├── llm_chain.ts │ │ ├── memory.ts │ │ └── overview.ts │ ├── document_loaders │ │ ├── cheerio_web.ts │ │ ├── college_confidential.ts │ │ ├── confluence.ts │ │ ├── example_data │ │ │ ├── Star_Wars_The_Clone_Wars_S06E07_Crisis_at_the_Heart.srt │ │ │ ├── example.txt │ │ │ └── notion.md │ │ ├── gitbook.ts │ │ ├── github.ts │ │ ├── hn.ts │ │ ├── imsdb.ts │ │ ├── notion_markdown.ts │ │ ├── puppeteer_web.ts │ │ ├── s3.ts │ │ ├── srt.ts │ │ ├── text.ts │ │ ├── unstructured.ts │ │ └── unstructured_directory.ts │ ├── embeddings │ │ ├── cohere.ts │ │ ├── max_concurrency.ts │ │ └── openai.ts │ ├── experimental │ │ ├── autogpt │ │ │ ├── weather.ts │ │ │ └── weather_browser.ts │ │ └── babyagi │ │ │ ├── weather.ts │ │ │ └── weather_with_tools.ts │ ├── index.ts │ ├── indexes │ │ ├── recursive_text_splitter.ts │ │ ├── text_splitter.ts │ │ ├── token_text_splitter.ts │ │ └── vector_stores │ │ │ ├── chroma │ │ │ ├── fromDocs.ts │ │ │ ├── fromTexts.ts │ │ │ └── search.ts │ │ │ ├── hnswlib.ts │ │ │ ├── hnswlib_filter.ts │ │ │ ├── hnswlib_fromdocs.ts │ │ │ ├── hnswlib_saveload.ts │ │ │ ├── memory.ts │ │ │ ├── memory_custom_similarity.ts │ │ │ ├── memory_fromdocs.ts │ │ │ ├── milvus.ts │ │ │ ├── mongo_fromTexts.ts │ │ │ ├── mongo_search.ts │ │ │ ├── myscale_fromTexts.ts │ │ │ ├── myscale_search.ts │ │ │ ├── opensearch │ │ │ ├── docker-compose.yml │ │ │ └── opensearch.ts │ │ │ ├── pinecone.ts │ │ │ ├── prisma_vectorstore │ │ │ ├── .env.example │ │ │ ├── .gitignore │ │ │ ├── docker-compose.example.yml │ │ │ ├── prisma.ts │ │ │ └── prisma │ │ │ │ ├── migrations │ │ │ │ ├── 00_init │ │ │ │ │ └── migration.sql │ │ │ │ └── migration_lock.toml │ │ │ │ └── schema.prisma │ │ │ ├── supabase.ts │ │ │ ├── supabase_with_metadata_filter.ts │ │ │ ├── weaviate_fromTexts.ts │ │ │ └── weaviate_search.ts │ ├── llms │ │ ├── cohere.ts │ │ ├── gpt4all.ts │ │ ├── hf.ts │ │ ├── openai-chat.ts │ │ ├── openai.ts │ │ └── replicate.ts │ ├── memory │ │ ├── buffer.ts │ │ ├── buffer_window.ts │ │ ├── dynamodb-store.ts │ │ ├── summary_chat.ts │ │ ├── summary_llm.ts │ │ └── vector_store.ts │ ├── models │ │ ├── chat │ │ │ ├── chat.ts │ │ │ ├── chat_quick_start.ts │ │ │ ├── chat_streaming.ts │ │ │ ├── chat_streaming_stdout.ts │ │ │ ├── chat_timeout.ts │ │ │ ├── integration_anthropic.ts │ │ │ ├── integration_azure_openai.ts │ │ │ └── integration_openai.ts │ │ ├── embeddings │ │ │ ├── cohere.ts │ │ │ ├── openai.ts │ │ │ ├── openai_timeout.ts │ │ │ └── tensorflow.ts │ │ └── llm │ │ │ ├── gpt4all.ts │ │ │ ├── llm.ts │ │ │ ├── llm_debugging.ts │ │ │ ├── llm_promptlayer.ts │ │ │ ├── llm_quick_start.ts │ │ │ ├── llm_streaming.ts │ │ │ ├── llm_streaming_stdout.ts │ │ │ ├── llm_timeout.ts │ │ │ ├── llm_with_tracing.ts │ │ │ ├── openai_basePath.ts │ │ │ ├── openai_cancellation.ts │ │ │ ├── openai_userid.ts │ │ │ └── replicate.ts │ ├── prompts │ │ ├── combining_parser.ts │ │ ├── comma_list_parser.ts │ │ ├── custom_list_parser.ts │ │ ├── few_shot.ts │ │ ├── fix_parser.ts │ │ ├── length_based_example_selector.ts │ │ ├── load_from_hub.ts │ │ ├── partial.ts │ │ ├── prompt_value.ts │ │ ├── prompts.ts │ │ ├── regex_parser.ts │ │ ├── semantic_similarity_example_selector.ts │ │ ├── structured_parser.ts │ │ └── structured_parser_zod.ts │ ├── retrievers │ │ ├── chatgpt-plugin.ts │ 
│ ├── contextual_compression.ts │ │ ├── databerry.ts │ │ ├── hyde.ts │ │ ├── metal.ts │ │ ├── supabase_hybrid.ts │ │ └── time-weighted-retriever.ts │ └── tools │ │ └── webbrowser.ts ├── state_of_the_union.txt └── tsconfig.json ├── langchain-gpt4all ├── .env.example ├── .eslintrc.cjs ├── .gitignore ├── .release-it.json ├── README.md ├── babel.config.cjs ├── docker-compose.yaml ├── jest.config.cjs ├── package.json ├── scripts │ ├── check-tree-shaking.js │ ├── create-entrypoints.js │ └── move-cjs-to-dist.js ├── src │ ├── agents │ │ ├── agent.ts │ │ ├── agent_toolkits │ │ │ ├── base.ts │ │ │ ├── index.ts │ │ │ ├── json │ │ │ │ ├── json.ts │ │ │ │ └── prompt.ts │ │ │ ├── openapi │ │ │ │ ├── openapi.ts │ │ │ │ └── prompt.ts │ │ │ ├── sql │ │ │ │ ├── prompt.ts │ │ │ │ └── sql.ts │ │ │ ├── vectorstore │ │ │ │ ├── prompt.ts │ │ │ │ └── vectorstore.ts │ │ │ └── zapier │ │ │ │ └── zapier.ts │ │ ├── chat │ │ │ ├── index.ts │ │ │ ├── outputParser.ts │ │ │ └── prompt.ts │ │ ├── chat_convo │ │ │ ├── index.ts │ │ │ ├── outputParser.ts │ │ │ └── prompt.ts │ │ ├── executor.ts │ │ ├── helpers.ts │ │ ├── index.ts │ │ ├── initialize.ts │ │ ├── load.ts │ │ ├── mrkl │ │ │ ├── index.ts │ │ │ ├── outputParser.ts │ │ │ └── prompt.ts │ │ ├── tests │ │ │ ├── agent.int.test.ts │ │ │ ├── aws_lambda.test.ts │ │ │ ├── calculator.test.ts │ │ │ ├── chat_agent.int.test.ts │ │ │ ├── chat_convo_agent.int.test.ts │ │ │ ├── chat_output_parser.test.ts │ │ │ ├── evaluation.int.test.ts │ │ │ ├── json.test.ts │ │ │ ├── mrkl_agent.int.test.ts │ │ │ ├── sql.test.ts │ │ │ └── zapier_toolkit.int.test.ts │ │ └── types.ts │ ├── base_language │ │ ├── count_tokens.ts │ │ └── index.ts │ ├── cache │ │ ├── base.ts │ │ ├── index.ts │ │ ├── redis.ts │ │ └── tests │ │ │ ├── cache.test.ts │ │ │ └── redis.test.ts │ ├── callbacks │ │ ├── base.ts │ │ ├── handlers │ │ │ ├── console.ts │ │ │ ├── initialize.ts │ │ │ └── tracers.ts │ │ ├── index.ts │ │ ├── manager.ts │ │ └── tests │ │ │ ├── callbacks.test.ts │ │ │ ├── langchain_tracer.int.test.ts │ │ │ └── tracer.test.ts │ ├── chains │ │ ├── analyze_documents_chain.ts │ │ ├── base.ts │ │ ├── chat_vector_db_chain.ts │ │ ├── combine_docs_chain.ts │ │ ├── constitutional_ai │ │ │ ├── constitutional_chain.ts │ │ │ ├── constitutional_principle.ts │ │ │ └── constitutional_prompts.ts │ │ ├── conversation.ts │ │ ├── conversational_retrieval_chain.ts │ │ ├── index.ts │ │ ├── llm_chain.ts │ │ ├── load.ts │ │ ├── openai_moderation.ts │ │ ├── prompt_selector.ts │ │ ├── question_answering │ │ │ ├── load.ts │ │ │ ├── map_reduce_prompts.ts │ │ │ ├── refine_prompts.ts │ │ │ ├── stuff_prompts.ts │ │ │ └── tests │ │ │ │ └── load.int.test.ts │ │ ├── retrieval_qa.ts │ │ ├── router │ │ │ ├── llm_router.ts │ │ │ ├── multi_prompt.ts │ │ │ ├── multi_prompt_prompt.ts │ │ │ ├── multi_retrieval_prompt.ts │ │ │ ├── multi_retrieval_qa.ts │ │ │ ├── multi_route.ts │ │ │ ├── tests │ │ │ │ ├── multi_prompt.int.test.ts │ │ │ │ ├── multi_prompt.test.ts │ │ │ │ ├── multi_retrieval_qa.int.test.ts │ │ │ │ └── multi_retrieval_qa.test.ts │ │ │ └── utils.ts │ │ ├── sequential_chain.ts │ │ ├── serde.ts │ │ ├── sql_db │ │ │ ├── sql_db_chain.ts │ │ │ └── sql_db_prompt.ts │ │ ├── summarization │ │ │ ├── load.ts │ │ │ ├── refine_prompts.ts │ │ │ ├── stuff_prompts.ts │ │ │ └── tests │ │ │ │ └── load.int.test.ts │ │ ├── tests │ │ │ ├── chat_vector_db_qa_chain.int.test.ts │ │ │ ├── combine_docs_chain.int.test.ts │ │ │ ├── combine_docs_chain.test.ts │ │ │ ├── constitutional_chain.int.test.ts │ │ │ ├── constitutional_chain.test.ts │ │ │ ├── 
conversation_chain.int.test.ts │ │ │ ├── llm_chain.int.test.ts │ │ │ ├── openai_moderation.int.test.ts │ │ │ ├── sequential_chain.int.test.ts │ │ │ ├── sequential_chain.test.ts │ │ │ ├── simple_sequential_chain.int.test.ts │ │ │ ├── simple_sequential_chain.test.ts │ │ │ ├── sql_db_chain.int.test.ts │ │ │ └── vector_db_qa_chain.int.test.ts │ │ └── vector_db_qa.ts │ ├── chat_models │ │ ├── anthropic.ts │ │ ├── base.ts │ │ ├── index.ts │ │ ├── openai.ts │ │ └── tests │ │ │ ├── chatanthropic.int.test.ts │ │ │ └── chatopenai.int.test.ts │ ├── docstore │ │ ├── base.ts │ │ ├── in_memory.ts │ │ └── index.ts │ ├── document.ts │ ├── document_loaders │ │ ├── base.ts │ │ ├── fs │ │ │ ├── buffer.ts │ │ │ ├── csv.ts │ │ │ ├── directory.ts │ │ │ ├── docx.ts │ │ │ ├── epub.ts │ │ │ ├── json.ts │ │ │ ├── notion.ts │ │ │ ├── pdf.ts │ │ │ ├── srt.ts │ │ │ ├── text.ts │ │ │ └── unstructured.ts │ │ ├── index.ts │ │ ├── tests │ │ │ ├── cheerio.int.test.ts │ │ │ ├── college_confidential.int.test.ts │ │ │ ├── confluence.test.ts │ │ │ ├── csv-blob.test.ts │ │ │ ├── csv.test.ts │ │ │ ├── directory.test.ts │ │ │ ├── docx.test.ts │ │ │ ├── epub.test.ts │ │ │ ├── example_data │ │ │ │ ├── 1706.03762.pdf │ │ │ │ ├── Star_Wars_The_Clone_Wars_S06E07_Crisis_at_the_Heart.csv │ │ │ │ ├── Star_Wars_The_Clone_Wars_S06E07_Crisis_at_the_Heart.json │ │ │ │ ├── Star_Wars_The_Clone_Wars_S06E07_Crisis_at_the_Heart.jsonl │ │ │ │ ├── Star_Wars_The_Clone_Wars_S06E07_Crisis_at_the_Heart.srt │ │ │ │ ├── attention.docx │ │ │ │ ├── attention.epub │ │ │ │ ├── complex.json │ │ │ │ ├── example.txt │ │ │ │ └── notion.md │ │ │ ├── github.int.test.ts │ │ │ ├── hn.int.test.ts │ │ │ ├── imsdb.test.ts │ │ │ ├── json-blob.test.ts │ │ │ ├── json.test.ts │ │ │ ├── jsonl-blob.test.ts │ │ │ ├── jsonl.test.ts │ │ │ ├── notion.test.ts │ │ │ ├── pdf-blob.test.ts │ │ │ ├── pdf.test.ts │ │ │ ├── playwright_web.int.test.ts │ │ │ ├── puppeteer.int.test.ts │ │ │ ├── s3.test.ts │ │ │ ├── srt-blob.test.ts │ │ │ ├── srt.test.ts │ │ │ ├── text-blob.test.ts │ │ │ ├── text.test.ts │ │ │ └── unstructured.int.test.ts │ │ └── web │ │ │ ├── cheerio.ts │ │ │ ├── college_confidential.ts │ │ │ ├── confluence.ts │ │ │ ├── gitbook.ts │ │ │ ├── github.ts │ │ │ ├── hn.ts │ │ │ ├── imsdb.ts │ │ │ ├── playwright.ts │ │ │ ├── puppeteer.ts │ │ │ └── s3.ts │ ├── embeddings │ │ ├── base.ts │ │ ├── cohere.ts │ │ ├── fake.ts │ │ ├── hf.ts │ │ ├── index.ts │ │ ├── openai.ts │ │ ├── tensorflow.ts │ │ └── tests │ │ │ ├── cohere.int.test.ts │ │ │ ├── hf.int.test.ts │ │ │ ├── openai.int.test.ts │ │ │ └── tensorflow.int.test.ts │ ├── experimental │ │ ├── autogpt │ │ │ ├── agent.ts │ │ │ ├── index.ts │ │ │ ├── output_parser.ts │ │ │ ├── prompt.ts │ │ │ ├── prompt_generator.ts │ │ │ ├── schema.ts │ │ │ └── tests │ │ │ │ └── prompt_generator.test.ts │ │ └── babyagi │ │ │ ├── agent.ts │ │ │ ├── index.ts │ │ │ ├── task_creation.ts │ │ │ ├── task_execution.ts │ │ │ └── task_prioritization.ts │ ├── index.ts │ ├── llms │ │ ├── base.ts │ │ ├── cohere.ts │ │ ├── gpt4all.ts │ │ ├── hf.ts │ │ ├── index.ts │ │ ├── load.ts │ │ ├── openai-chat.ts │ │ ├── openai.ts │ │ ├── replicate.ts │ │ └── tests │ │ │ ├── cohere.int.test.ts │ │ │ ├── gpt4all.int.test.ts │ │ │ ├── huggingface_hub.int.test.ts │ │ │ ├── openai-chat.int.test.ts │ │ │ ├── openai.int.test.ts │ │ │ └── replicate.int.test.ts │ ├── memory │ │ ├── base.ts │ │ ├── buffer_memory.ts │ │ ├── buffer_window_memory.ts │ │ ├── chat_memory.ts │ │ ├── index.ts │ │ ├── motorhead_memory.ts │ │ ├── prompt.ts │ │ ├── summary.ts │ │ ├── tests │ │ │ ├── 
buffer_memory.test.ts │ │ │ ├── buffer_window_memory.test.ts │ │ │ ├── motorhead_memory.test.ts │ │ │ ├── summary.int.test.ts │ │ │ └── vector_store_memory.int.test.ts │ │ └── vector_store.ts │ ├── output_parsers │ │ ├── combining.ts │ │ ├── fix.ts │ │ ├── index.ts │ │ ├── list.ts │ │ ├── prompts.ts │ │ ├── regex.ts │ │ ├── router.ts │ │ ├── structured.ts │ │ └── tests │ │ │ ├── combining.int.test.ts │ │ │ ├── combining.test.ts │ │ │ ├── list.test.ts │ │ │ ├── structured.int.test.ts │ │ │ └── structured.test.ts │ ├── prompts │ │ ├── base.ts │ │ ├── chat.ts │ │ ├── few_shot.ts │ │ ├── index.ts │ │ ├── load.ts │ │ ├── prompt.ts │ │ ├── selectors │ │ │ ├── LengthBasedExampleSelector.ts │ │ │ └── SemanticSimilarityExampleSelector.ts │ │ ├── serde.ts │ │ ├── template.ts │ │ └── tests │ │ │ ├── __snapshots__ │ │ │ └── chat.test.ts.snap │ │ │ ├── chat.test.ts │ │ │ ├── few_shot.test.ts │ │ │ ├── load.int.test.ts │ │ │ ├── prompt.test.ts │ │ │ ├── prompts │ │ │ └── hello_world.yaml │ │ │ ├── selectors.test.ts │ │ │ └── template.test.ts │ ├── retrievers │ │ ├── contextual_compression.ts │ │ ├── databerry.ts │ │ ├── document_compressors │ │ │ ├── chain_extract.ts │ │ │ ├── chain_extract_prompt.ts │ │ │ └── index.ts │ │ ├── hyde.ts │ │ ├── index.ts │ │ ├── metal.ts │ │ ├── remote │ │ │ ├── base.ts │ │ │ ├── chatgpt-plugin.ts │ │ │ ├── index.ts │ │ │ └── remote-retriever.ts │ │ ├── supabase.ts │ │ ├── tests │ │ │ ├── chain_extract.int.test.ts │ │ │ ├── hyde.int.test.ts │ │ │ ├── metal.int.test.ts │ │ │ ├── supabase.int.test.ts │ │ │ └── time_weighted.test.ts │ │ └── time_weighted.ts │ ├── schema │ │ ├── index.ts │ │ └── output_parser.ts │ ├── sql_db.ts │ ├── stores │ │ ├── file │ │ │ ├── in_memory.ts │ │ │ └── node.ts │ │ ├── message │ │ │ ├── dynamodb.ts │ │ │ ├── in_memory.ts │ │ │ └── utils.ts │ │ └── tests │ │ │ └── dynamodb.int.test.ts │ ├── tests │ │ ├── sql_database.int.test.ts │ │ └── text_splitter.test.ts │ ├── text_splitter.ts │ ├── tools │ │ ├── IFTTTWebhook.ts │ │ ├── aiplugin.ts │ │ ├── aws_lambda.ts │ │ ├── base.ts │ │ ├── bingserpapi.ts │ │ ├── calculator.ts │ │ ├── chain.ts │ │ ├── dadjokeapi.ts │ │ ├── dynamic.ts │ │ ├── fixtures │ │ │ └── wordoftheday.html │ │ ├── fs.ts │ │ ├── index.ts │ │ ├── json.ts │ │ ├── requests.ts │ │ ├── serpapi.ts │ │ ├── serper.ts │ │ ├── sql.ts │ │ ├── tests │ │ │ ├── aiplugin.int.test.ts │ │ │ ├── chain.test.ts │ │ │ ├── serpapi.test.ts │ │ │ ├── webbrowser.int.test.ts │ │ │ └── webbrowser.test.ts │ │ ├── vectorstore.ts │ │ ├── webbrowser.ts │ │ └── zapier.ts │ ├── types │ │ ├── openai-types.ts │ │ ├── pdf-parse.d.ts │ │ └── type-utils.ts │ ├── util │ │ ├── async_caller.ts │ │ ├── axios-fetch-adapter.d.ts │ │ ├── axios-fetch-adapter.js │ │ ├── axios-types.ts │ │ ├── chunk.ts │ │ ├── env.ts │ │ ├── event-source-parse.ts │ │ ├── extname.ts │ │ ├── hub.ts │ │ ├── load.ts │ │ ├── parse.ts │ │ ├── set.ts │ │ ├── sql_utils.ts │ │ └── tests │ │ │ ├── async_caller.int.test.ts │ │ │ ├── async_caller.test.ts │ │ │ ├── openai-stream.test.ts │ │ │ ├── set.test.ts │ │ │ └── sql_utils.test.ts │ └── vectorstores │ │ ├── base.ts │ │ ├── chroma.ts │ │ ├── hnswlib.ts │ │ ├── index.ts │ │ ├── memory.ts │ │ ├── milvus.ts │ │ ├── mongo.ts │ │ ├── myscale.ts │ │ ├── opensearch.ts │ │ ├── pinecone.ts │ │ ├── prisma.ts │ │ ├── supabase.ts │ │ ├── tests │ │ ├── chroma.test.ts │ │ ├── hnswlib.int.test.ts │ │ ├── hnswlib.test.ts │ │ ├── memory.int.test.ts │ │ ├── milvus.int.test.ts │ │ ├── mongo.int.test.ts │ │ ├── myscale.int.test.ts │ │ ├── opensearch.int.test.ts │ │ ├── 
pinecone.int.test.ts │ │ ├── pinecone.test.ts │ │ ├── supabase.int.test.ts │ │ ├── supabase.test.ts │ │ ├── weaviate.int.test.ts │ │ └── weaviate.test.ts │ │ └── weaviate.ts ├── tsconfig.cjs.json └── tsconfig.json ├── package.json ├── scripts ├── docker-ci-entrypoint.sh └── release-branch.sh ├── test-exports-cf ├── .gitignore ├── README.md ├── package.json ├── src │ ├── entrypoints.js │ ├── index.int.test.ts │ ├── index.ts │ └── index.unit.test.ts ├── tsconfig.json └── wrangler.toml ├── test-exports-cjs ├── package.json ├── src │ ├── entrypoints.js │ ├── import.js │ ├── index.mjs │ ├── index.ts │ └── require.js └── tsconfig.json ├── test-exports-cra ├── .gitignore ├── README.md ├── package.json ├── public │ ├── favicon.ico │ ├── index.html │ ├── logo192.png │ ├── logo512.png │ ├── manifest.json │ └── robots.txt └── src │ ├── App.js │ ├── App.test.js │ ├── entrypoints.js │ ├── index.css │ ├── index.js │ ├── reportWebVitals.js │ └── setupTests.js ├── test-exports-esbuild ├── .gitignore ├── entrypoint.sh ├── package.json ├── src │ ├── entrypoints.js │ ├── import.cjs │ ├── index.js │ ├── require.cjs │ └── typescript.ts └── tsconfig.json ├── test-exports-esm ├── package.json ├── src │ ├── entrypoints.js │ ├── import.cjs │ ├── index.js │ ├── index.ts │ └── require.cjs └── tsconfig.json ├── test-exports-vercel ├── .eslintrc.json ├── .gitignore ├── README.md ├── next.config.js ├── package.json ├── public │ ├── favicon.ico │ ├── next.svg │ ├── thirteen.svg │ └── vercel.svg ├── src │ ├── entrypoints.js │ ├── pages │ │ ├── _app.tsx │ │ ├── _document.tsx │ │ ├── api │ │ │ ├── hello-edge.ts │ │ │ └── hello-serverless.ts │ │ └── index.tsx │ └── styles │ │ ├── Home.module.css │ │ └── globals.css └── tsconfig.json ├── test-exports-vite ├── .gitignore ├── index.html ├── package.json ├── public │ └── vite.svg ├── src │ ├── chain.ts │ ├── entrypoints.js │ ├── main.ts │ ├── style.css │ ├── typescript.svg │ └── vite-env.d.ts ├── tsconfig.json └── vite.config.js ├── turbo.json └── yarn.lock /.editorconfig: -------------------------------------------------------------------------------- 1 | # top-most EditorConfig file 2 | root = true 3 | 4 | # Unix-style newlines with a newline ending every file 5 | [*] 6 | end_of_line = lf -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | * text=auto eol=lf -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules/ 2 | dist/ 3 | dist-cjs/ 4 | lib/ 5 | .turbo 6 | .eslintcache 7 | .env 8 | yarn-error.log 9 | 10 | .yarn/* 11 | !.yarn/patches 12 | !.yarn/plugins 13 | !.yarn/releases 14 | !.yarn/sdks 15 | !.yarn/versions 16 | 17 | langchain-gpt4all/docs/ 18 | 19 | .idea/ 20 | 21 | .DS_Store 22 | 23 | Chinook.db 24 | Chinook_Sqlite.sql 25 | 26 | .envrc 27 | *.swp 28 | *.swo 29 | 30 | -------------------------------------------------------------------------------- /.husky/pre-commit: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | . 
"$(dirname -- "$0")/_/husky.sh" 3 | 4 | npx turbo run precommit 5 | -------------------------------------------------------------------------------- /.nvmrc: -------------------------------------------------------------------------------- 1 | 20 2 | -------------------------------------------------------------------------------- /.prettierignore: -------------------------------------------------------------------------------- 1 | babel.config.js 2 | jest.config.js 3 | .eslintrc.js 4 | -------------------------------------------------------------------------------- /.prettierrc: -------------------------------------------------------------------------------- 1 | { 2 | "endOfLine": "lf" 3 | } -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "eslint.workingDirectories": [ 3 | "./langchain-gpt4all", 4 | "./examples", 5 | "./docs", 6 | "./test-exports-vercel", 7 | "./test-exports-cra" 8 | ], 9 | "yaml.schemas": { 10 | "https://json.schemastore.org/github-workflow.json": "./.github/workflows/deploy.yml" 11 | }, 12 | "typescript.tsdk": "node_modules/typescript/lib" 13 | } 14 | -------------------------------------------------------------------------------- /.watchmanconfig: -------------------------------------------------------------------------------- 1 | { 2 | "ignore_dirs": [ 3 | "langchain-gpt4all/dist", 4 | "langchain-gpt4all/dist-cjs", 5 | "docs/build", 6 | "node_modules", 7 | "langchain-gpt4all/.turbo", 8 | "docs/.turbo", 9 | "test-exports/.turbo", 10 | "test-exports-cjs/.turbo" 11 | ] 12 | } 13 | -------------------------------------------------------------------------------- /.yarnrc.yml: -------------------------------------------------------------------------------- 1 | nodeLinker: node-modules 2 | 3 | plugins: 4 | - path: .yarn/plugins/@yarnpkg/plugin-typescript.cjs 5 | spec: "@yarnpkg/plugin-typescript" 6 | 7 | yarnPath: .yarn/releases/yarn-3.4.1.cjs 8 | -------------------------------------------------------------------------------- /docs/.gitignore: -------------------------------------------------------------------------------- 1 | # Dependencies 2 | /node_modules 3 | 4 | # Production 5 | /build 6 | 7 | # Generated files 8 | .docusaurus 9 | .cache-loader 10 | docs/api 11 | 12 | # Misc 13 | .DS_Store 14 | .env.local 15 | .env.development.local 16 | .env.test.local 17 | .env.production.local 18 | 19 | npm-debug.log* 20 | yarn-debug.log* 21 | yarn-error.log* 22 | 23 | # ESLint 24 | .eslintcache 25 | -------------------------------------------------------------------------------- /docs/.prettierignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | build 3 | .docusaurus 4 | docs/api -------------------------------------------------------------------------------- /docs/babel.config.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) Meta Platforms, Inc. and affiliates. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 
6 | * 7 | * @format 8 | */ 9 | 10 | module.exports = { 11 | presets: [require.resolve("@docusaurus/core/lib/babel/preset")], 12 | }; 13 | -------------------------------------------------------------------------------- /docs/docs/modules/agents/agents/custom_llm.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | sidebar_position: 1 4 | --- 5 | 6 | import CodeBlock from "@theme/CodeBlock"; 7 | import Example from "@examples/agents/custom_llm_agent.ts"; 8 | 9 | # Custom LLM Agent 10 | 11 | This example covers how to create a custom Agent powered by an LLM. 12 | 13 | {Example} 14 | -------------------------------------------------------------------------------- /docs/docs/modules/agents/agents/custom_llm_chat.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | sidebar_position: 1 4 | --- 5 | 6 | import CodeBlock from "@theme/CodeBlock"; 7 | import Example from "@examples/agents/custom_llm_agent_chat.ts"; 8 | 9 | # Custom LLM Agent (with Chat Model) 10 | 11 | This example covers how to create a custom Agent powered by a Chat Model. 12 | 13 | {Example} 14 | -------------------------------------------------------------------------------- /docs/docs/modules/agents/agents/examples/chat_mrkl.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | sidebar_position: 2 4 | --- 5 | 6 | import CodeBlock from "@theme/CodeBlock"; 7 | import Example from "@examples/agents/chat_mrkl.ts"; 8 | 9 | # MRKL Agent for Chat Models 10 | 11 | This example covers how to use an agent that uses the ReAct Framework (based on the descriptions of tools) to decide what action to take. This agent is optimized to be used with Chat Models. If you want to use it with an LLM, you can use the [LLM MRKL Agent](./llm_mrkl) instead. 12 | 13 | {Example} 14 | -------------------------------------------------------------------------------- /docs/docs/modules/agents/agents/examples/custom_agent_chat.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | --- 4 | 5 | import CodeBlock from "@theme/CodeBlock"; 6 | import Example from "@examples/chat/agent.ts"; 7 | 8 | # Agent with Custom Prompt, using Chat Models 9 | 10 | This example covers how to create a custom agent for a chat model. It will utilize chat-specific prompts. 11 | 12 | {Example} 13 | -------------------------------------------------------------------------------- /docs/docs/modules/agents/agents/examples/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_label: Examples 3 | hide_table_of_contents: true 4 | --- 5 | 6 | import DocCardList from "@theme/DocCardList"; 7 | 8 | # Examples: Agents 9 | 10 | 11 | -------------------------------------------------------------------------------- /docs/docs/modules/agents/agents/examples/llm_mrkl.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | sidebar_position: 1 4 | --- 5 | 6 | import CodeBlock from "@theme/CodeBlock"; 7 | import Example from "@examples/agents/mrkl.ts"; 8 | 9 | # MRKL Agent for LLMs 10 | 11 | This example covers how to use an agent that uses the ReAct Framework (based on the descriptions of tools) to decide what action to take. This agent is optimized to be used with LLMs.
If you want to use it with a chat model, try the [Chat MRKL Agent](./chat_mrkl). 12 | 13 | {Example} 14 | -------------------------------------------------------------------------------- /docs/docs/modules/agents/executor/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | --- 4 | 5 | # Agent Executors 6 | 7 | :::info 8 | [Conceptual Guide](https://docs.langchain-gpt4all.com/docs/components/agents/agent-executor) 9 | ::: 10 | 11 | To make agents more powerful we need to make them iterative, i.e. call the model multiple times until they arrive at the final answer. That's the job of the AgentExecutor. 12 | 13 | ```typescript 14 | class AgentExecutor { 15 | // a simplified implementation 16 | async run(inputs: object) { 17 | const steps = []; 18 | while (true) { 19 | const step = await this.agent.plan(steps, inputs); 20 | if (step instanceof AgentFinish) { 21 | return step.returnValues; 22 | } 23 | steps.push(step); 24 | } 25 | } 26 | } 27 | ``` 28 | 29 | import DocCardList from "@theme/DocCardList"; 30 | 31 | 32 | -------------------------------------------------------------------------------- /docs/docs/modules/agents/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_position: 7 3 | hide_table_of_contents: true 4 | --- 5 | 6 | import DocCardList from "@theme/DocCardList"; 7 | 8 | # Agents 9 | 10 | :::info 11 | [Conceptual Guide](https://docs.langchain-gpt4all.com/docs/components/agents) 12 | ::: 13 | 14 | 15 | -------------------------------------------------------------------------------- /docs/docs/modules/agents/toolkits/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_label: Toolkits 3 | sidebar_position: 2 4 | hide_table_of_contents: true 5 | --- 6 | 7 | # Getting Started: Toolkits 8 | 9 | :::info 10 | [Conceptual Guide](https://docs.langchain-gpt4all.com/docs/components/agents/toolkit) 11 | ::: 12 | 13 | Groups of [tools](../tools/) that can be used, or are necessary, to solve a particular problem. 14 | 15 | ```typescript 16 | interface Toolkit { 17 | tools: Tool[]; 18 | } 19 | ``` 20 | 21 | ## All Toolkits 22 | 23 | import DocCardList from "@theme/DocCardList"; 24 | 25 | 26 | -------------------------------------------------------------------------------- /docs/docs/modules/agents/toolkits/sql.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | --- 4 | 5 | # SQL Agent Toolkit 6 | 7 | This example shows how to load and use an agent with a SQL toolkit. 8 | 9 | import CodeBlock from "@theme/CodeBlock"; 10 | import Example from "@examples/agents/sql.ts"; 11 | 12 | {Example} 13 | -------------------------------------------------------------------------------- /docs/docs/modules/chains/index_related_chains/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | sidebar_label: Index Related Chains 4 | sidebar_position: 2 5 | --- 6 | 7 | import DocCardList from "@theme/DocCardList"; 8 | 9 | # Index Related Chains 10 | 11 | :::info 12 | [Conceptual Guide](https://docs.langchain-gpt4all.com/docs/components/chains/index_related_chains) 13 | ::: 14 | 15 | Chains related to working with unstructured data stored in indexes.
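The canonical example of an index-related chain is retrieval-augmented question answering: index documents into a vector store, retrieve the most relevant ones for a query, and have a QA chain answer from them. Below is a minimal sketch of that pattern, assuming an `OPENAI_API_KEY` environment variable is set; the texts and the query are placeholder data.

```typescript
import { OpenAI } from "langchain-gpt4all/llms/openai";
import { OpenAIEmbeddings } from "langchain-gpt4all/embeddings/openai";
import { MemoryVectorStore } from "langchain-gpt4all/vectorstores/memory";
import { RetrievalQAChain } from "langchain-gpt4all/chains";

// Index a couple of placeholder texts into an in-memory vector store.
const vectorStore = await MemoryVectorStore.fromTexts(
  ["Harrison worked at Kensho.", "Sandwiches are tasty."],
  [{ id: 1 }, { id: 2 }],
  new OpenAIEmbeddings()
);

// Expose the store as a retriever and put a QA chain on top of it.
const chain = RetrievalQAChain.fromLLM(
  new OpenAI({}),
  vectorStore.asRetriever()
);

const res = await chain.call({ query: "Where did Harrison work?" });
console.log(res.text);
```

The chains in this section all follow this shape, differing mainly in how the retrieved documents are combined and whether chat history is carried along.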
16 | 17 | 18 | -------------------------------------------------------------------------------- /docs/docs/modules/chains/index_related_chains/retrieval_qa.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | sidebar_position: 2 4 | --- 5 | 6 | import RetrievalQAExample from "@examples/chains/retrieval_qa.ts"; 7 | import RetrievalQAExampleCustom from "@examples/chains/retrieval_qa_custom.ts"; 8 | import CodeBlock from "@theme/CodeBlock"; 9 | 10 | # Retrieval QA 11 | 12 | The `RetrievalQAChain` is a chain that combines a `Retriever` and a QA chain (described above). It is used to retrieve documents from a `Retriever` and then use a `QA` chain to answer a question based on the retrieved documents. 13 | 14 | ## Usage 15 | 16 | In the below example, we are using a `VectorStore` as the `Retriever`. By default, the `StuffDocumentsChain` is used as the `QA` chain. 17 | 18 | {RetrievalQAExample} 19 | 20 | ## Usage, with a custom `QA` chain 21 | 22 | In the below example, we are using a `VectorStore` as the `Retriever` and a `RefineDocumentsChain` as the `QA` chain. 23 | 24 | {RetrievalQAExampleCustom} 25 | -------------------------------------------------------------------------------- /docs/docs/modules/chains/llmchain.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | sidebar_label: LLM Chain 4 | sidebar_position: 1 5 | --- 6 | 7 | import CodeBlock from "@theme/CodeBlock"; 8 | import Example from "@examples/chains/llm_chain.ts"; 9 | 10 | # Getting Started: LLMChain 11 | 12 | :::info 13 | [Conceptual Guide](https://docs.langchain-gpt4all.com/docs/components/chains/llm-chain) 14 | ::: 15 | 16 | An `LLMChain` is a simple chain that adds some functionality around language models. It is used widely throughout LangChain, including in other chains and agents. 17 | 18 | An `LLMChain` consists of a `PromptTemplate` and a language model (either an [LLM](../models/llms/) or [chat model](../models/chat/)). 19 | 20 | We can construct an LLMChain which takes user input, formats it with a PromptTemplate, and then passes the formatted prompt to an LLM: 21 | 22 | {Example} 23 | -------------------------------------------------------------------------------- /docs/docs/modules/chains/other_chains/analyze_document.mdx: -------------------------------------------------------------------------------- 1 | import CodeBlock from "@theme/CodeBlock"; 2 | import AnalyzeDocumentExample from "@examples/chains/analyze_document_chain_summarize.ts"; 3 | 4 | # `AnalyzeDocumentChain` 5 | 6 | You can use the `AnalyzeDocumentChain`, which accepts a single piece of text as input and operates over it. 7 | This chain takes care of splitting up the text and then passing it to the `MapReduceDocumentsChain` to generate a summary. 8 | 9 | {AnalyzeDocumentExample} 10 | -------------------------------------------------------------------------------- /docs/docs/modules/chains/other_chains/constitutional_chain.mdx: -------------------------------------------------------------------------------- 1 | import CodeBlock from "@theme/CodeBlock"; 2 | import ConstitutionalChainExample from "@examples/chains/constitutional_chain.ts"; 3 | 4 | # `ConstitutionalChain` 5 | 6 | The `ConstitutionalChain` is a chain that ensures the output of a language model adheres to a predefined set of constitutional principles.
By incorporating specific rules and guidelines, the `ConstitutionalChain` filters and modifies the generated content to align with these principles, thus providing more controlled, ethical, and contextually appropriate responses. This mechanism helps maintain the integrity of the output while minimizing the risk of generating content that may violate guidelines, be offensive, or deviate from the desired context. 7 | 8 | {ConstitutionalChainExample} 9 | -------------------------------------------------------------------------------- /docs/docs/modules/chains/other_chains/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | sidebar_label: Other Chains 4 | --- 5 | 6 | import DocCardList from "@theme/DocCardList"; 7 | 8 | # Other Chains 9 | 10 | This section highlights other examples of chains that exist. 11 | 12 | 13 | -------------------------------------------------------------------------------- /docs/docs/modules/chains/other_chains/moderation_chain.mdx: -------------------------------------------------------------------------------- 1 | import CodeBlock from "@theme/CodeBlock"; 2 | import OpenAIModerationExample from "@examples/chains/openai_moderation.ts"; 3 | 4 | # `OpenAIModerationChain` 5 | 6 | You can use the `OpenAIModerationChain` which takes care of evaluating the input and identifying whether it violates OpenAI's Terms of Service (TOS). If the input contains content that breaks the TOS and `throwError` is set to `true`, an error will be thrown. If `throwError` is set to `false`, the chain will instead return "Text was found that violates OpenAI's content policy." 7 | 8 | {OpenAIModerationExample} 9 | -------------------------------------------------------------------------------- /docs/docs/modules/chains/other_chains/multi_prompt_chain.mdx: -------------------------------------------------------------------------------- 1 | import CodeBlock from "@theme/CodeBlock"; 2 | import MultiPromptExample from "@examples/chains/multi_prompt.ts"; 3 | 4 | # `MultiPromptChain` 5 | 6 | MultiPromptChain enables an LLM to select from multiple prompts. Construct the chain by providing a collection of templates/prompts along with their corresponding names and descriptions. The chain takes a string as input, picks an appropriate prompt, and subsequently feeds the input into the chosen prompt. 7 | 8 | {MultiPromptExample} 9 | -------------------------------------------------------------------------------- /docs/docs/modules/chains/other_chains/multi_retrieval_qa_chain.mdx: -------------------------------------------------------------------------------- 1 | import CodeBlock from "@theme/CodeBlock"; 2 | import MultiRetrievalQAExample from "@examples/chains/multi_retrieval_qa.ts"; 3 | 4 | # `MultiRetrievalQAChain` 5 | 6 | MultiRetrievalQAChain enables an LLM to select from multiple retrievers. Construct the chain by providing a collection of vector stores (as retrievers) along with their corresponding names and descriptions. The chain takes a query as input, picks the most appropriate retriever, and subsequently feeds the input into the chosen retriever.
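As a minimal sketch of the construction (the stores, names, and descriptions below are placeholder data, and `fromRetrievers` is the factory used by the example referenced after this sketch):

```typescript
import { OpenAI } from "langchain-gpt4all/llms/openai";
import { OpenAIEmbeddings } from "langchain-gpt4all/embeddings/openai";
import { MemoryVectorStore } from "langchain-gpt4all/vectorstores/memory";
import { MultiRetrievalQAChain } from "langchain-gpt4all/chains";

const embeddings = new OpenAIEmbeddings();

// Two placeholder stores standing in for real document collections.
const animalStore = await MemoryVectorStore.fromTexts(
  ["Elephants are the largest living land animals."],
  [{ source: "animals" }],
  embeddings
);
const spaceStore = await MemoryVectorStore.fromTexts(
  ["The Moon orbits the Earth roughly every 27 days."],
  [{ source: "space" }],
  embeddings
);

// The router picks the retriever whose description best matches the query,
// then runs a retrieval QA chain against it.
const chain = MultiRetrievalQAChain.fromRetrievers(
  new OpenAI(),
  ["animals", "space"],
  ["Good for answering questions about animals", "Good for answering questions about space"],
  [animalStore.asRetriever(), spaceStore.asRetriever()]
);

const res = await chain.call({
  input: "How long does the Moon take to orbit the Earth?",
});
console.log(res.text);
```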
7 | 8 | {MultiRetrievalQAExample} 9 | -------------------------------------------------------------------------------- /docs/docs/modules/chains/other_chains/summarization.mdx: -------------------------------------------------------------------------------- 1 | import CodeBlock from "@theme/CodeBlock"; 2 | import SummarizeExample from "@examples/chains/summarization_map_reduce.ts"; 3 | import SummarizeExampleIntermediateSteps from "@examples/chains/summarization_map_reduce_intermediate_steps.ts"; 4 | 5 | # Summarization 6 | 7 | A summarization chain can be used to summarize multiple documents. One way is to input multiple smaller documents, after they have been divided into chunks, and operate over them with a `MapReduceDocumentsChain`. You can also choose to use a `StuffDocumentsChain` or a `RefineDocumentsChain` as the chain that does the summarization instead. See more about the differences between them [here](../index_related_chains/document_qa). 8 | 9 | {SummarizeExample} 10 | 11 | ## Intermediate Steps 12 | 13 | We can also return the intermediate steps for `map_reduce` chains, should we want to inspect them. This is done with the `returnIntermediateSteps` parameter. 14 | 15 | {SummarizeExampleIntermediateSteps} 16 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/document_loaders/examples/file_loaders/docx.md: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | --- 4 | 5 | # Docx files 6 | 7 | This example goes over how to load data from docx files. 8 | 9 | ## Setup 10 | 11 | ```bash npm2yarn 12 | npm install mammoth 13 | ``` 14 | 15 | ## Usage 16 | 17 | ```typescript 18 | import { DocxLoader } from "langchain-gpt4all/document_loaders/fs/docx"; 19 | 20 | const loader = new DocxLoader( 21 | "src/document_loaders/tests/example_data/attention.docx" 22 | ); 23 | 24 | const docs = await loader.load(); 25 | ``` 26 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/document_loaders/examples/file_loaders/epub.md: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | --- 4 | 5 | # EPUB files 6 | 7 | This example goes over how to load data from EPUB files. By default, one document will be created for each chapter in the EPUB file; you can change this behavior by setting the `splitChapters` option to `false`.
8 | 9 | ## Setup 10 | 11 | ```bash npm2yarn 12 | npm install epub2 html-to-text 13 | ``` 14 | 15 | ## Usage, one document per chapter 16 | 17 | ```typescript 18 | import { EPubLoader } from "langchain-gpt4all/document_loaders/fs/epub"; 19 | 20 | const loader = new EPubLoader("src/document_loaders/example_data/example.epub"); 21 | 22 | const docs = await loader.load(); 23 | ``` 24 | 25 | ## Usage, one document per file 26 | 27 | ```typescript 28 | import { EPubLoader } from "langchain-gpt4all/document_loaders/fs/epub"; 29 | 30 | const loader = new EPubLoader( 31 | "src/document_loaders/example_data/example.epub", 32 | { 33 | splitChapters: false, 34 | } 35 | ); 36 | 37 | const docs = await loader.load(); 38 | ``` 39 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/document_loaders/examples/file_loaders/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | label: "File Loaders" 3 | hide_table_of_contents: true 4 | sidebar_class_name: node-only-category 5 | --- 6 | 7 | # File Loaders 8 | 9 | :::tip Compatibility 10 | Only available on Node.js. 11 | ::: 12 | 13 | These loaders are used to load files given a filesystem path or a Blob object. 14 | 15 | import DocCardList from "@theme/DocCardList"; 16 | 17 | 18 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/document_loaders/examples/file_loaders/notion_markdown.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | --- 4 | 5 | # Notion markdown export 6 | 7 | This example goes over how to load data from your Notion pages exported from the Notion dashboard. 8 | 9 | First, export your Notion pages as **Markdown & CSV** as per the official explanation [here](https://www.notion.so/help/export-your-content). Make sure to select `include subpages` and `Create folders for subpages.` 10 | 11 | Then, unzip the downloaded file and move the unzipped folder into your repository. It should contain the markdown files of your pages. 12 | 13 | Once the folder is in your repository, simply run the example below: 14 | 15 | import CodeBlock from "@theme/CodeBlock"; 16 | import Example from "@examples/document_loaders/notion_markdown.ts"; 17 | 18 | {Example} 19 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/document_loaders/examples/file_loaders/subtitles.md: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | --- 4 | 5 | # Subtitles 6 | 7 | This example goes over how to load data from subtitle files. One document will be created for each subtitle file.
8 | 9 | ## Setup 10 | 11 | ```bash npm2yarn
12 | npm install srt-parser-2 13 | ``` 14 | 15 | ## Usage 16 | 17 | ```typescript 18 | import { SRTLoader } from "langchain-gpt4all/document_loaders/fs/srt"; 19 | 20 | const loader = new SRTLoader( 21 | "src/document_loaders/example_data/Star_Wars_The_Clone_Wars_S06E07_Crisis_at_the_Heart.srt" 22 | ); 23 | 24 | const docs = await loader.load(); 25 | ``` 26 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/document_loaders/examples/file_loaders/text.md: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | --- 4 | 5 | # Text files 6 | 7 | This example goes over how to load data from text files. 8 | 9 | ```typescript 10 | import { TextLoader } from "langchain-gpt4all/document_loaders/fs/text"; 11 | 12 | const loader = new TextLoader("src/document_loaders/example_data/example.txt"); 13 | 14 | const docs = await loader.load(); 15 | ``` 16 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/document_loaders/examples/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_label: Examples 3 | hide_table_of_contents: true 4 | --- 5 | 6 | import DocCardList from "@theme/DocCardList"; 7 | 8 | # Examples: Document Loaders 9 | 10 | 11 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/document_loaders/examples/web_loaders/college_confidential.md: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | --- 4 | 5 | # College Confidential 6 | 7 | This example goes over how to load data from the College Confidential website, using Cheerio. One document will be created for each page. 8 | 9 | ## Setup 10 | 11 | ```bash npm2yarn 12 | npm install cheerio 13 | ``` 14 | 15 | ## Usage 16 | 17 | ```typescript 18 | import { CollegeConfidentialLoader } from "langchain-gpt4all/document_loaders/web/college_confidential"; 19 | 20 | const loader = new CollegeConfidentialLoader( 21 | "https://www.collegeconfidential.com/colleges/brown-university/" 22 | ); 23 | 24 | const docs = await loader.load(); 25 | ``` 26 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/document_loaders/examples/web_loaders/confluence.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_class_name: node-only 3 | --- 4 | 5 | # Confluence 6 | 7 | :::tip Compatibility 8 | Only available on Node.js. 9 | ::: 10 | 11 | This covers how to load document objects from pages in a Confluence space. 12 | 13 | ## Credentials 14 | 15 | - You'll need to set up an access token and provide it along with your Confluence username in order to authenticate the request 16 | - You'll also need the `space key` for the space containing the pages to load as documents. This can be found in the URL when navigating to your space, e.g.
`https://example.atlassian.net/wiki/spaces/{SPACE_KEY}` 17 | - And you'll need to install `html-to-text` to parse the pages into plain text 18 | 19 | ```bash npm2yarn 20 | npm install html-to-text 21 | ``` 22 | 23 | ## Usage 24 | 25 | import CodeBlock from "@theme/CodeBlock"; 26 | import Example from "@examples/document_loaders/confluence.ts"; 27 | 28 | {Example} 29 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/document_loaders/examples/web_loaders/github.md: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | --- 4 | 5 | # GitHub 6 | 7 | This example goes over how to load data from a GitHub repository. 8 | You can set the `GITHUB_ACCESS_TOKEN` environment variable to a GitHub access token to increase the rate limit and access private repositories. 9 | 10 | ```typescript 11 | import { GithubRepoLoader } from "langchain-gpt4all/document_loaders/web/github"; 12 | 13 | const loader = new GithubRepoLoader( 14 | "https://github.com/lujstn/langchainjs-gpt4all", 15 | { branch: "main", recursive: false, unknown: "warn" } 16 | ); 17 | const docs = await loader.load(); 18 | ``` 19 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/document_loaders/examples/web_loaders/hn.md: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | --- 4 | 5 | # Hacker News 6 | 7 | This example goes over how to load data from the Hacker News website, using Cheerio. One document will be created for each page. 8 | 9 | ## Setup 10 | 11 | ```bash npm2yarn 12 | npm install cheerio 13 | ``` 14 | 15 | ## Usage 16 | 17 | ```typescript 18 | import { HNLoader } from "langchain-gpt4all/document_loaders/web/hn"; 19 | 20 | const loader = new HNLoader("https://news.ycombinator.com/item?id=34817881"); 21 | 22 | const docs = await loader.load(); 23 | ``` 24 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/document_loaders/examples/web_loaders/imsdb.md: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | --- 4 | 5 | # IMSDB 6 | 7 | This example goes over how to load data from the Internet Movie Script Database (IMSDb) website, using Cheerio. One document will be created for each page. 8 | 9 | ## Setup 10 | 11 | ```bash npm2yarn 12 | npm install cheerio 13 | ``` 14 | 15 | ## Usage 16 | 17 | ```typescript 18 | import { IMSDBLoader } from "langchain-gpt4all/document_loaders/web/imsdb"; 19 | 20 | const loader = new IMSDBLoader("https://imsdb.com/scripts/BlacKkKlansman.html"); 21 | 22 | const docs = await loader.load(); 23 | ``` 24 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/document_loaders/examples/web_loaders/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | label: "Web Loaders" 3 | hide_table_of_contents: true 4 | --- 5 | 6 | # Web Loaders 7 | 8 | These loaders are used to load web resources.
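For instance, a single page can be fetched and parsed with the Cheerio-based loader; the sketch below reuses the Hacker News URL from the example above, and any public URL would work the same way:

```typescript
import { CheerioWebBaseLoader } from "langchain-gpt4all/document_loaders/web/cheerio";

// Fetches the page over HTTP and parses the HTML with Cheerio;
// the page text becomes the content of a single Document.
const loader = new CheerioWebBaseLoader(
  "https://news.ycombinator.com/item?id=34817881"
);

const docs = await loader.load();
console.log(docs[0].pageContent.slice(0, 100));
```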
9 | 10 | import DocCardList from "@theme/DocCardList"; 11 | 12 | 13 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_position: 4 3 | hide_table_of_contents: true 4 | --- 5 | 6 | import DocCardList from "@theme/DocCardList"; 7 | 8 | # Indexes 9 | 10 | :::info 11 | [Conceptual Guide](https://docs.langchain-gpt4all.com/docs/components/indexing) 12 | ::: 13 | 14 | This section deals with everything related to bringing your own data into LangChain, indexing it, and making it available for LLMs/Chat Models. 15 | 16 | 17 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/retrievers/chatgpt-retriever-plugin.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | --- 4 | 5 | # ChatGPT Plugin Retriever 6 | 7 | This example shows how to use the ChatGPT Retriever Plugin within LangChain. 8 | 9 | To set up the ChatGPT Retriever Plugin, please follow instructions [here](https://github.com/openai/chatgpt-retrieval-plugin). 10 | 11 | ## Usage 12 | 13 | import CodeBlock from "@theme/CodeBlock"; 14 | import Example from "@examples/retrievers/chatgpt-plugin.ts"; 15 | 16 | {Example} 17 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/retrievers/contextual-compression-retriever.mdx: -------------------------------------------------------------------------------- 1 | # Contextual Compression Retriever 2 | 3 | A Contextual Compression Retriever is designed to improve the answers returned from vector store document similarity searches by better taking into account the context from the query. 4 | 5 | It wraps another retriever, and uses a Document Compressor as an intermediate step after the initial similarity search that removes information irrelevant to the initial query from the retrieved documents. 6 | This reduces the amount of distraction a subsequent chain has to deal with when parsing the retrieved documents and making its final judgements. 7 | 8 | ## Usage 9 | 10 | This example shows how to initialize a `ContextualCompressionRetriever` with a vector store and a document compressor: 11 | 12 | import CodeBlock from "@theme/CodeBlock"; 13 | import Example from "@examples/retrievers/contextual_compression.ts"; 14 | 15 | {Example} 16 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/retrievers/databerry-retriever.mdx: -------------------------------------------------------------------------------- 1 | # Databerry Retriever 2 | 3 | This example shows how to use the Databerry Retriever in a `RetrievalQAChain` to retrieve documents from a Databerry.ai datastore. 4 | 5 | ## Usage 6 | 7 | import CodeBlock from "@theme/CodeBlock"; 8 | import Example from "@examples/retrievers/databerry.ts"; 9 | 10 | {Example} 11 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/retrievers/hyde.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | --- 4 | 5 | # HyDE Retriever 6 | 7 | This example shows how to use the HyDE Retriever, which implements Hypothetical Document Embeddings (HyDE) as described in [this paper](https://arxiv.org/abs/2212.10496).
8 | 9 | At a high level, HyDE is an embedding technique that takes a query, generates a hypothetical answer, and then embeds that generated document, using its embedding for the final similarity search. 10 | 11 | In order to use HyDE, we therefore need to provide a base embedding model, as well as an LLM that can be used to generate those documents. By default, the HyDE class comes with some default prompts to use (see the paper for more details on them), but we can also create our own, which should have a single input variable `{question}`. 12 | 13 | ## Usage 14 | 15 | import CodeBlock from "@theme/CodeBlock"; 16 | import Example from "@examples/retrievers/hyde.ts"; 17 | 18 | {Example} 19 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/retrievers/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | sidebar_position: 4 4 | --- 5 | 6 | import DocCardList from "@theme/DocCardList"; 7 | 8 | # Retrievers 9 | 10 | :::info 11 | [Conceptual Guide](https://docs.langchain-gpt4all.com/docs/components/indexing/retriever) 12 | ::: 13 | 14 | A way of storing data such that it can be queried by a language model. The only interface this object must expose is a `getRelevantDocuments` method which takes in a string query and returns a list of Documents. 15 | 16 | 17 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/retrievers/metal-retriever.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | --- 4 | 5 | # Metal Retriever 6 | 7 | This example shows how to use the Metal Retriever in a `RetrievalQAChain` to retrieve documents from a Metal index. 8 | 9 | ## Setup 10 | 11 | ```bash npm2yarn 12 | npm i @getmetal/metal-sdk 13 | ``` 14 | 15 | ## Usage 16 | 17 | import CodeBlock from "@theme/CodeBlock"; 18 | import Example from "@examples/retrievers/metal.ts"; 19 | 20 | {Example} 21 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/retrievers/remote-retriever.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | --- 4 | 5 | # Remote Retriever 6 | 7 | This example shows how to use a Remote Retriever in a `RetrievalQAChain` to retrieve documents from a remote server. 8 | 9 | ## Usage 10 | 11 | import CodeBlock from "@theme/CodeBlock"; 12 | import Example from "@examples/chains/retrieval_qa_with_remote.ts"; 13 | 14 | {Example} 15 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/retrievers/vectorstore.md: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | --- 4 | 5 | # Vector Store 6 | 7 | Once you've created a [Vector Store](../vector_stores/), the way to use it as a Retriever is very simple: 8 | 9 | ```typescript 10 | vectorStore = ...
11 | const retriever = vectorStore.asRetriever(); 12 | ``` 13 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/text_splitters/examples/character.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | --- 4 | 5 | # CharacterTextSplitter 6 | 7 | Besides the `RecursiveCharacterTextSplitter`, there is also the more standard `CharacterTextSplitter`. This splits only on one type of character (defaults to `"\n\n"`). You can use it in the exact same way. 8 | 9 | ```typescript 10 | import { Document } from "langchain-gpt4all/document"; 11 | import { CharacterTextSplitter } from "langchain-gpt4all/text_splitter"; 12 | 13 | const text = "foo bar baz 123"; 14 | const splitter = new CharacterTextSplitter({ 15 | separator: " ", 16 | chunkSize: 7, 17 | chunkOverlap: 3, 18 | }); 19 | const output = await splitter.createDocuments([text]); 20 | ``` 21 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/text_splitters/examples/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_label: Examples 3 | --- 4 | 5 | import DocCardList from "@theme/DocCardList"; 6 | 7 | # Text Splitters: Examples 8 | 9 | <DocCardList /> 10 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/text_splitters/examples/token.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | --- 4 | 5 | # TokenTextSplitter 6 | 7 | Finally, `TokenTextSplitter` splits a raw text string by first converting the text into BPE tokens, then splitting these tokens into chunks, and finally converting the tokens within each chunk back into text. 8 | 9 | To utilize the `TokenTextSplitter`, first install the required companion library: 10 | 11 | ```bash npm2yarn 12 | npm install -S @dqbd/tiktoken 13 | ``` 14 | 15 | Then, you can use it like so: 16 | 17 | ```typescript 18 | import { Document } from "langchain-gpt4all/document"; 19 | import { TokenTextSplitter } from "langchain-gpt4all/text_splitter"; 20 | 21 | const text = "foo bar baz 123"; 22 | 23 | const splitter = new TokenTextSplitter({ 24 | encodingName: "gpt2", 25 | chunkSize: 10, 26 | chunkOverlap: 0, 27 | }); 28 | 29 | const output = await splitter.createDocuments([text]); 30 | ``` 31 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/vector_stores/integrations/chroma.mdx: -------------------------------------------------------------------------------- 1 | import CodeBlock from "@theme/CodeBlock"; 2 | 3 | # Chroma 4 | 5 | Chroma is an open-source embedding database licensed under Apache 2.0. 6 | 7 | ## Setup 8 | 9 | 1. Run Chroma with Docker on your computer ([docs](https://docs.trychroma.com/api-reference)) 10 | 2. Install the Chroma JS SDK.
11 | 12 | ```bash npm2yarn 13 | npm install -S chromadb 14 | ``` 15 | 16 | ## Usage, Index and query Documents 17 | 18 | import FromDocs from "@examples/indexes/vector_stores/chroma/fromDocs.ts"; 19 | 20 | <CodeBlock language="typescript">{FromDocs}</CodeBlock> 21 | 22 | ## Usage, Index and query texts 23 | 24 | import FromTexts from "@examples/indexes/vector_stores/chroma/fromTexts.ts"; 25 | 26 | <CodeBlock language="typescript">{FromTexts}</CodeBlock> 27 | 28 | ## Usage, Query docs from existing collection 29 | 30 | import Search from "@examples/indexes/vector_stores/chroma/search.ts"; 31 | 32 | <CodeBlock language="typescript">{Search}</CodeBlock> 33 | -------------------------------------------------------------------------------- /docs/docs/modules/indexes/vector_stores/integrations/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_label: Integrations 3 | --- 4 | 5 | import DocCardList from "@theme/DocCardList"; 6 | 7 | # Vector Stores: Integrations 8 | 9 | <DocCardList /> 10 | -------------------------------------------------------------------------------- /docs/docs/modules/memory/examples/buffer_memory_chat.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | --- 4 | 5 | import CodeBlock from "@theme/CodeBlock"; 6 | import Example from "@examples/chat/memory.ts"; 7 | 8 | # Using Buffer Memory with Chat Models 9 | 10 | This example covers how to use chat-specific memory classes with chat models. 11 | The key thing to notice is that setting `returnMessages: true` makes the memory return a list of chat messages instead of a string. 12 | 13 | <CodeBlock language="typescript">{Example}</CodeBlock> 14 | -------------------------------------------------------------------------------- /docs/docs/modules/memory/examples/buffer_window_memory.md: -------------------------------------------------------------------------------- 1 | # Buffer Window Memory 2 | 3 | BufferWindowMemory keeps track of the back-and-forths in conversation, and then uses a window of size `k` to surface the last `k` back-and-forths to use as memory. 4 | 5 | ```typescript 6 | import { OpenAI } from "langchain-gpt4all/llms/openai"; 7 | import { BufferWindowMemory } from "langchain-gpt4all/memory"; 8 | import { ConversationChain } from "langchain-gpt4all/chains"; 9 | 10 | const model = new OpenAI({}); 11 | const memory = new BufferWindowMemory({ k: 1 }); 12 | const chain = new ConversationChain({ llm: model, memory: memory }); 13 | const res1 = await chain.call({ input: "Hi! I'm Jim." }); 14 | console.log({ res1 }); 15 | ``` 16 | 17 | ```shell 18 | {response: " Hi Jim! It's nice to meet you. My name is AI. What would you like to talk about?"} 19 | ``` 20 | 21 | ```typescript 22 | const res2 = await chain.call({ input: "What's my name?" }); 23 | console.log({ res2 }); 24 | ``` 25 | 26 | ```shell 27 | {response: ' You said your name is Jim. Is there anything else you would like to talk about?'} 28 | ``` 29 | -------------------------------------------------------------------------------- /docs/docs/modules/memory/examples/conversation_summary.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | sidebar_label: Conversation Summary 4 | --- 5 | 6 | import CodeBlock from "@theme/CodeBlock"; 7 | 8 | # Conversation Summary Memory 9 | 10 | The Conversation Summary Memory summarizes the conversation as it happens and stores the current summary in memory. This memory can then be used to inject the summary of the conversation so far into a prompt/chain.
This memory is most useful for longer conversations, where keeping the past message history in the prompt verbatim would take up too many tokens. 11 | 12 | ## Usage, with an LLM 13 | 14 | import TextExample from "@examples/memory/summary_llm.ts"; 15 | 16 | <CodeBlock language="typescript">{TextExample}</CodeBlock> 17 | 18 | ## Usage, with a Chat Model 19 | 20 | import ChatExample from "@examples/memory/summary_chat.ts"; 21 | 22 | <CodeBlock language="typescript">{ChatExample}</CodeBlock> 23 | -------------------------------------------------------------------------------- /docs/docs/modules/memory/examples/dynamodb.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | --- 4 | 5 | import CodeBlock from "@theme/CodeBlock"; 6 | 7 | # DynamoDB-Backed Chat Memory 8 | 9 | For longer-term persistence across chat sessions, you can swap out the default in-memory `chatHistory` that backs chat memory classes like `BufferMemory` for a DynamoDB instance. 10 | 11 | ## Setup 12 | 13 | First, install the AWS DynamoDB client in your project: 14 | 15 | ```bash npm2yarn 16 | npm install @aws-sdk/client-dynamodb 17 | ``` 18 | 19 | Next, sign into your AWS account and create a DynamoDB table. Name the table `langchain-gpt4all`, name your partition key `id`, and make sure it's a string. You can leave the sort key and other settings alone. 20 | 21 | You'll also need to retrieve an AWS access key and secret key for a role or user that has access to the table and add them to your environment variables. 22 | 23 | ## Usage 24 | 25 | import Example from "@examples/memory/dynamodb-store.ts"; 26 | 27 | <CodeBlock language="typescript">{Example}</CodeBlock> 28 | -------------------------------------------------------------------------------- /docs/docs/modules/memory/examples/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_label: Examples 3 | --- 4 | 5 | import DocCardList from "@theme/DocCardList"; 6 | 7 | # Examples: Memory 8 | 9 | <DocCardList /> 10 | -------------------------------------------------------------------------------- /docs/docs/modules/memory/examples/vector_store_memory.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | --- 4 | 5 | import CodeBlock from "@theme/CodeBlock"; 6 | import Example from "@examples/memory/vector_store.ts"; 7 | 8 | # VectorStore-backed Memory 9 | 10 | `VectorStoreRetrieverMemory` stores memories in a VectorDB and queries the top-K most "salient" docs every time it is called. 11 | 12 | This differs from most of the other Memory classes in that it doesn't explicitly track the order of interactions. 13 | 14 | In this case, the "docs" are previous conversation snippets. This can be useful to refer to relevant pieces of information that the AI was told earlier in the conversation. 15 | 16 | <CodeBlock language="typescript">{Example}</CodeBlock> 17 | -------------------------------------------------------------------------------- /docs/docs/modules/models/chat/integrations.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_position: 3 3 | sidebar_label: Integrations 4 | --- 5 | 6 | import CodeBlock from "@theme/CodeBlock"; 7 | 8 | # Integrations: Chat Models 9 | 10 | LangChain offers a number of Chat Model implementations that integrate with various model providers.
These are: 11 | 12 | ## `ChatOpenAI` 13 | 14 | import OpenAI from "@examples/models/chat/integration_openai.ts"; 15 | 16 | <CodeBlock language="typescript">{OpenAI}</CodeBlock> 17 | 18 | ## Azure `ChatOpenAI` 19 | 20 | import AzureOpenAI from "@examples/models/chat/integration_azure_openai.ts"; 21 | 22 | <CodeBlock language="typescript">{AzureOpenAI}</CodeBlock> 23 | 24 | ## `ChatAnthropic` 25 | 26 | import Anthropic from "@examples/models/chat/integration_anthropic.ts"; 27 | 28 | <CodeBlock language="typescript">{Anthropic}</CodeBlock> 29 | -------------------------------------------------------------------------------- /docs/docs/modules/models/llms/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | sidebar_label: LLMs 4 | --- 5 | 6 | import CodeBlock from "@theme/CodeBlock"; 7 | import Example from "@examples/models/llm/llm_quick_start.ts"; 8 | import DocCardList from "@theme/DocCardList"; 9 | 10 | # Getting Started: LLMs 11 | 12 | :::info 13 | [Conceptual Guide](https://docs.langchain-gpt4all.com/docs/components/models/language-model) 14 | ::: 15 | 16 | LangChain provides a standard interface for using a variety of LLMs. 17 | 18 | To get started, simply use the `call` method of an `LLM` implementation, passing in a `string` input. In this example, we are using the `OpenAI` implementation: 19 | 20 | <CodeBlock language="typescript">{Example}</CodeBlock> 21 | 22 | ## Dig deeper 23 | 24 | <DocCardList /> 25 | -------------------------------------------------------------------------------- /docs/docs/modules/prompts/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_position: 3 3 | hide_table_of_contents: true 4 | sidebar_label: Prompts 5 | --- 6 | 7 | import DocCardList from "@theme/DocCardList"; 8 | 9 | # Prompts 10 | 11 | :::info 12 | [Conceptual Guide](https://docs.langchain-gpt4all.com/docs/components/prompts) 13 | ::: 14 | 15 | LangChain provides several utilities to help manage prompts for language models, including chat models. 16 | 17 | <DocCardList /> 18 | -------------------------------------------------------------------------------- /docs/docs/modules/prompts/prompt_templates/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | sidebar_label: Prompt Templates 4 | sidebar_position: 1 5 | --- 6 | 7 | import CodeBlock from "@theme/CodeBlock"; 8 | import Example from "@examples/prompts/prompts.ts"; 9 | import DocCardList from "@theme/DocCardList"; 10 | 11 | # Prompt Templates 12 | 13 | :::info 14 | [Conceptual Guide](https://docs.langchain-gpt4all.com/docs/components/prompts/prompt-template) 15 | ::: 16 | 17 | A `PromptTemplate` allows you to make use of templating to generate a prompt. This is useful for when you want to use the same prompt outline in multiple places, but with certain values changed. 18 | Prompt templates are supported for both LLMs and chat models, as shown below: 19 | 20 | <CodeBlock language="typescript">{Example}</CodeBlock> 21 | 22 | ## Dig deeper 23 | 24 | <DocCardList /> 25 | -------------------------------------------------------------------------------- /docs/docs/modules/schema/example.md: -------------------------------------------------------------------------------- 1 | --- 2 | --- 3 | 4 | # Examples 5 | 6 | Examples are input/output pairs that represent inputs to a function and the expected output. They can be used in both training and evaluation of models.
7 | 8 | ```typescript 9 | type Example = Record<string, string>; 10 | ``` 11 | 12 | ## Creating an Example 13 | 14 | You can create an Example like this: 15 | 16 | ```typescript 17 | const example = { 18 | input: "foo", 19 | output: "bar", 20 | }; 21 | ``` 22 | -------------------------------------------------------------------------------- /docs/docs/modules/schema/index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_position: 1 3 | --- 4 | 5 | import DocCardList from "@theme/DocCardList"; 6 | 7 | # Schema 8 | 9 | This section covers the interfaces that are used throughout the rest of the library. 10 | 11 | <DocCardList /> 12 | -------------------------------------------------------------------------------- /docs/docs/production/callbacks/create-handlers.mdx: -------------------------------------------------------------------------------- 1 | import CodeBlock from "@theme/CodeBlock"; 2 | 3 | # Creating callback handlers 4 | 5 | ## Creating a custom handler 6 | 7 | You can also create your own handler by implementing the `BaseCallbackHandler` interface. This is useful if you want to do something more complex than just logging to the console, eg. send the events to a logging service. As an example, here is a simple implementation of a handler that logs to the console: 8 | 9 | import CustomHandlerExample from "@examples/callbacks/custom_handler.ts"; 10 | 11 | <CodeBlock language="typescript">{CustomHandlerExample}</CodeBlock> 12 | 13 | You could then use it as described in the [section](#built-in-handlers) above. 14 | -------------------------------------------------------------------------------- /docs/docs/production/callbacks/creating-subclasses.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | sidebar_label: Callbacks in custom Chains 3 | --- 4 | 5 | # Callbacks in custom Chains/Agents 6 | 7 | LangChain is designed to be extensible. You can add your own custom Chains and Agents to the library. This page will show you how to add callbacks to your custom Chains and Agents. 8 | 9 | ## Adding callbacks to custom Chains 10 | 11 | When you create a custom chain you can easily set it up to use the same callback system as all the built-in chains. See this guide for more information on how to [create custom chains and use callbacks inside them](../../modules/chains#subclassing-basechain). 12 | -------------------------------------------------------------------------------- /docs/docs/use_cases/api.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | sidebar_position: 4 4 | --- 5 | 6 | # Interacting with APIs 7 | 8 | :::info 9 | [Conceptual Guide](https://docs.langchain-gpt4all.com/docs/use-cases/apis) 10 | ::: 11 | 12 | Lots of data and information is stored behind APIs. 13 | This page covers all resources available in LangChain for working with APIs. 14 | 15 | ## Chains 16 | 17 | If you are just getting started, and you have relatively simple APIs, you should get started with chains. 18 | Chains are a sequence of predetermined steps, so they are good to get started with as they give you more control and let you 19 | understand what is happening better. 20 | 21 | TODO: add an API chain and then add an example here. 22 | 23 | ## Agents 24 | 25 | Agents are more complex, and involve multiple queries to the LLM to understand what to do. 26 | The downside of agents is that you have less control.
The upside is that they are more powerful, 27 | which allows you to use them on larger and more complex schemas. 28 | 29 | - [OpenAPI Agent](../modules/agents/toolkits/openapi.md) 30 | -------------------------------------------------------------------------------- /docs/docs/use_cases/autonomous_agents/auto_gpt.mdx: -------------------------------------------------------------------------------- 1 | import CodeBlock from "@theme/CodeBlock"; 2 | 3 | # AutoGPT 4 | 5 | :::info 6 | Original Repo: https://github.com/Significant-Gravitas/Auto-GPT 7 | ::: 8 | 9 | AutoGPT is a custom agent that uses long-term memory along with a prompt designed for independent work (ie. without asking for user input) to perform tasks. 10 | 11 | ## Isomorphic Example 12 | 13 | import IsomorphicExample from "@examples/experimental/autogpt/weather_browser.ts"; 14 | 15 | In this example we use AutoGPT to predict the weather for a given location. This example is designed to run in all JS environments, including the browser. 16 | 17 | <CodeBlock language="typescript">{IsomorphicExample}</CodeBlock> 18 | 19 | ## Node.js Example 20 | 21 | import NodeExample from "@examples/experimental/autogpt/weather.ts"; 22 | 23 | In this example we use AutoGPT to predict the weather for a given location. This example is designed to run in Node.js, so it uses the local filesystem, and a Node-only vector store. 24 | 25 | <CodeBlock language="typescript">{NodeExample}</CodeBlock> 26 | -------------------------------------------------------------------------------- /docs/docs/use_cases/autonomous_agents/index.mdx: -------------------------------------------------------------------------------- 1 | import DocCardList from "@theme/DocCardList"; 2 | 3 | # Autonomous Agents 4 | 5 | Autonomous Agents are agents designed to be longer-running. You give them one or more long-term goals, and they independently execute towards those goals. These applications combine tool usage and long-term memory. 6 | 7 | At the moment, Autonomous Agents are fairly experimental and based on other open-source projects. By implementing these open-source projects in LangChain primitives we can get the benefits of LangChain: easy switching and experimenting with multiple LLMs, usage of different vector stores as memory, and usage of LangChain's collection of tools. 8 | 9 | <DocCardList /> 10 | -------------------------------------------------------------------------------- /docs/docs/use_cases/personal_assistants.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | sidebar_position: 1 4 | --- 5 | 6 | # Personal Assistants 7 | 8 | :::info 9 | [Conceptual Guide](https://docs.langchain-gpt4all.com/docs/use-cases/personal-assistants) 10 | ::: 11 | 12 | We use "personal assistant" here in a very broad sense. 13 | Personal assistants have a few characteristics: 14 | 15 | - They can interact with the outside world 16 | - They have knowledge of your data 17 | - They remember your interactions 18 | 19 | Really all of the functionality in LangChain is relevant for building a personal assistant.
20 | Highlighting specific parts: 21 | 22 | - [Agent Documentation](../modules/agents/index.mdx) (for interacting with the outside world) 23 | - [Index Documentation](../modules/indexes/index.mdx) (for giving them knowledge of your data) 24 | - [Memory](../modules/memory/index.mdx) (for helping them remember interactions) 25 | -------------------------------------------------------------------------------- /docs/docs/use_cases/summarization.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | hide_table_of_contents: true 3 | sidebar_position: 6 4 | --- 5 | 6 | # Summarization 7 | 8 | :::info 9 | [Conceptual Guide](https://docs.langchain-gpt4all.com/docs/use-cases/summarization) 10 | ::: 11 | 12 | A common use case is wanting to summarize long documents. 13 | This naturally runs into the context window limitations. 14 | Unlike in question-answering, you can't just do some semantic search hacks to only select the chunks of text most relevant to the question (because, in this case, there is no particular question - you want to summarize everything). 15 | So what do you do then? 16 | 17 | To get started, we would recommend checking out the summarization chain which attacks this problem in a recursive manner. 18 | 19 | - [Summarization Chain](../modules/chains/other_chains/summarization) 20 | -------------------------------------------------------------------------------- /docs/src/pages/index.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) Meta Platforms, Inc. and affiliates. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 6 | * 7 | * @format 8 | */ 9 | 10 | import React from "react"; 11 | import { Redirect } from "@docusaurus/router"; 12 | 13 | export default function Home() { 14 | return ; 15 | } 16 | -------------------------------------------------------------------------------- /docs/src/theme/SearchBar.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) Meta Platforms, Inc. and affiliates. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 6 | * 7 | * @format 8 | */ 9 | import React from "react"; 10 | import { MendableSearchBar } from "@mendable/search"; 11 | import useDocusaurusContext from "@docusaurus/useDocusaurusContext"; 12 | 13 | export default function SearchBarWrapper() { 14 | const { 15 | siteConfig: { customFields }, 16 | } = useDocusaurusContext(); 17 | return ( 18 |
19 | 27 |
28 | ); 29 | } 30 | -------------------------------------------------------------------------------- /docs/static/.nojekyll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lujstn/langchainjs-gpt4all/8a90c9c1c6a2e2e8590523106db3c533724d6ceb/docs/static/.nojekyll -------------------------------------------------------------------------------- /docs/static/img/DataberryDashboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lujstn/langchainjs-gpt4all/8a90c9c1c6a2e2e8590523106db3c533724d6ceb/docs/static/img/DataberryDashboard.png -------------------------------------------------------------------------------- /docs/static/img/HeliconeDashboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lujstn/langchainjs-gpt4all/8a90c9c1c6a2e2e8590523106db3c533724d6ceb/docs/static/img/HeliconeDashboard.png -------------------------------------------------------------------------------- /docs/static/img/HeliconeKeys.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lujstn/langchainjs-gpt4all/8a90c9c1c6a2e2e8590523106db3c533724d6ceb/docs/static/img/HeliconeKeys.png -------------------------------------------------------------------------------- /docs/static/img/android-chrome-192x192.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lujstn/langchainjs-gpt4all/8a90c9c1c6a2e2e8590523106db3c533724d6ceb/docs/static/img/android-chrome-192x192.png -------------------------------------------------------------------------------- /docs/static/img/android-chrome-512x512.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lujstn/langchainjs-gpt4all/8a90c9c1c6a2e2e8590523106db3c533724d6ceb/docs/static/img/android-chrome-512x512.png -------------------------------------------------------------------------------- /docs/static/img/apple-touch-icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lujstn/langchainjs-gpt4all/8a90c9c1c6a2e2e8590523106db3c533724d6ceb/docs/static/img/apple-touch-icon.png -------------------------------------------------------------------------------- /docs/static/img/favicon-16x16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lujstn/langchainjs-gpt4all/8a90c9c1c6a2e2e8590523106db3c533724d6ceb/docs/static/img/favicon-16x16.png -------------------------------------------------------------------------------- /docs/static/img/favicon-32x32.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lujstn/langchainjs-gpt4all/8a90c9c1c6a2e2e8590523106db3c533724d6ceb/docs/static/img/favicon-32x32.png -------------------------------------------------------------------------------- /docs/static/img/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lujstn/langchainjs-gpt4all/8a90c9c1c6a2e2e8590523106db3c533724d6ceb/docs/static/img/favicon.ico -------------------------------------------------------------------------------- /docs/static/img/parrot-chainlink-icon.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/lujstn/langchainjs-gpt4all/8a90c9c1c6a2e2e8590523106db3c533724d6ceb/docs/static/img/parrot-chainlink-icon.png -------------------------------------------------------------------------------- /docs/static/img/parrot-icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lujstn/langchainjs-gpt4all/8a90c9c1c6a2e2e8590523106db3c533724d6ceb/docs/static/img/parrot-icon.png -------------------------------------------------------------------------------- /examples/.yarn/install-state.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lujstn/langchainjs-gpt4all/8a90c9c1c6a2e2e8590523106db3c533724d6ceb/examples/.yarn/install-state.gz -------------------------------------------------------------------------------- /examples/src/README.md: -------------------------------------------------------------------------------- 1 | # langchain-gpt4all-examples 2 | 3 | This folder contains examples of how to use LangChain. 4 | 5 | ## Run an example 6 | 7 | What you'll usually want to do. 8 | 9 | First, build langchain-gpt4all. From the repository root, run: 10 | 11 | ```sh 12 | yarn 13 | yarn build 14 | ``` 15 | 16 | Most examples require API keys. Run `cp .env.example .env`, then edit `.env` with your API keys. 17 | 18 | Then from the `examples/` directory, run: 19 | 20 | `yarn run start ` 21 | 22 | eg. 23 | 24 | `yarn run start ./src/prompts/few_shot.ts` 25 | 26 | ## Run an example with the transpiled JS 27 | 28 | You shouldn't need to do this, but if you want to run an example with the transpiled JS, you can do so with: 29 | 30 | `yarn run start:dist ` 31 | 32 | eg. 
33 | 34 | `yarn run start:dist ./dist/prompts/few_shot.js` 35 | -------------------------------------------------------------------------------- /examples/src/agents/aiplugin-tool.ts: -------------------------------------------------------------------------------- 1 | import { ChatOpenAI } from "langchain-gpt4all/chat_models/openai"; 2 | import { initializeAgentExecutorWithOptions } from "langchain-gpt4all/agents"; 3 | import { 4 | RequestsGetTool, 5 | RequestsPostTool, 6 | AIPluginTool, 7 | } from "langchain-gpt4all/tools"; 8 | 9 | export const run = async () => { 10 | const tools = [ 11 | new RequestsGetTool(), 12 | new RequestsPostTool(), 13 | await AIPluginTool.fromPluginUrl( 14 | "https://www.klarna.com/.well-known/ai-plugin.json" 15 | ), 16 | ]; 17 | const agent = await initializeAgentExecutorWithOptions( 18 | tools, 19 | new ChatOpenAI({ temperature: 0 }), 20 | { agentType: "chat-zero-shot-react-description", verbose: true } 21 | ); 22 | 23 | const result = await agent.call({ 24 | input: "what t shirts are available in klarna?", 25 | }); 26 | 27 | console.log({ result }); 28 | }; 29 | -------------------------------------------------------------------------------- /examples/src/agents/mrkl.ts: -------------------------------------------------------------------------------- 1 | import { initializeAgentExecutorWithOptions } from "langchain-gpt4all/agents"; 2 | import { OpenAI } from "langchain-gpt4all/llms/openai"; 3 | import { SerpAPI } from "langchain-gpt4all/tools"; 4 | import { Calculator } from "langchain-gpt4all/tools/calculator"; 5 | 6 | const model = new OpenAI({ temperature: 0 }); 7 | const tools = [ 8 | new SerpAPI(process.env.SERPAPI_API_KEY, { 9 | location: "Austin,Texas,United States", 10 | hl: "en", 11 | gl: "us", 12 | }), 13 | new Calculator(), 14 | ]; 15 | 16 | const executor = await initializeAgentExecutorWithOptions(tools, model, { 17 | agentType: "zero-shot-react-description", 18 | verbose: true, 19 | }); 20 | 21 | const input = `Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?`; 22 | 23 | const result = await executor.call({ input }); 24 | -------------------------------------------------------------------------------- /examples/src/agents/zapier_mrkl.ts: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "langchain-gpt4all/llms/openai"; 2 | import { ZapierNLAWrapper } from "langchain-gpt4all/tools"; 3 | import { 4 | initializeAgentExecutorWithOptions, 5 | ZapierToolKit, 6 | } from "langchain-gpt4all/agents"; 7 | 8 | const model = new OpenAI({ temperature: 0 }); 9 | const zapier = new ZapierNLAWrapper(); 10 | const toolkit = await ZapierToolKit.fromZapierNLAWrapper(zapier); 11 | 12 | const executor = await initializeAgentExecutorWithOptions( 13 | toolkit.tools, 14 | model, 15 | { 16 | agentType: "zero-shot-react-description", 17 | verbose: true, 18 | } 19 | ); 20 | console.log("Loaded agent."); 21 | 22 | const input = `Summarize the last email I received regarding Silicon Valley Bank. 
Send the summary to the #test-zapier Slack channel.`; 23 | 24 | console.log(`Executing with input "${input}"...`); 25 | 26 | const result = await executor.call({ input }); 27 | 28 | console.log(`Got output ${result.output}`); 29 | -------------------------------------------------------------------------------- /examples/src/callbacks/console_handler.ts: -------------------------------------------------------------------------------- 1 | import { ConsoleCallbackHandler } from "langchain-gpt4all/callbacks"; 2 | import { LLMChain } from "langchain-gpt4all/chains"; 3 | import { OpenAI } from "langchain-gpt4all/llms/openai"; 4 | import { PromptTemplate } from "langchain-gpt4all/prompts"; 5 | 6 | export const run = async () => { 7 | const handler = new ConsoleCallbackHandler(); 8 | const llm = new OpenAI({ temperature: 0, callbacks: [handler] }); 9 | const prompt = PromptTemplate.fromTemplate("1 + {number} ="); 10 | const chain = new LLMChain({ prompt, llm, callbacks: [handler] }); 11 | 12 | const output = await chain.call({ number: 2 }); 13 | /* 14 | Entering new llm_chain chain... 15 | Finished chain. 16 | */ 17 | 18 | console.log(output); 19 | /* 20 | { text: ' 3\n\n3 - 1 = 2' } 21 | */ 22 | 23 | // The non-enumerable key `__run` contains the runId. 24 | console.log(output.__run); 25 | /* 26 | { runId: '90e1f42c-7cb4-484c-bf7a-70b73ef8e64b' } 27 | */ 28 | }; 29 | -------------------------------------------------------------------------------- /examples/src/callbacks/custom_handler.ts: -------------------------------------------------------------------------------- 1 | import { BaseCallbackHandler } from "langchain-gpt4all/callbacks"; 2 | import { 3 | AgentAction, 4 | AgentFinish, 5 | ChainValues, 6 | } from "langchain-gpt4all/schema"; 7 | 8 | export class MyCallbackHandler extends BaseCallbackHandler { 9 | name = "MyCallbackHandler"; 10 | 11 | async handleChainStart(chain: { name: string }) { 12 | console.log(`Entering new ${chain.name} chain...`); 13 | } 14 | 15 | async handleChainEnd(_output: ChainValues) { 16 | console.log("Finished chain."); 17 | } 18 | 19 | async handleAgentAction(action: AgentAction) { 20 | console.log(action.log); 21 | } 22 | 23 | async handleToolEnd(output: string) { 24 | console.log(output); 25 | } 26 | 27 | async handleText(text: string) { 28 | console.log(text); 29 | } 30 | 31 | async handleAgentEnd(action: AgentFinish) { 32 | console.log(action.log); 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /examples/src/callbacks/docs_constructor_callbacks.ts: -------------------------------------------------------------------------------- 1 | import { ConsoleCallbackHandler } from "langchain-gpt4all/callbacks"; 2 | import { OpenAI } from "langchain-gpt4all/llms/openai"; 3 | 4 | const llm = new OpenAI({ 5 | temperature: 0, 6 | // This handler will be used for all calls made with this LLM. 7 | callbacks: [new ConsoleCallbackHandler()], 8 | }); 9 | -------------------------------------------------------------------------------- /examples/src/callbacks/docs_request_callbacks.ts: -------------------------------------------------------------------------------- 1 | import { ConsoleCallbackHandler } from "langchain-gpt4all/callbacks"; 2 | import { OpenAI } from "langchain-gpt4all/llms/openai"; 3 | 4 | const llm = new OpenAI({ 5 | temperature: 0, 6 | }); 7 | 8 | // This handler will be used only for this call. 
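// It is passed as the optional third argument to `call`, after the input string and the (here unused) per-call options.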
9 | const response = await llm.call("1 + 1 =", undefined, [ 10 | new ConsoleCallbackHandler(), 11 | ]); 12 | -------------------------------------------------------------------------------- /examples/src/callbacks/docs_verbose.ts: -------------------------------------------------------------------------------- 1 | import { PromptTemplate } from "langchain-gpt4all/prompts"; 2 | import { LLMChain } from "langchain-gpt4all/chains"; 3 | import { OpenAI } from "langchain-gpt4all/llms/openai"; 4 | 5 | const chain = new LLMChain({ 6 | llm: new OpenAI({ temperature: 0 }), 7 | prompt: PromptTemplate.fromTemplate("Hello, world!"), 8 | // This will enable logging of all Chain *and* LLM events to the console. 9 | verbose: true, 10 | }); 11 | -------------------------------------------------------------------------------- /examples/src/chains/advanced_subclass.ts: -------------------------------------------------------------------------------- 1 | import { CallbackManagerForChainRun } from "langchain-gpt4all/callbacks"; 2 | import { BaseChain as _ } from "langchain-gpt4all/chains"; 3 | import { BaseMemory } from "langchain-gpt4all/memory"; 4 | import { ChainValues } from "langchain-gpt4all/schema"; 5 | 6 | abstract class BaseChain { 7 | memory?: BaseMemory; 8 | 9 | /** 10 | * Run the core logic of this chain and return the output 11 | */ 12 | abstract _call( 13 | values: ChainValues, 14 | runManager?: CallbackManagerForChainRun 15 | ): Promise; 16 | 17 | /** 18 | * Return the string type key uniquely identifying this class of chain. 19 | */ 20 | abstract _chainType(): string; 21 | 22 | /** 23 | * Return the list of input keys this chain expects to receive when called. 24 | */ 25 | abstract get inputKeys(): string[]; 26 | 27 | /** 28 | * Return the list of output keys this chain will produce when called. 29 | */ 30 | abstract get outputKeys(): string[]; 31 | } 32 | -------------------------------------------------------------------------------- /examples/src/chains/conversation_chain.ts: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "langchain-gpt4all/llms/openai"; 2 | import { ConversationChain } from "langchain-gpt4all/chains"; 3 | 4 | export const run = async () => { 5 | const model = new OpenAI({}); 6 | const chain = new ConversationChain({ llm: model }); 7 | const res1 = await chain.call({ input: "Hi! I'm Jim." }); 8 | console.log({ res1 }); 9 | const res2 = await chain.call({ input: "What's my name?" 
}); 10 | console.log({ res2 }); 11 | }; 12 | -------------------------------------------------------------------------------- /examples/src/chains/llm_chain_stream.ts: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "langchain-gpt4all/llms/openai"; 2 | import { PromptTemplate } from "langchain-gpt4all/prompts"; 3 | import { LLMChain } from "langchain-gpt4all/chains"; 4 | 5 | export const run = async () => { 6 | const model = new OpenAI({ 7 | temperature: 0.9, 8 | streaming: true, 9 | callbacks: [ 10 | { 11 | handleLLMNewToken(token: string) { 12 | console.log({ token }); 13 | }, 14 | }, 15 | ], 16 | }); 17 | 18 | const template = "What is a good name for a company that makes {product}?"; 19 | const prompt = new PromptTemplate({ template, inputVariables: ["product"] }); 20 | const chain = new LLMChain({ llm: model, prompt }); 21 | const res = await chain.call({ product: "colorful socks" }); 22 | console.log({ res }); 23 | }; 24 | -------------------------------------------------------------------------------- /examples/src/chains/load_from_hub.ts: -------------------------------------------------------------------------------- 1 | import { loadChain } from "langchain-gpt4all/chains/load"; 2 | 3 | export const run = async () => { 4 | const chain = await loadChain("lc://chains/hello-world/chain.json"); 5 | const res = await chain.call({ topic: "foo" }); 6 | console.log(res); 7 | }; 8 | -------------------------------------------------------------------------------- /examples/src/chains/question_answering_map_reduce.ts: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "langchain-gpt4all/llms/openai"; 2 | import { loadQAMapReduceChain } from "langchain-gpt4all/chains"; 3 | import { Document } from "langchain-gpt4all/document"; 4 | 5 | export const run = async () => { 6 | const model = new OpenAI({ temperature: 0 }); 7 | const chain = loadQAMapReduceChain(model); 8 | const docs = [ 9 | new Document({ pageContent: "harrison went to harvard" }), 10 | new Document({ pageContent: "ankush went to princeton" }), 11 | ]; 12 | const res = await chain.call({ 13 | input_documents: docs, 14 | question: "Where did harrison go to college", 15 | }); 16 | console.log({ res }); 17 | }; 18 | -------------------------------------------------------------------------------- /examples/src/chains/sql_db.ts: -------------------------------------------------------------------------------- 1 | import { DataSource } from "typeorm"; 2 | import { OpenAI } from "langchain-gpt4all/llms/openai"; 3 | import { SqlDatabase } from "langchain-gpt4all/sql_db"; 4 | import { SqlDatabaseChain } from "langchain-gpt4all/chains"; 5 | 6 | /** 7 | * This example uses the Chinook database, which is a sample database available for SQL Server, Oracle, MySQL, etc. 8 | * To set it up follow the instructions on https://database.guide/2-sample-databases-sqlite/, placing the .db file 9 | * in the examples folder. 10 | */ 11 | export const run = async () => { 12 | const datasource = new DataSource({ 13 | type: "sqlite", 14 | database: "Chinook.db", 15 | }); 16 | 17 | const db = await SqlDatabase.fromDataSourceParams({ 18 | appDataSource: datasource, 19 | }); 20 | 21 | const chain = new SqlDatabaseChain({ 22 | llm: new OpenAI({ temperature: 0 }), 23 | database: db, 24 | }); 25 | 26 | const res = await chain.run("How many tracks are there?"); 27 | console.log(res); 28 | // There are 3503 tracks.
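// Note: `run` is a convenience wrapper around `call` for chains with a single input and a single output key.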
29 | }; 30 | -------------------------------------------------------------------------------- /examples/src/chains/summarization.ts: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "langchain-gpt4all/llms/openai"; 2 | import { loadSummarizationChain } from "langchain-gpt4all/chains"; 3 | import { Document } from "langchain-gpt4all/document"; 4 | 5 | export const run = async () => { 6 | const model = new OpenAI({}); 7 | const chain = loadSummarizationChain(model, { type: "stuff" }); 8 | const docs = [ 9 | new Document({ pageContent: "harrison went to harvard" }), 10 | new Document({ pageContent: "ankush went to princeton" }), 11 | ]; 12 | const res = await chain.call({ 13 | input_documents: docs, 14 | }); 15 | console.log(res); 16 | }; 17 | -------------------------------------------------------------------------------- /examples/src/chat/llm_chain.ts: -------------------------------------------------------------------------------- 1 | import { LLMChain } from "langchain-gpt4all/chains"; 2 | import { ChatOpenAI } from "langchain-gpt4all/chat_models/openai"; 3 | import { 4 | ChatPromptTemplate, 5 | HumanMessagePromptTemplate, 6 | SystemMessagePromptTemplate, 7 | } from "langchain-gpt4all/prompts"; 8 | 9 | export const run = async () => { 10 | const chat = new ChatOpenAI({ temperature: 0 }); 11 | 12 | const chatPrompt = ChatPromptTemplate.fromPromptMessages([ 13 | SystemMessagePromptTemplate.fromTemplate( 14 | "You are a helpful assistant that translates {input_language} to {output_language}." 15 | ), 16 | HumanMessagePromptTemplate.fromTemplate("{text}"), 17 | ]); 18 | 19 | const chain = new LLMChain({ 20 | prompt: chatPrompt, 21 | llm: chat, 22 | }); 23 | 24 | const response = await chain.call({ 25 | input_language: "English", 26 | output_language: "French", 27 | text: "I love programming.", 28 | }); 29 | 30 | console.log(response); 31 | }; 32 | -------------------------------------------------------------------------------- /examples/src/document_loaders/cheerio_web.ts: -------------------------------------------------------------------------------- 1 | import { CheerioWebBaseLoader } from "langchain-gpt4all/document_loaders/web/cheerio"; 2 | 3 | export const run = async () => { 4 | const loader = new CheerioWebBaseLoader( 5 | "https://news.ycombinator.com/item?id=34817881" 6 | ); 7 | const docs = await loader.load(); 8 | console.log({ docs }); 9 | }; 10 | -------------------------------------------------------------------------------- /examples/src/document_loaders/college_confidential.ts: -------------------------------------------------------------------------------- 1 | import { CollegeConfidentialLoader } from "langchain-gpt4all/document_loaders/web/college_confidential"; 2 | 3 | export const run = async () => { 4 | const loader = new CollegeConfidentialLoader( 5 | "https://www.collegeconfidential.com/colleges/brown-university/" 6 | ); 7 | const docs = await loader.load(); 8 | console.log({ docs }); 9 | }; 10 | -------------------------------------------------------------------------------- /examples/src/document_loaders/confluence.ts: -------------------------------------------------------------------------------- 1 | import { ConfluencePagesLoader } from "langchain-gpt4all/document_loaders/web/confluence"; 2 | 3 | const username = process.env.CONFLUENCE_USERNAME; 4 | const accessToken = process.env.CONFLUENCE_ACCESS_TOKEN; 5 | 6 | if (username && accessToken) { 7 | const loader = new ConfluencePagesLoader({ 8 | baseUrl: 
"https://example.atlassian.net/wiki", 9 | spaceKey: "~EXAMPLE362906de5d343d49dcdbae5dEXAMPLE", 10 | username, 11 | accessToken, 12 | }); 13 | 14 | const documents = await loader.load(); 15 | console.log(documents); 16 | } else { 17 | console.log( 18 | "You must provide a username and access token to run this example." 19 | ); 20 | } 21 | -------------------------------------------------------------------------------- /examples/src/document_loaders/example_data/example.txt: -------------------------------------------------------------------------------- 1 | Foo 2 | Bar 3 | Baz 4 | 5 | -------------------------------------------------------------------------------- /examples/src/document_loaders/gitbook.ts: -------------------------------------------------------------------------------- 1 | import { GitbookLoader } from "langchain-gpt4all/document_loaders/web/gitbook"; 2 | 3 | export const run = async () => { 4 | const loader = new GitbookLoader("https://docs.gitbook.com"); 5 | const docs = await loader.load(); // load single path 6 | console.log(docs); 7 | const allPathsLoader = new GitbookLoader("https://docs.gitbook.com", { 8 | shouldLoadAllPaths: true, 9 | }); 10 | const docsAllPaths = await allPathsLoader.load(); // loads all paths of the given gitbook 11 | console.log(docsAllPaths); 12 | }; 13 | -------------------------------------------------------------------------------- /examples/src/document_loaders/github.ts: -------------------------------------------------------------------------------- 1 | import { GithubRepoLoader } from "langchain-gpt4all/document_loaders/web/github"; 2 | 3 | export const run = async () => { 4 | const loader = new GithubRepoLoader( 5 | "https://github.com/lujstn/langchainjs-gpt4all", 6 | { branch: "main", recursive: false, unknown: "warn" } 7 | ); 8 | const docs = await loader.load(); 9 | console.log({ docs }); 10 | }; 11 | -------------------------------------------------------------------------------- /examples/src/document_loaders/hn.ts: -------------------------------------------------------------------------------- 1 | import { HNLoader } from "langchain-gpt4all/document_loaders/web/hn"; 2 | 3 | export const run = async () => { 4 | const loader = new HNLoader("https://news.ycombinator.com/item?id=34817881"); 5 | const docs = await loader.load(); 6 | console.log({ docs }); 7 | }; 8 | -------------------------------------------------------------------------------- /examples/src/document_loaders/imsdb.ts: -------------------------------------------------------------------------------- 1 | import { IMSDBLoader } from "langchain-gpt4all/document_loaders/web/imsdb"; 2 | 3 | export const run = async () => { 4 | const loader = new IMSDBLoader( 5 | "https://imsdb.com/scripts/BlacKkKlansman.html" 6 | ); 7 | const docs = await loader.load(); 8 | console.log({ docs }); 9 | }; 10 | -------------------------------------------------------------------------------- /examples/src/document_loaders/notion_markdown.ts: -------------------------------------------------------------------------------- 1 | import { NotionLoader } from "langchain-gpt4all/document_loaders/fs/notion"; 2 | 3 | export const run = async () => { 4 | /** Provide the directory path of your notion folder */ 5 | const directoryPath = "Notion_DB"; 6 | const loader = new NotionLoader(directoryPath); 7 | const docs = await loader.load(); 8 | console.log({ docs }); 9 | }; 10 | -------------------------------------------------------------------------------- /examples/src/document_loaders/s3.ts: 
-------------------------------------------------------------------------------- 1 | import { S3Loader } from "langchain-gpt4all/document_loaders/web/s3"; 2 | 3 | const loader = new S3Loader({ 4 | bucket: "my-document-bucket-123", 5 | key: "AccountingOverview.pdf", 6 | s3Config: { 7 | region: "us-east-1", 8 | accessKeyId: "AKIAIOSFODNN7EXAMPLE", 9 | secretAccessKey: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", 10 | }, 11 | unstructuredAPIURL: "http://localhost:8000/general/v0/general", 12 | }); 13 | 14 | const docs = await loader.load(); 15 | 16 | console.log(docs); 17 | -------------------------------------------------------------------------------- /examples/src/document_loaders/srt.ts: -------------------------------------------------------------------------------- 1 | import { SRTLoader } from "langchain-gpt4all/document_loaders/fs/srt"; 2 | 3 | export const run = async () => { 4 | const loader = new SRTLoader( 5 | "src/document_loaders/example_data/Star_Wars_The_Clone_Wars_S06E07_Crisis_at_the_Heart.srt" 6 | ); 7 | const docs = await loader.load(); 8 | console.log({ docs }); 9 | }; 10 | -------------------------------------------------------------------------------- /examples/src/document_loaders/text.ts: -------------------------------------------------------------------------------- 1 | import { TextLoader } from "langchain-gpt4all/document_loaders/fs/text"; 2 | 3 | export const run = async () => { 4 | const loader = new TextLoader( 5 | "src/document_loaders/example_data/example.txt" 6 | ); 7 | const docs = await loader.load(); 8 | console.log({ docs }); 9 | }; 10 | -------------------------------------------------------------------------------- /examples/src/document_loaders/unstructured.ts: -------------------------------------------------------------------------------- 1 | import { UnstructuredLoader } from "langchain-gpt4all/document_loaders/fs/unstructured"; 2 | 3 | const options = { 4 | apiKey: "MY_API_KEY", 5 | }; 6 | 7 | const loader = new UnstructuredLoader( 8 | "src/document_loaders/example_data/notion.md", 9 | options 10 | ); 11 | const docs = await loader.load(); 12 | -------------------------------------------------------------------------------- /examples/src/document_loaders/unstructured_directory.ts: -------------------------------------------------------------------------------- 1 | import { UnstructuredDirectoryLoader } from "langchain-gpt4all/document_loaders/fs/unstructured"; 2 | 3 | const options = { 4 | apiKey: "MY_API_KEY", 5 | }; 6 | 7 | const loader = new UnstructuredDirectoryLoader( 8 | "langchain-gpt4all/src/document_loaders/tests/example_data", 9 | options 10 | ); 11 | const docs = await loader.load(); 12 | -------------------------------------------------------------------------------- /examples/src/embeddings/cohere.ts: -------------------------------------------------------------------------------- 1 | import { CohereEmbeddings } from "langchain-gpt4all/embeddings/cohere"; 2 | 3 | export const run = async () => { 4 | const model = new CohereEmbeddings(); 5 | const res = await model.embedQuery( 6 | "What would be a good company name for a company that makes colorful socks?"
7 | ); 8 | console.log({ res }); 9 | }; 10 | -------------------------------------------------------------------------------- /examples/src/embeddings/max_concurrency.ts: -------------------------------------------------------------------------------- 1 | import { OpenAIEmbeddings } from "langchain-gpt4all/embeddings/openai"; 2 | 3 | export const run = async () => { 4 | const model = new OpenAIEmbeddings({ 5 | maxConcurrency: 1, 6 | }); 7 | const res = await model.embedQuery( 8 | "What would be a good company name for a company that makes colorful socks?" 9 | ); 10 | console.log({ res }); 11 | }; 12 | -------------------------------------------------------------------------------- /examples/src/embeddings/openai.ts: -------------------------------------------------------------------------------- 1 | import { OpenAIEmbeddings } from "langchain-gpt4all/embeddings/openai"; 2 | 3 | export const run = async () => { 4 | const model = new OpenAIEmbeddings(); 5 | const res = await model.embedQuery( 6 | "What would be a good company name for a company that makes colorful socks?" 7 | ); 8 | console.log({ res }); 9 | }; 10 | -------------------------------------------------------------------------------- /examples/src/indexes/recursive_text_splitter.ts: -------------------------------------------------------------------------------- 1 | import { RecursiveCharacterTextSplitter } from "langchain-gpt4all/text_splitter"; 2 | 3 | export const run = async () => { 4 | const text = `Hi.\n\nI'm Harrison.\n\nHow? Are? You?\nOkay then f f f f. 5 | This is a weird text to write, but gotta test the splittingggg some how.\n\n 6 | Bye!\n\n-H.`; 7 | const splitter = new RecursiveCharacterTextSplitter({ 8 | chunkSize: 10, 9 | chunkOverlap: 1, 10 | }); 11 | const output = await splitter.createDocuments([text]); 12 | console.log(output); 13 | }; 14 | -------------------------------------------------------------------------------- /examples/src/indexes/text_splitter.ts: -------------------------------------------------------------------------------- 1 | import { Document } from "langchain-gpt4all/document"; 2 | import { CharacterTextSplitter } from "langchain-gpt4all/text_splitter"; 3 | 4 | export const run = async () => { 5 | /* Split text */ 6 | const text = "foo bar baz 123"; 7 | const splitter = new CharacterTextSplitter({ 8 | separator: " ", 9 | chunkSize: 7, 10 | chunkOverlap: 3, 11 | }); 12 | const output = await splitter.createDocuments([text]); 13 | console.log({ output }); 14 | /* Split documents */ 15 | const docOutput = await splitter.splitDocuments([ 16 | new Document({ pageContent: text }), 17 | ]); 18 | console.log({ docOutput }); 19 | }; 20 | -------------------------------------------------------------------------------- /examples/src/indexes/token_text_splitter.ts: -------------------------------------------------------------------------------- 1 | import { Document } from "langchain-gpt4all/document"; 2 | import { TokenTextSplitter } from "langchain-gpt4all/text_splitter"; 3 | import fs from "fs"; 4 | import path from "path"; 5 | 6 | export const run = async () => { 7 | /* Split text */ 8 | const text = fs.readFileSync( 9 | path.resolve(__dirname, "../../state_of_the_union.txt"), 10 | "utf8" 11 | ); 12 | 13 | const splitter = new TokenTextSplitter({ 14 | encodingName: "r50k_base", 15 | chunkSize: 10, 16 | chunkOverlap: 0, 17 | allowedSpecial: ["<|endoftext|>"], 18 | disallowedSpecial: [], 19 | }); 20 | 21 | const output = await splitter.createDocuments([text]); 22 | console.log({ output }); 23 | 24 | const docOutput = await
splitter.splitDocuments([ 25 | new Document({ pageContent: text }), 26 | ]); 27 | 28 | console.log({ docOutput }); 29 | }; 30 | -------------------------------------------------------------------------------- /examples/src/indexes/vector_stores/chroma/fromDocs.ts: -------------------------------------------------------------------------------- 1 | import { Chroma } from "langchain-gpt4all/vectorstores/chroma"; 2 | import { OpenAIEmbeddings } from "langchain-gpt4all/embeddings/openai"; 3 | import { TextLoader } from "langchain-gpt4all/document_loaders/fs/text"; 4 | 5 | // Create docs with a loader 6 | const loader = new TextLoader("src/document_loaders/example_data/example.txt"); 7 | const docs = await loader.load(); 8 | 9 | // Create vector store and index the docs 10 | const vectorStore = await Chroma.fromDocuments(docs, new OpenAIEmbeddings(), { 11 | collectionName: "a-test-collection", 12 | }); 13 | 14 | // Search for the most similar document 15 | const response = await vectorStore.similaritySearch("hello", 1); 16 | 17 | console.log(response); 18 | /* 19 | [ 20 | Document { 21 | pageContent: 'Foo\nBar\nBaz\n\n', 22 | metadata: { source: 'src/document_loaders/example_data/example.txt' } 23 | } 24 | ] 25 | */ 26 | -------------------------------------------------------------------------------- /examples/src/indexes/vector_stores/chroma/search.ts: -------------------------------------------------------------------------------- 1 | import { Chroma } from "langchain-gpt4all/vectorstores/chroma"; 2 | import { OpenAIEmbeddings } from "langchain-gpt4all/embeddings/openai"; 3 | 4 | const vectorStore = await Chroma.fromExistingCollection( 5 | new OpenAIEmbeddings(), 6 | { collectionName: "godel-escher-bach" } 7 | ); 8 | 9 | const response = await vectorStore.similaritySearch("scared", 2); 10 | console.log(response); 11 | /* 12 | [ 13 | Document { pageContent: 'Achilles: Oh, no!', metadata: {} }, 14 | Document { 15 | pageContent: 'Achilles: Yiikes! 
What is that?', 16 | metadata: { id: 1 } 17 | } 18 | ] 19 | */ 20 | -------------------------------------------------------------------------------- /examples/src/indexes/vector_stores/hnswlib.ts: -------------------------------------------------------------------------------- 1 | import { HNSWLib } from "langchain-gpt4all/vectorstores/hnswlib"; 2 | import { OpenAIEmbeddings } from "langchain-gpt4all/embeddings/openai"; 3 | 4 | export const run = async () => { 5 | const vectorStore = await HNSWLib.fromTexts( 6 | ["Hello world", "Bye bye", "hello nice world"], 7 | [{ id: 2 }, { id: 1 }, { id: 3 }], 8 | new OpenAIEmbeddings() 9 | ); 10 | 11 | const resultOne = await vectorStore.similaritySearch("hello world", 1); 12 | console.log(resultOne); 13 | }; 14 | -------------------------------------------------------------------------------- /examples/src/indexes/vector_stores/hnswlib_filter.ts: -------------------------------------------------------------------------------- 1 | import { HNSWLib } from "langchain-gpt4all/vectorstores/hnswlib"; 2 | import { OpenAIEmbeddings } from "langchain-gpt4all/embeddings/openai"; 3 | 4 | export const run = async () => { 5 | const vectorStore = await HNSWLib.fromTexts( 6 | ["Hello world", "Bye bye", "hello nice world"], 7 | [{ id: 2 }, { id: 1 }, { id: 3 }], 8 | new OpenAIEmbeddings() 9 | ); 10 | 11 | const result = await vectorStore.similaritySearch( 12 | "hello world", 13 | 10, 14 | (document) => document.metadata.id === 3 15 | ); 16 | 17 | // only "hello nice world" will be returned 18 | console.log(result); 19 | }; 20 | -------------------------------------------------------------------------------- /examples/src/indexes/vector_stores/hnswlib_fromdocs.ts: -------------------------------------------------------------------------------- 1 | import { HNSWLib } from "langchain-gpt4all/vectorstores/hnswlib"; 2 | import { OpenAIEmbeddings } from "langchain-gpt4all/embeddings/openai"; 3 | import { TextLoader } from "langchain-gpt4all/document_loaders/fs/text"; 4 | 5 | // Create docs with a loader 6 | const loader = new TextLoader("src/document_loaders/example_data/example.txt"); 7 | const docs = await loader.load(); 8 | 9 | // Load the docs into the vector store 10 | const vectorStore = await HNSWLib.fromDocuments(docs, new OpenAIEmbeddings()); 11 | 12 | // Search for the most similar document 13 | const result = await vectorStore.similaritySearch("hello world", 1); 14 | console.log(result); 15 | -------------------------------------------------------------------------------- /examples/src/indexes/vector_stores/hnswlib_saveload.ts: -------------------------------------------------------------------------------- 1 | import { HNSWLib } from "langchain-gpt4all/vectorstores/hnswlib"; 2 | import { OpenAIEmbeddings } from "langchain-gpt4all/embeddings/openai"; 3 | 4 | export const run = async () => { 5 | // Create a vector store through any method, here from texts as an example 6 | const vectorStore = await HNSWLib.fromTexts( 7 | ["Hello world", "Bye bye", "hello nice world"], 8 | [{ id: 2 }, { id: 1 }, { id: 3 }], 9 | new OpenAIEmbeddings() 10 | ); 11 | 12 | // Save the vector store to a directory 13 | const directory = "your/directory/here"; 14 | await vectorStore.save(directory); 15 | 16 | // Load the vector store from the same directory 17 | const loadedVectorStore = await HNSWLib.load( 18 | directory, 19 | new OpenAIEmbeddings() 20 | ); 21 | 22 | // vectorStore and loadedVectorStore are identical 23 | 24 | const result = await 
loadedVectorStore.similaritySearch("hello world", 1); 25 | console.log(result); 26 | }; 27 | -------------------------------------------------------------------------------- /examples/src/indexes/vector_stores/memory.ts: -------------------------------------------------------------------------------- 1 | import { MemoryVectorStore } from "langchain-gpt4all/vectorstores/memory"; 2 | import { OpenAIEmbeddings } from "langchain-gpt4all/embeddings/openai"; 3 | 4 | export const run = async () => { 5 | const vectorStore = await MemoryVectorStore.fromTexts( 6 | ["Hello world", "Bye bye", "hello nice world"], 7 | [{ id: 2 }, { id: 1 }, { id: 3 }], 8 | new OpenAIEmbeddings() 9 | ); 10 | 11 | const resultOne = await vectorStore.similaritySearch("hello world", 1); 12 | console.log(resultOne); 13 | }; 14 | -------------------------------------------------------------------------------- /examples/src/indexes/vector_stores/memory_custom_similarity.ts: -------------------------------------------------------------------------------- 1 | import { MemoryVectorStore } from "langchain-gpt4all/vectorstores/memory"; 2 | import { OpenAIEmbeddings } from "langchain-gpt4all/embeddings/openai"; 3 | import { similarity } from "ml-distance"; 4 | 5 | export const run = async () => { 6 | const vectorStore = await MemoryVectorStore.fromTexts( 7 | ["Hello world", "Bye bye", "hello nice world"], 8 | [{ id: 2 }, { id: 1 }, { id: 3 }], 9 | new OpenAIEmbeddings(), 10 | { similarity: similarity.pearson } 11 | ); 12 | 13 | const resultOne = await vectorStore.similaritySearch("hello world", 1); 14 | console.log(resultOne); 15 | }; 16 | -------------------------------------------------------------------------------- /examples/src/indexes/vector_stores/memory_fromdocs.ts: -------------------------------------------------------------------------------- 1 | import { MemoryVectorStore } from "langchain-gpt4all/vectorstores/memory"; 2 | import { OpenAIEmbeddings } from "langchain-gpt4all/embeddings/openai"; 3 | import { TextLoader } from "langchain-gpt4all/document_loaders/fs/text"; 4 | 5 | export const run = async () => { 6 | // Create docs with a loader 7 | const loader = new TextLoader( 8 | "src/document_loaders/example_data/example.txt" 9 | ); 10 | const docs = await loader.load(); 11 | 12 | // Load the docs into the vector store 13 | const vectorStore = await MemoryVectorStore.fromDocuments( 14 | docs, 15 | new OpenAIEmbeddings() 16 | ); 17 | 18 | // Search for the most similar document 19 | const resultOne = await vectorStore.similaritySearch("hello world", 1); 20 | 21 | console.log(resultOne); 22 | }; 23 | -------------------------------------------------------------------------------- /examples/src/indexes/vector_stores/milvus.ts: -------------------------------------------------------------------------------- 1 | import { Milvus } from "langchain-gpt4all/vectorstores/milvus"; 2 | import { OpenAIEmbeddings } from "langchain-gpt4all/embeddings/openai"; 3 | 4 | export const run = async () => { 5 | const vectorStore = await Milvus.fromTexts( 6 | ["Hello world", "Bye bye", "hello nice world"], 7 | [{ id: 2 }, { id: 1 }, { id: 3 }], 8 | new OpenAIEmbeddings() 9 | ); 10 | 11 | const resultOne = await vectorStore.similaritySearch("hello world", 1); 12 | console.log(resultOne); 13 | }; 14 | -------------------------------------------------------------------------------- /examples/src/indexes/vector_stores/mongo_fromTexts.ts: -------------------------------------------------------------------------------- 1 | import { MongoVectorStore 
} from "langchain-gpt4all/vectorstores/mongo"; 2 | import { CohereEmbeddings } from "langchain-gpt4all/embeddings/cohere"; 3 | import { MongoClient } from "mongodb"; 4 | 5 | export const run = async () => { 6 | const client = new MongoClient(process.env.MONGO_URI || ""); 7 | 8 | const collection = client.db("langchain-gpt4all").collection("test"); 9 | 10 | await MongoVectorStore.fromTexts( 11 | ["Hello world", "Bye bye", "What's this?"], 12 | [{ id: 2 }, { id: 1 }, { id: 3 }], 13 | new CohereEmbeddings(), 14 | { 15 | client, 16 | collection, 17 | // indexName: "default", // make sure that this matches the index name in atlas if not using "default" 18 | } 19 | ); 20 | 21 | // remember to close the client 22 | await client.close(); 23 | }; 24 | -------------------------------------------------------------------------------- /examples/src/indexes/vector_stores/mongo_search.ts: -------------------------------------------------------------------------------- 1 | import { MongoVectorStore } from "langchain-gpt4all/vectorstores/mongo"; 2 | import { CohereEmbeddings } from "langchain-gpt4all/embeddings/cohere"; 3 | import { MongoClient } from "mongodb"; 4 | 5 | export const run = async () => { 6 | const client = new MongoClient(process.env.MONGO_URI || ""); 7 | 8 | const collection = client.db("langchain-gpt4all").collection("test"); 9 | 10 | const vectorStore = new MongoVectorStore(new CohereEmbeddings(), { 11 | client, 12 | collection, 13 | // indexName: "default", // make sure that this matches the index name in atlas if not using "default" 14 | }); 15 | 16 | const resultOne = await vectorStore.similaritySearch("Hello world", 1); 17 | 18 | console.log(resultOne); 19 | 20 | // remember to close the client 21 | await client.close(); 22 | }; 23 | -------------------------------------------------------------------------------- /examples/src/indexes/vector_stores/myscale_fromTexts.ts: -------------------------------------------------------------------------------- 1 | import { MyScaleStore } from "langchain-gpt4all/vectorstores/myscale"; 2 | import { OpenAIEmbeddings } from "langchain-gpt4all/embeddings/openai"; 3 | 4 | const vectorStore = await MyScaleStore.fromTexts( 5 | ["Hello world", "Bye bye", "hello nice world"], 6 | [ 7 | { id: 2, name: "2" }, 8 | { id: 1, name: "1" }, 9 | { id: 3, name: "3" }, 10 | ], 11 | new OpenAIEmbeddings(), 12 | { 13 | host: process.env.MYSCALE_HOST || "localhost", 14 | port: process.env.MYSCALE_PORT || "8443", 15 | username: process.env.MYSCALE_USERNAME || "username", 16 | password: process.env.MYSCALE_PASSWORD || "password", 17 | } 18 | ); 19 | 20 | const results = await vectorStore.similaritySearch("hello world", 1); 21 | console.log(results); 22 | 23 | const filteredResults = await vectorStore.similaritySearch("hello world", 1, { 24 | whereStr: "metadata.name = '1'", 25 | }); 26 | console.log(filteredResults); 27 | -------------------------------------------------------------------------------- /examples/src/indexes/vector_stores/myscale_search.ts: -------------------------------------------------------------------------------- 1 | import { MyScaleStore } from "langchain-gpt4all/vectorstores/myscale"; 2 | import { OpenAIEmbeddings } from "langchain-gpt4all/embeddings/openai"; 3 | 4 | const vectorStore = await MyScaleStore.fromExistingIndex( 5 | new OpenAIEmbeddings(), 6 | { 7 | host: process.env.MYSCALE_HOST || "localhost", 8 | port: process.env.MYSCALE_PORT || "8443", 9 | username: process.env.MYSCALE_USERNAME || "username", 10 | password: 
process.env.MYSCALE_PASSWORD || "password", 11 | database: "your_database", // defaults to "default" 12 | table: "your_table", // defaults to "vector_table" 13 | } 14 | ); 15 | 16 | const results = await vectorStore.similaritySearch("hello world", 1); 17 | console.log(results); 18 | 19 | const filteredResults = await vectorStore.similaritySearch("hello world", 1, { 20 | whereStr: "metadata.name = '1'", 21 | }); 22 | console.log(filteredResults); 23 | -------------------------------------------------------------------------------- /examples/src/indexes/vector_stores/opensearch/opensearch.ts: -------------------------------------------------------------------------------- 1 | import { Client } from "@opensearch-project/opensearch"; 2 | import { OpenAIEmbeddings } from "langchain-gpt4all/embeddings/openai"; 3 | import { OpenSearchVectorStore } from "langchain-gpt4all/vectorstores/opensearch"; 4 | 5 | export async function run() { 6 | const client = new Client({ 7 | nodes: [process.env.OPENSEARCH_URL ?? "http://127.0.0.1:9200"], 8 | }); 9 | 10 | const vectorStore = await OpenSearchVectorStore.fromTexts( 11 | ["Hello world", "Bye bye", "What's this?"], 12 | [{ id: 2 }, { id: 1 }, { id: 3 }], 13 | new OpenAIEmbeddings(), 14 | { 15 | client, 16 | indexName: "documents", 17 | } 18 | ); 19 | 20 | const resultOne = await vectorStore.similaritySearch("Hello world", 1); 21 | console.log(resultOne); 22 | } 23 | -------------------------------------------------------------------------------- /examples/src/indexes/vector_stores/prisma_vectorstore/.env.example: -------------------------------------------------------------------------------- 1 | # Add DATABASE_URL to .env file in this directory 2 | DATABASE_URL=postgresql://[USERNAME]:[PASSWORD]@[ADDR]/[DBNAME] -------------------------------------------------------------------------------- /examples/src/indexes/vector_stores/prisma_vectorstore/.gitignore: -------------------------------------------------------------------------------- 1 | data 2 | docker-compose.yml -------------------------------------------------------------------------------- /examples/src/indexes/vector_stores/prisma_vectorstore/docker-compose.example.yml: -------------------------------------------------------------------------------- 1 | services: 2 | db: 3 | image: ankane/pgvector 4 | ports: 5 | - 5432:5432 6 | volumes: 7 | - ./data:/var/lib/postgresql/data 8 | environment: 9 | - POSTGRES_PASSWORD= 10 | - POSTGRES_USER= 11 | - POSTGRES_DB= -------------------------------------------------------------------------------- /examples/src/indexes/vector_stores/prisma_vectorstore/prisma.ts: -------------------------------------------------------------------------------- 1 | import { PrismaVectorStore } from "langchain-gpt4all/vectorstores/prisma"; 2 | import { OpenAIEmbeddings } from "langchain-gpt4all/embeddings/openai"; 3 | import { PrismaClient, Prisma, Document } from "@prisma/client"; 4 | 5 | export const run = async () => { 6 | const db = new PrismaClient(); 7 | 8 | const vectorStore = PrismaVectorStore.withModel<Document>(db).create( 9 | new OpenAIEmbeddings(), 10 | { 11 | prisma: Prisma, 12 | tableName: "Document", 13 | vectorColumnName: "vector", 14 | columns: { 15 | id: PrismaVectorStore.IdColumn, 16 | content: PrismaVectorStore.ContentColumn, 17 | }, 18 | } 19 | ); 20 | 21 | const texts = ["Hello world", "Bye bye", "What's this?"]; 22 | await vectorStore.addModels( 23 | await db.$transaction( 24 | texts.map((content) => db.document.create({ data: { content } })) 25 | ) 26 | ); 27 | 28 |
const resultOne = await vectorStore.similaritySearch("Hello world", 1); 29 | console.log(resultOne.at(0)?.metadata.content); 30 | }; 31 | -------------------------------------------------------------------------------- /examples/src/indexes/vector_stores/prisma_vectorstore/prisma/migrations/00_init/migration.sql: -------------------------------------------------------------------------------- 1 | -- CreateTable 2 | CREATE EXTENSION IF NOT EXISTS vector; 3 | CREATE TABLE "Document" ( 4 | "id" TEXT NOT NULL, 5 | "content" TEXT NOT NULL, 6 | "vector" vector, 7 | 8 | CONSTRAINT "Document_pkey" PRIMARY KEY ("id") 9 | ); 10 | -------------------------------------------------------------------------------- /examples/src/indexes/vector_stores/prisma_vectorstore/prisma/migrations/migration_lock.toml: -------------------------------------------------------------------------------- 1 | # Please do not edit this file manually 2 | # It should be added in your version-control system (i.e. Git) 3 | provider = "postgresql" -------------------------------------------------------------------------------- /examples/src/indexes/vector_stores/prisma_vectorstore/prisma/schema.prisma: -------------------------------------------------------------------------------- 1 | // This is your Prisma schema file, 2 | // learn more about it in the docs: https://pris.ly/d/prisma-schema 3 | 4 | generator client { 5 | provider = "prisma-client-js" 6 | } 7 | 8 | datasource db { 9 | provider = "postgresql" 10 | url = env("DATABASE_URL") 11 | } 12 | 13 | model Document { 14 | id String @id @default(cuid()) 15 | content String 16 | vector Unsupported("vector")? 17 | } 18 | -------------------------------------------------------------------------------- /examples/src/llms/cohere.ts: -------------------------------------------------------------------------------- 1 | import { Cohere } from "langchain-gpt4all/llms/cohere"; 2 | 3 | export const run = async () => { 4 | const model = new Cohere({ 5 | temperature: 0.7, 6 | maxTokens: 20, 7 | maxRetries: 5, 8 | }); 9 | const res = await model.call( 10 | "Question: What would be a good company name for a company that makes colorful socks?\nAnswer:" 11 | ); 12 | console.log({ res }); 13 | }; 14 | -------------------------------------------------------------------------------- /examples/src/llms/gpt4all.ts: -------------------------------------------------------------------------------- 1 | import { GPT4All } from "langchain-gpt4all/llms/gpt4all"; 2 | 3 | export const run = async () => { 4 | const model = new GPT4All({ 5 | model: "gpt4all-lora-unfiltered-quantized", 6 | forceDownload: true, // Defaults to false 7 | decoderConfig: {}, // Defaults to {} 8 | }); 9 | const res = await model.call( 10 | "What would be a good company name for a company that makes colorful socks?"
11 | ); 12 | console.log({ res }); 13 | }; 14 | -------------------------------------------------------------------------------- /examples/src/llms/hf.ts: -------------------------------------------------------------------------------- 1 | import { HuggingFaceInference } from "langchain-gpt4all/llms/hf"; 2 | 3 | export const run = async () => { 4 | const model = new HuggingFaceInference({ 5 | model: "gpt2", 6 | temperature: 0.7, 7 | maxTokens: 50, 8 | }); 9 | const res = await model.call( 10 | "Question: What would be a good company name for a company that makes colorful socks?\nAnswer:" 11 | ); 12 | console.log({ res }); 13 | }; 14 | -------------------------------------------------------------------------------- /examples/src/llms/openai-chat.ts: -------------------------------------------------------------------------------- 1 | import { OpenAIChat } from "langchain-gpt4all/llms/openai"; 2 | 3 | export const run = async () => { 4 | const model = new OpenAIChat({ 5 | prefixMessages: [ 6 | { 7 | role: "system", 8 | content: "You are a helpful assistant that answers in pirate language", 9 | }, 10 | ], 11 | maxTokens: 50, 12 | }); 13 | const res = await model.call( 14 | "What would be a good company name for a company that makes colorful socks?" 15 | ); 16 | console.log({ res }); 17 | }; 18 | -------------------------------------------------------------------------------- /examples/src/llms/openai.ts: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "langchain-gpt4all/llms/openai"; 2 | 3 | export const run = async () => { 4 | const model = new OpenAI({ 5 | modelName: "gpt-4", 6 | temperature: 0.7, 7 | maxTokens: 1000, 8 | maxRetries: 5, 9 | }); 10 | const res = await model.call( 11 | "Question: What would be a good company name for a company that makes colorful socks?\nAnswer:" 12 | ); 13 | console.log({ res }); 14 | }; 15 | -------------------------------------------------------------------------------- /examples/src/llms/replicate.ts: -------------------------------------------------------------------------------- 1 | import { Replicate } from "langchain-gpt4all/llms/replicate"; 2 | 3 | export const run = async () => { 4 | const model = new Replicate({ 5 | model: 6 | "replicate/flan-t5-xl:3ae0799123a1fe11f8c89fd99632f843fc5f7a761630160521c4253149754523", 7 | }); 8 | const res = await model.call( 9 | "Question: What would be a good company name for a company that makes colorful socks?\nAnswer:" 10 | ); 11 | console.log({ res }); 12 | }; 13 | -------------------------------------------------------------------------------- /examples/src/memory/buffer.ts: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "langchain-gpt4all/llms/openai"; 2 | import { BufferMemory } from "langchain-gpt4all/memory"; 3 | import { LLMChain } from "langchain-gpt4all/chains"; 4 | import { PromptTemplate } from "langchain-gpt4all/prompts"; 5 | 6 | const memory = new BufferMemory({ memoryKey: "chat_history" }); 7 | const model = new OpenAI({ temperature: 0.9 }); 8 | const prompt = 9 | PromptTemplate.fromTemplate(`The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
10 | 11 | Current conversation: 12 | {chat_history} 13 | Human: {input} 14 | AI:`); 15 | const chain = new LLMChain({ llm: model, prompt, memory }); 16 | 17 | const res1 = await chain.call({ input: "Hi! I'm Jim." }); 18 | console.log({ res1 }); 19 | 20 | const res2 = await chain.call({ input: "What's my name?" }); 21 | console.log({ res2 }); 22 | -------------------------------------------------------------------------------- /examples/src/models/chat/chat_quick_start.ts: -------------------------------------------------------------------------------- 1 | import { ChatOpenAI } from "langchain-gpt4all/chat_models/openai"; 2 | import { HumanChatMessage } from "langchain-gpt4all/schema"; 3 | 4 | export const run = async () => { 5 | const chat = new ChatOpenAI(); 6 | // Pass in a list of messages to `call` to start a conversation. In this simple example, we only pass in one message. 7 | const response = await chat.call([ 8 | new HumanChatMessage( 9 | "What is a good name for a company that makes colorful socks?" 10 | ), 11 | ]); 12 | console.log(response); 13 | // AIChatMessage { text: '\n\nRainbow Sox Co.' } 14 | }; 15 | -------------------------------------------------------------------------------- /examples/src/models/chat/chat_streaming.ts: -------------------------------------------------------------------------------- 1 | import { ChatOpenAI } from "langchain-gpt4all/chat_models/openai"; 2 | import { HumanChatMessage } from "langchain-gpt4all/schema"; 3 | 4 | const chat = new ChatOpenAI({ 5 | maxTokens: 25, 6 | streaming: true, 7 | }); 8 | 9 | const response = await chat.call( 10 | [new HumanChatMessage("Tell me a joke.")], 11 | undefined, 12 | [ 13 | { 14 | handleLLMNewToken(token: string) { 15 | console.log({ token }); 16 | }, 17 | }, 18 | ] 19 | ); 20 | 21 | console.log(response); 22 | // { token: '' } 23 | // { token: '\n\n' } 24 | // { token: 'Why' } 25 | // { token: ' don' } 26 | // { token: "'t" } 27 | // { token: ' scientists' } 28 | // { token: ' trust' } 29 | // { token: ' atoms' } 30 | // { token: '?\n\n' } 31 | // { token: 'Because' } 32 | // { token: ' they' } 33 | // { token: ' make' } 34 | // { token: ' up' } 35 | // { token: ' everything' } 36 | // { token: '.' } 37 | // { token: '' } 38 | // AIChatMessage { 39 | // text: "\n\nWhy don't scientists trust atoms?\n\nBecause they make up everything." 
40 | // } 41 | -------------------------------------------------------------------------------- /examples/src/models/chat/chat_streaming_stdout.ts: -------------------------------------------------------------------------------- 1 | import { ChatOpenAI } from "langchain-gpt4all/chat_models/openai"; 2 | import { HumanChatMessage } from "langchain-gpt4all/schema"; 3 | 4 | const chat = new ChatOpenAI({ 5 | streaming: true, 6 | callbacks: [ 7 | { 8 | handleLLMNewToken(token: string) { 9 | process.stdout.write(token); 10 | }, 11 | }, 12 | ], 13 | }); 14 | 15 | await chat.call([ 16 | new HumanChatMessage("Write me a song about sparkling water."), 17 | ]); 18 | /* 19 | Verse 1: 20 | Bubbles rise, crisp and clear 21 | Refreshing taste that brings us cheer 22 | Sparkling water, so light and pure 23 | Quenches our thirst, it's always secure 24 | 25 | Chorus: 26 | Sparkling water, oh how we love 27 | Its fizzy bubbles and grace above 28 | It's the perfect drink, anytime, anyplace 29 | Refreshing as it gives us a taste 30 | 31 | Verse 2: 32 | From morning brunch to evening feast 33 | It's the perfect drink for a treat 34 | A sip of it brings a smile so bright 35 | Our thirst is quenched in just one sip so light 36 | ... 37 | */ 38 | -------------------------------------------------------------------------------- /examples/src/models/chat/chat_timeout.ts: -------------------------------------------------------------------------------- 1 | import { ChatOpenAI } from "langchain-gpt4all/chat_models/openai"; 2 | import { HumanChatMessage } from "langchain-gpt4all/schema"; 3 | 4 | export const run = async () => { 5 | const chat = new ChatOpenAI( 6 | { temperature: 1, timeout: 1000 } // 1s timeout 7 | ); 8 | 9 | const response = await chat.call([ 10 | new HumanChatMessage( 11 | "What is a good name for a company that makes colorful socks?" 12 | ), 13 | ]); 14 | console.log(response); 15 | // AIChatMessage { text: '\n\nRainbow Sox Co.' 
} 16 | }; 17 | -------------------------------------------------------------------------------- /examples/src/models/chat/integration_anthropic.ts: -------------------------------------------------------------------------------- 1 | import { ChatAnthropic } from "langchain-gpt4all/chat_models/anthropic"; 2 | 3 | const model = new ChatAnthropic({ 4 | temperature: 0.9, 5 | apiKey: "YOUR-API-KEY", // In Node.js defaults to process.env.ANTHROPIC_API_KEY 6 | }); 7 | -------------------------------------------------------------------------------- /examples/src/models/chat/integration_azure_openai.ts: -------------------------------------------------------------------------------- 1 | import { ChatOpenAI } from "langchain-gpt4all/chat_models/openai"; 2 | 3 | const model = new ChatOpenAI({ 4 | temperature: 0.9, 5 | azureOpenAIApiKey: "YOUR-API-KEY", // In Node.js defaults to process.env.AZURE_OPENAI_API_KEY 6 | azureOpenAIApiInstanceName: "YOUR-INSTANCE-NAME", // In Node.js defaults to process.env.AZURE_OPENAI_API_INSTANCE_NAME 7 | azureOpenAIApiDeploymentName: "YOUR-DEPLOYMENT-NAME", // In Node.js defaults to process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME 8 | azureOpenAIApiVersion: "YOUR-API-VERSION", // In Node.js defaults to process.env.AZURE_OPENAI_API_VERSION 9 | }); 10 | -------------------------------------------------------------------------------- /examples/src/models/chat/integration_openai.ts: -------------------------------------------------------------------------------- 1 | import { ChatOpenAI } from "langchain-gpt4all/chat_models/openai"; 2 | 3 | const model = new ChatOpenAI({ 4 | temperature: 0.9, 5 | openAIApiKey: "YOUR-API-KEY", // In Node.js defaults to process.env.OPENAI_API_KEY 6 | }); 7 | -------------------------------------------------------------------------------- /examples/src/models/embeddings/cohere.ts: -------------------------------------------------------------------------------- 1 | import { CohereEmbeddings } from "langchain-gpt4all/embeddings/cohere"; 2 | 3 | export const run = async () => { 4 | /* Embed queries */ 5 | const embeddings = new CohereEmbeddings(); 6 | const res = await embeddings.embedQuery("Hello world"); 7 | console.log(res); 8 | /* Embed documents */ 9 | const documentRes = await embeddings.embedDocuments([ 10 | "Hello world", 11 | "Bye bye", 12 | ]); 13 | console.log({ documentRes }); 14 | }; 15 | -------------------------------------------------------------------------------- /examples/src/models/embeddings/openai.ts: -------------------------------------------------------------------------------- 1 | import { OpenAIEmbeddings } from "langchain-gpt4all/embeddings/openai"; 2 | 3 | export const run = async () => { 4 | /* Embed queries */ 5 | const embeddings = new OpenAIEmbeddings(); 6 | const res = await embeddings.embedQuery("Hello world"); 7 | console.log(res); 8 | /* Embed documents */ 9 | const documentRes = await embeddings.embedDocuments([ 10 | "Hello world", 11 | "Bye bye", 12 | ]); 13 | console.log({ documentRes }); 14 | }; 15 | -------------------------------------------------------------------------------- /examples/src/models/embeddings/openai_timeout.ts: -------------------------------------------------------------------------------- 1 | import { OpenAIEmbeddings } from "langchain-gpt4all/embeddings/openai"; 2 | 3 | export const run = async () => { 4 | const embeddings = new OpenAIEmbeddings({ 5 | timeout: 1000, // 1s timeout 6 | }); 7 | /* Embed queries */ 8 | const res = await embeddings.embedQuery("Hello world"); 9 | 
console.log(res); 10 | /* Embed documents */ 11 | const documentRes = await embeddings.embedDocuments([ 12 | "Hello world", 13 | "Bye bye", 14 | ]); 15 | console.log({ documentRes }); 16 | }; 17 | -------------------------------------------------------------------------------- /examples/src/models/embeddings/tensorflow.ts: -------------------------------------------------------------------------------- 1 | import "@tensorflow/tfjs-backend-cpu"; 2 | import { Document } from "langchain-gpt4all/document"; 3 | import { TensorFlowEmbeddings } from "langchain-gpt4all/embeddings/tensorflow"; 4 | import { MemoryVectorStore } from "langchain-gpt4all/vectorstores/memory"; 5 | 6 | const embeddings = new TensorFlowEmbeddings(); 7 | const store = new MemoryVectorStore(embeddings); 8 | 9 | const documents = [ 10 | "A document", 11 | "Some other piece of text", 12 | "One more", 13 | "And another", 14 | ]; 15 | 16 | await store.addDocuments( 17 | documents.map((pageContent) => new Document({ pageContent })) 18 | ); 19 | -------------------------------------------------------------------------------- /examples/src/models/llm/llm_promptlayer.ts: -------------------------------------------------------------------------------- 1 | import { PromptLayerOpenAI } from "langchain-gpt4all/llms/openai"; 2 | 3 | export const run = async () => { 4 | const model = new PromptLayerOpenAI({ temperature: 0.9 }); 5 | const res = await model.call( 6 | "What would be a good company name for a company that makes colorful socks?" 7 | ); 8 | console.log({ res }); 9 | }; 10 | -------------------------------------------------------------------------------- /examples/src/models/llm/llm_quick_start.ts: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "langchain-gpt4all/llms/openai"; 2 | 3 | export const run = async () => { 4 | const model = new OpenAI(); 5 | // `call` is a simple string-in, string-out method for interacting with the model. 6 | const resA = await model.call( 7 | "What would be a good company name for a company that makes colorful socks?" 8 | ); 9 | console.log({ resA }); 10 | // { resA: '\n\nSocktastic Colors' } 11 | }; 12 | -------------------------------------------------------------------------------- /examples/src/models/llm/llm_timeout.ts: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "langchain-gpt4all/llms/openai"; 2 | 3 | export const run = async () => { 4 | const model = new OpenAI( 5 | { temperature: 1, timeout: 1000 } // 1s timeout 6 | ); 7 | 8 | const resA = await model.call( 9 | "What would be a good company name for a company that makes colorful socks?" 10 | ); 11 | 12 | console.log({ resA }); 13 | // { resA: '\n\nSocktastic Colors' } 14 | }; 15 | -------------------------------------------------------------------------------- /examples/src/models/llm/llm_with_tracing.ts: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "langchain-gpt4all/llms/openai"; 2 | import { ChatOpenAI } from "langchain-gpt4all/chat_models/openai"; 3 | import { SystemChatMessage, HumanChatMessage } from "langchain-gpt4all/schema"; 4 | import * as process from "process"; 5 | 6 | export const run = async () => { 7 | process.env.LANGCHAIN_HANDLER = "langchain-gpt4all"; 8 | const model = new OpenAI({ temperature: 0.9 }); 9 | const resA = await model.call( 10 | "What would be a good company name for a company that makes colorful socks?"
11 | ); 12 | console.log({ resA }); 13 | 14 | const chat = new ChatOpenAI({ temperature: 0 }); 15 | const system_message = new SystemChatMessage("You are to chat with a user."); 16 | const message = new HumanChatMessage("Hello!"); 17 | const resB = await chat.call([system_message, message]); 18 | console.log({ resB }); 19 | }; 20 | -------------------------------------------------------------------------------- /examples/src/models/llm/openai_basePath.ts: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "langchain-gpt4all/llms/openai"; 2 | 3 | const model = new OpenAI( 4 | { temperature: 0 }, 5 | { basePath: "https://oai.hconeai.com/v1" } 6 | ); 7 | 8 | const res = await model.call( 9 | "What would be a good company name for a company that makes colorful socks?" 10 | ); 11 | console.log(res); 12 | -------------------------------------------------------------------------------- /examples/src/models/llm/openai_cancellation.ts: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "langchain-gpt4all/llms/openai"; 2 | 3 | export const run = async () => { 4 | const model = new OpenAI({ temperature: 1 }); 5 | 6 | const controller = new AbortController(); 7 | 8 | // Call `controller.abort()` somewhere to cancel the request. 9 | 10 | const res = await model.call( 11 | "What would be a good company name for a company that makes colorful socks?", 12 | { 13 | options: { 14 | signal: controller.signal, 15 | }, 16 | } 17 | ); 18 | 19 | console.log(res); 20 | /* 21 | '\n\nSocktastic Colors' 22 | */ 23 | }; 24 | -------------------------------------------------------------------------------- /examples/src/models/llm/openai_userid.ts: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "langchain-gpt4all/llms/openai"; 2 | 3 | const model = new OpenAI({ temperature: 0 }); 4 | 5 | const res = await model.call( 6 | "What would be a good company name for a company that makes colorful socks?", 7 | { 8 | options: { 9 | headers: { 10 | "User-Id": "123", 11 | }, 12 | }, 13 | } 14 | ); 15 | console.log(res); 16 | -------------------------------------------------------------------------------- /examples/src/prompts/load_from_hub.ts: -------------------------------------------------------------------------------- 1 | import { loadPrompt } from "langchain-gpt4all/prompts/load"; 2 | 3 | export const run = async () => { 4 | const prompt = await loadPrompt("lc://prompts/hello-world/prompt.yaml"); 5 | const res = await prompt.format({}); 6 | console.log({ res }); 7 | }; 8 | -------------------------------------------------------------------------------- /examples/src/retrievers/chatgpt-plugin.ts: -------------------------------------------------------------------------------- 1 | import { ChatGPTPluginRetriever } from "langchain-gpt4all/retrievers/remote"; 2 | 3 | export const run = async () => { 4 | const retriever = new ChatGPTPluginRetriever({ 5 | url: "http://0.0.0.0:8000", 6 | auth: { 7 | bearer: "super-secret-jwt-token-with-at-least-32-characters-long", 8 | }, 9 | }); 10 | 11 | const docs = await retriever.getRelevantDocuments("hello world"); 12 | 13 | console.log(docs); 14 | }; 15 | -------------------------------------------------------------------------------- /examples/src/retrievers/databerry.ts: -------------------------------------------------------------------------------- 1 | import { DataberryRetriever } from "langchain-gpt4all/retrievers/databerry";
2 | 3 | export const run = async () => { 4 | const retriever = new DataberryRetriever({ 5 | datastoreUrl: "https://api.databerry.ai/query/clg1xg2h80000l708dymr0fxc", 6 | apiKey: "DATABERRY_API_KEY", // optional: needed for private datastores 7 | topK: 8, // optional: default value is 3 8 | }); 9 | 10 | const docs = await retriever.getRelevantDocuments("hello"); 11 | 12 | console.log(docs); 13 | }; 14 | -------------------------------------------------------------------------------- /examples/src/retrievers/hyde.ts: -------------------------------------------------------------------------------- 1 | import { OpenAI } from "langchain-gpt4all/llms/openai"; 2 | import { OpenAIEmbeddings } from "langchain-gpt4all/embeddings/openai"; 3 | import { MemoryVectorStore } from "langchain-gpt4all/vectorstores/memory"; 4 | import { HydeRetriever } from "langchain-gpt4all/retrievers/hyde"; 5 | import { Document } from "langchain-gpt4all/document"; 6 | 7 | const embeddings = new OpenAIEmbeddings(); 8 | const vectorStore = new MemoryVectorStore(embeddings); 9 | const llm = new OpenAI(); 10 | const retriever = new HydeRetriever({ 11 | vectorStore, 12 | llm, 13 | k: 1, 14 | }); 15 | 16 | await vectorStore.addDocuments( 17 | [ 18 | "My name is John.", 19 | "My name is Bob.", 20 | "My favourite food is pizza.", 21 | "My favourite food is pasta.", 22 | ].map((pageContent) => new Document({ pageContent })) 23 | ); 24 | 25 | const results = await retriever.getRelevantDocuments( 26 | "What is my favourite food?" 27 | ); 28 | 29 | console.log(results); 30 | /* 31 | [ 32 | Document { pageContent: 'My favourite food is pasta.', metadata: {} } 33 | ] 34 | */ 35 | -------------------------------------------------------------------------------- /examples/src/retrievers/metal.ts: -------------------------------------------------------------------------------- 1 | /* eslint-disable @typescript-eslint/no-non-null-assertion */ 2 | import Metal from "@getmetal/metal-sdk"; 3 | import { MetalRetriever } from "langchain-gpt4all/retrievers/metal"; 4 | 5 | export const run = async () => { 6 | const MetalSDK = Metal.default; 7 | 8 | const client = new MetalSDK( 9 | process.env.METAL_API_KEY!, 10 | process.env.METAL_CLIENT_ID!, 11 | process.env.METAL_INDEX_ID 12 | ); 13 | const retriever = new MetalRetriever({ client }); 14 | 15 | const docs = await retriever.getRelevantDocuments("hello"); 16 | 17 | console.log(docs); 18 | }; 19 | -------------------------------------------------------------------------------- /examples/src/retrievers/supabase_hybrid.ts: -------------------------------------------------------------------------------- 1 | import { OpenAIEmbeddings } from "langchain-gpt4all/embeddings/openai"; 2 | import { createClient } from "@supabase/supabase-js"; 3 | import { SupabaseHybridSearch } from "langchain-gpt4all/retrievers/supabase"; 4 | 5 | export const run = async () => { 6 | const client = createClient( 7 | process.env.SUPABASE_URL || "", 8 | process.env.SUPABASE_PRIVATE_KEY || "" 9 | ); 10 | 11 | const embeddings = new OpenAIEmbeddings(); 12 | 13 | const retriever = new SupabaseHybridSearch(embeddings, { 14 | client, 15 | // Below are the defaults, expecting that you set up your Supabase table and functions according to the guide above. Please change if necessary.
16 | similarityK: 2, 17 | keywordK: 2, 18 | tableName: "documents", 19 | similarityQueryName: "match_documents", 20 | keywordQueryName: "kw_match_documents", 21 | }); 22 | 23 | const results = await retriever.getRelevantDocuments("hello bye"); 24 | 25 | console.log(results); 26 | }; 27 | -------------------------------------------------------------------------------- /examples/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "@tsconfig/recommended", 3 | "compilerOptions": { 4 | "outDir": "dist", 5 | "lib": [ 6 | "ES2021", 7 | "ES2022.Object", 8 | "DOM" 9 | ], 10 | "target": "ES2021", 11 | "module": "nodenext", 12 | "sourceMap": true, 13 | "allowSyntheticDefaultImports": true, 14 | "baseUrl": "./src", 15 | "declaration": true, 16 | "noImplicitReturns": true, 17 | "noFallthroughCasesInSwitch": true, 18 | "noUnusedParameters": true, 19 | "useDefineForClassFields": true, 20 | "strictPropertyInitialization": false 21 | }, 22 | "exclude": [ 23 | "node_modules/", 24 | "dist/", 25 | "tests/" 26 | ], 27 | "include": [ 28 | "./src" 29 | ] 30 | } 31 | -------------------------------------------------------------------------------- /langchain-gpt4all/.release-it.json: -------------------------------------------------------------------------------- 1 | { 2 | "github": { 3 | "release": true, 4 | "autoGenerate": true, 5 | "tokenRef": "GITHUB_TOKEN_RELEASE" 6 | }, 7 | "npm": { 8 | "versionArgs": [ 9 | "--workspaces-update=false" 10 | ] 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /langchain-gpt4all/babel.config.cjs: -------------------------------------------------------------------------------- 1 | // babel.config.js 2 | module.exports = { 3 | presets: [["@babel/preset-env", { targets: { node: true } }]], 4 | }; 5 | -------------------------------------------------------------------------------- /langchain-gpt4all/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | services: 3 | langchain-gpt4all-frontend: 4 | platform: linux/amd64 5 | image: notlangchain/langchainplus-frontend:latest 6 | ports: 7 | - 4173:4173 8 | environment: 9 | - BACKEND_URL=http://langchain-gpt4all-backend:8000 10 | - PUBLIC_BASE_URL=http://localhost:8000 11 | - PUBLIC_DEV_MODE=true 12 | depends_on: 13 | - langchain-gpt4all-backend 14 | langchain-gpt4all-backend: 15 | platform: linux/amd64 16 | image: notlangchain/langchainplus:latest 17 | environment: 18 | - PORT=8000 19 | - LANGCHAIN_ENV=local 20 | ports: 21 | - 8000:8000 22 | depends_on: 23 | - langchain-gpt4all-db 24 | langchain-gpt4all-db: 25 | image: postgres:14.1 26 | environment: 27 | - POSTGRES_PASSWORD=postgres 28 | - POSTGRES_USER=postgres 29 | - POSTGRES_DB=postgres 30 | ports: 31 | - 5432:5432 32 | -------------------------------------------------------------------------------- /langchain-gpt4all/jest.config.cjs: -------------------------------------------------------------------------------- 1 | /** @type {import('ts-jest').JestConfigWithTsJest} */ 2 | module.exports = { 3 | preset: "ts-jest/presets/default-esm", 4 | testEnvironment: "node", 5 | modulePathIgnorePatterns: ["dist/", "docs/"], 6 | moduleNameMapper: { 7 | "^(\\.{1,2}/.*)\\.js$": "$1", 8 | }, 9 | transform: { 10 | "^.+\\.m?[tj]sx?$": ["ts-jest", { useESM: true }], 11 | }, 12 | setupFiles: ["dotenv/config"], 13 | testTimeout: 20_000, 14 | }; 15 | 
-------------------------------------------------------------------------------- /langchain-gpt4all/src/agents/agent_toolkits/base.ts: -------------------------------------------------------------------------------- 1 | import { Tool } from "../../tools/base.js"; 2 | 3 | export abstract class Toolkit { 4 | abstract tools: Tool[]; 5 | } 6 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/agents/agent_toolkits/index.ts: -------------------------------------------------------------------------------- 1 | export { JsonToolkit, createJsonAgent } from "./json/json.js"; 2 | export { SqlToolkit, createSqlAgent, SqlCreatePromptArgs } from "./sql/sql.js"; 3 | export { 4 | RequestsToolkit, 5 | OpenApiToolkit, 6 | createOpenApiAgent, 7 | } from "./openapi/openapi.js"; 8 | export { 9 | VectorStoreInfo, 10 | VectorStoreToolkit, 11 | VectorStoreRouterToolkit, 12 | createVectorStoreAgent, 13 | createVectorStoreRouterAgent, 14 | } from "./vectorstore/vectorstore.js"; 15 | export { ZapierToolKit } from "./zapier/zapier.js"; 16 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/agents/agent_toolkits/vectorstore/prompt.ts: -------------------------------------------------------------------------------- 1 | export const VECTOR_PREFIX = `You are an agent designed to answer questions about sets of documents. 2 | You have access to tools for interacting with the documents, and the inputs to the tools are questions. 3 | Sometimes, you will be asked to provide sources for your questions, in which case you should use the appropriate tool to do so. 4 | If the question does not seem relevant to any of the tools provided, just return "I don't know" as the answer.`; 5 | 6 | export const VECTOR_ROUTER_PREFIX = `You are an agent designed to answer questions. 7 | You have access to tools for interacting with different sources, and the inputs to the tools are questions. 8 | Your main task is to decide which of the tools is relevant for answering the question at hand.
9 | For complex questions, you can break the question down into sub questions and use tools to answer the sub questions.`; 10 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/agents/agent_toolkits/zapier/zapier.ts: -------------------------------------------------------------------------------- 1 | import { Toolkit } from "../base.js"; 2 | import { Tool } from "../../../tools/base.js"; 3 | import { ZapierNLARunAction, ZapierNLAWrapper } from "../../../tools/zapier.js"; 4 | 5 | export class ZapierToolKit extends Toolkit { 6 | tools: Tool[] = []; 7 | 8 | static async fromZapierNLAWrapper( 9 | zapierNLAWrapper: ZapierNLAWrapper 10 | ): Promise<ZapierToolKit> { 11 | const toolkit = new ZapierToolKit(); 12 | const actions = await zapierNLAWrapper.listActions(); 13 | for (const action of actions) { 14 | const tool = new ZapierNLARunAction( 15 | zapierNLAWrapper, 16 | action.id, 17 | action.description, 18 | action.params 19 | ); 20 | toolkit.tools.push(tool); 21 | } 22 | return toolkit; 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/agents/load.ts: -------------------------------------------------------------------------------- 1 | import { Agent } from "./agent.js"; 2 | import { Tool } from "../tools/base.js"; 3 | import { BaseLanguageModel } from "../base_language/index.js"; 4 | import { loadFromHub } from "../util/hub.js"; 5 | import { FileLoader, loadFromFile } from "../util/load.js"; 6 | import { parseFileConfig } from "../util/parse.js"; 7 | 8 | const loadAgentFromFile: FileLoader = async ( 9 | file: string, 10 | path: string, 11 | llmAndTools?: { llm?: BaseLanguageModel; tools?: Tool[] } 12 | ) => { 13 | const serialized = parseFileConfig(file, path); 14 | return Agent.deserialize({ ...serialized, ...llmAndTools }); 15 | }; 16 | 17 | export const loadAgent = async ( 18 | uri: string, 19 | llmAndTools?: { llm?: BaseLanguageModel; tools?: Tool[] } 20 | ): Promise<Agent> => { 21 | const hubResult = await loadFromHub( 22 | uri, 23 | loadAgentFromFile, 24 | "agents", 25 | new Set(["json", "yaml"]), 26 | llmAndTools 27 | ); 28 | if (hubResult) { 29 | return hubResult; 30 | } 31 | 32 | return loadFromFile(uri, loadAgentFromFile, llmAndTools); 33 | }; 34 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/agents/mrkl/prompt.ts: -------------------------------------------------------------------------------- 1 | export const PREFIX = `Answer the following questions as best you can. You have access to the following tools:`; 2 | export const FORMAT_INSTRUCTIONS = `Use the following format in your response: 3 | 4 | Question: the input question you must answer 5 | Thought: you should always think about what to do 6 | Action: the action to take, should be one of [{tool_names}] 7 | Action Input: the input to the action 8 | Observation: the result of the action 9 | ... (this Thought/Action/Action Input/Observation can repeat N times) 10 | Thought: I now know the final answer 11 | Final Answer: the final answer to the original input question`; 12 | export const SUFFIX = `Begin!
13 | 14 | Question: {input} 15 | Thought:{agent_scratchpad}`; 16 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/cache/base.ts: -------------------------------------------------------------------------------- 1 | import hash from "object-hash"; 2 | 3 | /** 4 | * This cache key should be consistent across all versions of langchain-gpt4all. 5 | * It is currently NOT consistent across versions of langchain-gpt4all. 6 | * 7 | * A huge benefit of having a remote cache (like Redis) is that you can 8 | * access the cache from different processes/machines. This allows you to 9 | * separate concerns and scale horizontally. 10 | * 11 | * TODO: Make cache key consistent across versions of langchain-gpt4all. 12 | */ 13 | export const getCacheKey = (...strings: string[]): string => 14 | hash(strings.join("_")); 15 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/cache/index.ts: -------------------------------------------------------------------------------- 1 | import { getCacheKey } from "./base.js"; 2 | import { Generation, BaseCache } from "../schema/index.js"; 3 | 4 | const GLOBAL_MAP = new Map(); 5 | 6 | export class InMemoryCache<T = Generation[]> extends BaseCache<T> { 7 | private cache: Map<string, T>; 8 | 9 | constructor(map?: Map<string, T>) { 10 | super(); 11 | this.cache = map ?? new Map(); 12 | } 13 | 14 | lookup(prompt: string, llmKey: string): Promise<T | null> { 15 | return Promise.resolve(this.cache.get(getCacheKey(prompt, llmKey)) ?? null); 16 | } 17 | 18 | async update(prompt: string, llmKey: string, value: T): Promise<void> { 19 | this.cache.set(getCacheKey(prompt, llmKey), value); 20 | } 21 | 22 | static global(): InMemoryCache { 23 | return new InMemoryCache(GLOBAL_MAP); 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/cache/tests/cache.test.ts: -------------------------------------------------------------------------------- 1 | import { test, expect } from "@jest/globals"; 2 | 3 | import { InMemoryCache } from "../index.js"; 4 | 5 | test("InMemoryCache", async () => { 6 | const cache = new InMemoryCache(); 7 | await cache.update("foo", "bar", [{ text: "baz" }]); 8 | expect(await cache.lookup("foo", "bar")).toEqual([{ text: "baz" }]); 9 | }); 10 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/cache/tests/redis.test.ts: -------------------------------------------------------------------------------- 1 | import { test, expect, jest } from "@jest/globals"; 2 | import hash from "object-hash"; 3 | 4 | import { RedisCache } from "../redis.js"; 5 | 6 | const sha256 = (str: string) => hash(str); 7 | 8 | test("RedisCache", async () => { 9 | const redis = { 10 | get: jest.fn(async (key: string) => { 11 | if (key === sha256("foo_bar_0")) { 12 | return "baz"; 13 | } 14 | return null; 15 | }), 16 | }; 17 | // eslint-disable-next-line @typescript-eslint/no-explicit-any 18 | const cache = new RedisCache(redis as any); 19 | expect(await cache.lookup("foo", "bar")).toEqual([{ text: "baz" }]); 20 | }); 21 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/callbacks/handlers/initialize.ts: -------------------------------------------------------------------------------- 1 | import { LangChainTracer } from "./tracers.js"; 2 | 3 | export async function getTracingCallbackHandler( 4 | session?: string 5 | ): Promise<LangChainTracer> { 6 | const tracer = new LangChainTracer(); 7 | if
(session) { 8 | await tracer.loadSession(session); 9 | } else { 10 | await tracer.loadDefaultSession(); 11 | } 12 | return tracer; 13 | } 14 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/callbacks/index.ts: -------------------------------------------------------------------------------- 1 | export { 2 | BaseCallbackHandler, 3 | CallbackHandlerMethods, 4 | BaseCallbackHandlerInput, 5 | } from "./base.js"; 6 | 7 | export { 8 | LangChainTracer, 9 | BaseRun, 10 | LLMRun, 11 | ChainRun, 12 | ToolRun, 13 | } from "./handlers/tracers.js"; 14 | 15 | export { getTracingCallbackHandler } from "./handlers/initialize.js"; 16 | 17 | export { 18 | CallbackManager, 19 | CallbackManagerForChainRun, 20 | CallbackManagerForLLMRun, 21 | CallbackManagerForToolRun, 22 | CallbackManagerOptions, 23 | Callbacks, 24 | } from "./manager.js"; 25 | export { ConsoleCallbackHandler } from "./handlers/console.js"; 26 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/chains/constitutional_ai/constitutional_principle.ts: -------------------------------------------------------------------------------- 1 | import { SerializedConstitutionalPrinciple } from "../serde.js"; 2 | 3 | export class ConstitutionalPrinciple { 4 | critiqueRequest: string; 5 | 6 | revisionRequest: string; 7 | 8 | name: string; 9 | 10 | constructor({ 11 | critiqueRequest, 12 | revisionRequest, 13 | name, 14 | }: { 15 | critiqueRequest: string; 16 | revisionRequest: string; 17 | name?: string; 18 | }) { 19 | this.critiqueRequest = critiqueRequest; 20 | this.revisionRequest = revisionRequest; 21 | this.name = name ?? "Constitutional Principle"; 22 | } 23 | 24 | serialize(): SerializedConstitutionalPrinciple { 25 | return { 26 | _type: "constitutional_principle", 27 | critiqueRequest: this.critiqueRequest, 28 | revisionRequest: this.revisionRequest, 29 | name: this.name, 30 | }; 31 | } 32 | } 33 | 34 | export const PRINCIPLES: { 35 | [key: string]: ConstitutionalPrinciple; 36 | } = {}; 37 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/chains/summarization/refine_prompts.ts: -------------------------------------------------------------------------------- 1 | import { PromptTemplate } from "../../prompts/prompt.js"; 2 | 3 | const refinePromptTemplate = `Your job is to produce a final summary 4 | We have provided an existing summary up to a certain point: "{existing_answer}" 5 | We have the opportunity to refine the existing summary 6 | (only if needed) with some more context below. 7 | ------------ 8 | "{text}" 9 | ------------ 10 | 11 | Given the new context, refine the original summary 12 | If the context isn't useful, return the original summary. 
13 | 14 | REFINED SUMMARY:`; 15 | 16 | export const REFINE_PROMPT = /* #__PURE__ */ new PromptTemplate({ 17 | template: refinePromptTemplate, 18 | inputVariables: ["existing_answer", "text"], 19 | }); 20 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/chains/summarization/stuff_prompts.ts: -------------------------------------------------------------------------------- 1 | /* eslint-disable spaced-comment */ 2 | import { PromptTemplate } from "../../prompts/prompt.js"; 3 | 4 | const template = `Write a concise summary of the following: 5 | 6 | 7 | "{text}" 8 | 9 | 10 | CONCISE SUMMARY:`; 11 | 12 | export const DEFAULT_PROMPT = /*#__PURE__*/ new PromptTemplate({ 13 | template, 14 | inputVariables: ["text"], 15 | }); 16 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/chains/tests/conversation_chain.int.test.ts: -------------------------------------------------------------------------------- 1 | import { test } from "@jest/globals"; 2 | import { OpenAI } from "../../llms/openai.js"; 3 | import { ConversationChain } from "../conversation.js"; 4 | 5 | test("Test ConversationChain", async () => { 6 | const model = new OpenAI({ modelName: "text-ada-001" }); 7 | const chain = new ConversationChain({ llm: model }); 8 | const res = await chain.call({ input: "my favorite color" }); 9 | console.log({ res }); 10 | }); 11 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/chains/tests/openai_moderation.int.test.ts: -------------------------------------------------------------------------------- 1 | import { test } from "@jest/globals"; 2 | import { OpenAIModerationChain } from "../openai_moderation.js"; 3 | 4 | test("OpenAI Moderation Test", async () => { 5 | const badString = "I hate myself and want to do harm to myself"; 6 | const goodString = 7 | "The cat (Felis catus) is a domestic species of small carnivorous mammal."; 8 | 9 | const moderation = new OpenAIModerationChain(); 10 | const { output: badResult } = await moderation.call({ 11 | input: badString, 12 | }); 13 | 14 | const { output: goodResult } = await moderation.call({ 15 | input: goodString, 16 | }); 17 | 18 | expect(badResult).toEqual( 19 | "Text was found that violates OpenAI's content policy." 20 | ); 21 | expect(goodResult).toEqual( 22 | "The cat (Felis catus) is a domestic species of small carnivorous mammal." 23 | ); 24 | }); 25 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/chat_models/index.ts: -------------------------------------------------------------------------------- 1 | /* #__PURE__ */ console.error( 2 | "[WARN] Importing from 'langchain-gpt4all/chat_models' is deprecated. Import from eg. 'langchain-gpt4all/chat_models/openai' instead. See https://js.langchain-gpt4all.com/docs/getting-started/install#updating-from-0052 for upgrade instructions." 
3 | ); 4 | 5 | export { BaseChatModel, BaseChatModelParams, SimpleChatModel } from "./base.js"; 6 | export { ChatOpenAI } from "./openai.js"; 7 | export { ChatAnthropic } from "./anthropic.js"; 8 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/docstore/base.ts: -------------------------------------------------------------------------------- 1 | import { Document } from "../document.js"; 2 | 3 | export abstract class Docstore { 4 | abstract search(search: string): Document | string; 5 | 6 | abstract add(texts: Record<string, Document>): void; 7 | } 8 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/docstore/in_memory.ts: -------------------------------------------------------------------------------- 1 | import { Document } from "../document.js"; 2 | import { Docstore } from "./base.js"; 3 | 4 | export class InMemoryDocstore extends Docstore { 5 | _docs: Map<string, Document>; 6 | 7 | constructor(docs?: Map<string, Document>) { 8 | super(); 9 | this._docs = docs ?? new Map(); 10 | } 11 | 12 | /** Method for getting count of documents in _docs */ 13 | get count() { 14 | return this._docs.size; 15 | } 16 | 17 | search(search: string): Document | string { 18 | return this._docs.get(search) ?? `ID ${search} not found.`; 19 | } 20 | 21 | add(texts: Record<string, Document>): void { 22 | const keys = [...this._docs.keys()]; 23 | const overlapping = Object.keys(texts).filter((x) => keys.includes(x)); 24 | 25 | if (overlapping.length > 0) { 26 | throw new Error(`Tried to add ids that already exist: ${overlapping}`); 27 | } 28 | 29 | for (const [key, value] of Object.entries(texts)) { 30 | this._docs.set(key, value); 31 | } 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/docstore/index.ts: -------------------------------------------------------------------------------- 1 | export { Document } from "../document.js"; 2 | export { Docstore } from "./base.js"; 3 | export { InMemoryDocstore } from "./in_memory.js"; 4 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/document.ts: -------------------------------------------------------------------------------- 1 | export interface DocumentInput< 2 | // eslint-disable-next-line @typescript-eslint/no-explicit-any 3 | Metadata extends Record<string, any> = Record<string, any> 4 | > { 5 | pageContent: string; 6 | 7 | metadata?: Metadata; 8 | } 9 | 10 | /** 11 | * Interface for interacting with a document. 12 | */ 13 | export class Document< 14 | // eslint-disable-next-line @typescript-eslint/no-explicit-any 15 | Metadata extends Record<string, any> = Record<string, any> 16 | > implements DocumentInput<Metadata> 17 | { 18 | pageContent: string; 19 | 20 | metadata: Metadata; 21 | 22 | constructor(fields: DocumentInput<Metadata>) { 23 | this.pageContent = fields.pageContent 24 | ? fields.pageContent.toString() 25 | : this.pageContent; 26 | this.metadata = fields.metadata ??
({} as Metadata); 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/document_loaders/base.ts: -------------------------------------------------------------------------------- 1 | import { 2 | RecursiveCharacterTextSplitter, 3 | TextSplitter, 4 | } from "../text_splitter.js"; 5 | import { Document } from "../document.js"; 6 | 7 | export interface DocumentLoader { 8 | load(): Promise<Document[]>; 9 | loadAndSplit(textSplitter?: TextSplitter): Promise<Document[]>; 10 | } 11 | 12 | export abstract class BaseDocumentLoader implements DocumentLoader { 13 | abstract load(): Promise<Document[]>; 14 | 15 | async loadAndSplit( 16 | splitter: TextSplitter = new RecursiveCharacterTextSplitter() 17 | ): Promise<Document[]> { 18 | const docs = await this.load(); 19 | return splitter.splitDocuments(docs); 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/document_loaders/fs/notion.ts: -------------------------------------------------------------------------------- 1 | import { DirectoryLoader, UnknownHandling } from "./directory.js"; 2 | import { TextLoader } from "./text.js"; 3 | 4 | export class NotionLoader extends DirectoryLoader { 5 | constructor(directoryPath: string) { 6 | super( 7 | directoryPath, 8 | { 9 | ".md": (filePath) => new TextLoader(filePath), 10 | }, 11 | true, 12 | UnknownHandling.Ignore 13 | ); 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/document_loaders/fs/srt.ts: -------------------------------------------------------------------------------- 1 | import type SRTParserT from "srt-parser-2"; 2 | import { TextLoader } from "./text.js"; 3 | 4 | export class SRTLoader extends TextLoader { 5 | constructor(filePathOrBlob: string | Blob) { 6 | super(filePathOrBlob); 7 | } 8 | 9 | protected async parse(raw: string): Promise<string[]> { 10 | const { SRTParser2 } = await SRTLoaderImports(); 11 | const parser = new SRTParser2(); 12 | const srts = parser.fromSrt(raw); 13 | return [ 14 | srts 15 | .map((srt) => srt.text) 16 | .filter(Boolean) 17 | .join(" "), 18 | ]; 19 | } 20 | } 21 | 22 | async function SRTLoaderImports(): Promise<{ 23 | SRTParser2: typeof SRTParserT.default; 24 | }> { 25 | try { 26 | const SRTParser2 = (await import("srt-parser-2")).default.default; 27 | return { SRTParser2 }; 28 | } catch (e) { 29 | throw new Error( 30 | "Please install srt-parser-2 as a dependency with, e.g. `yarn add srt-parser-2`" 31 | ); 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/document_loaders/tests/cheerio.int.test.ts: -------------------------------------------------------------------------------- 1 | import { expect, test } from "@jest/globals"; 2 | import { CheerioWebBaseLoader } from "../web/cheerio.js"; 3 | 4 | test("Test cheerio web scraper loader", async () => { 5 | const loader = new CheerioWebBaseLoader( 6 | "https://news.ycombinator.com/item?id=34817881" 7 | ); 8 | await loader.load(); 9 | }); 10 | 11 | test("Test cheerio web scraper loader with selector", async () => { 12 | const selectH1 = "h1"; 13 | const loader = new CheerioWebBaseLoader("https://about.google/commitments/", { 14 | selector: selectH1, 15 | }); 16 | 17 | const doc = await loader.load(); 18 | expect(doc[0].pageContent.trim()).toBe( 19 | "Committed to significantly improving the lives of as many people as possible."
20 | ); 21 | }); 22 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/document_loaders/tests/college_confidential.int.test.ts: -------------------------------------------------------------------------------- 1 | import { test } from "@jest/globals"; 2 | import { CollegeConfidentialLoader } from "../web/college_confidential.js"; 3 | 4 | test("Test College confidential loader", async () => { 5 | const loader = new CollegeConfidentialLoader( 6 | "https://www.collegeconfidential.com/colleges/brown-university/" 7 | ); 8 | await loader.load(); 9 | }); 10 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/document_loaders/tests/docx.test.ts: -------------------------------------------------------------------------------- 1 | import { test, expect } from "@jest/globals"; 2 | import * as url from "node:url"; 3 | import * as path from "node:path"; 4 | import { DocxLoader } from "../fs/docx.js"; 5 | 6 | test("Test Word doc loader from file", async () => { 7 | const filePath = path.resolve( 8 | path.dirname(url.fileURLToPath(import.meta.url)), 9 | "./example_data/attention.docx" 10 | ); 11 | const loader = new DocxLoader(filePath); 12 | const docs = await loader.load(); 13 | 14 | expect(docs.length).toBe(1); // not much text in the example 15 | expect(docs[0].pageContent).toContain("an interesting activity"); 16 | }); 17 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/document_loaders/tests/epub.test.ts: -------------------------------------------------------------------------------- 1 | import { test, expect } from "@jest/globals"; 2 | import * as url from "node:url"; 3 | import * as path from "node:path"; 4 | import { EPubLoader } from "../fs/epub.js"; 5 | 6 | test("Test EPub loader from file", async () => { 7 | const filePath = path.resolve( 8 | path.dirname(url.fileURLToPath(import.meta.url)), 9 | "./example_data/attention.epub" 10 | ); 11 | const loader = new EPubLoader(filePath); 12 | const docs = await loader.load(); 13 | 14 | expect(docs.length).toBe(3); 15 | expect(docs[0].pageContent).toContain("Attention Is All You Need"); 16 | }); 17 | 18 | test("Test EPub loader from file to single document", async () => { 19 | const filePath = path.resolve( 20 | path.dirname(url.fileURLToPath(import.meta.url)), 21 | "./example_data/attention.epub" 22 | ); 23 | const loader = new EPubLoader(filePath, { splitChapters: false }); 24 | const docs = await loader.load(); 25 | 26 | expect(docs.length).toBe(1); 27 | expect(docs[0].pageContent).toContain("Attention Is All You Need"); 28 | }); 29 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/document_loaders/tests/example_data/1706.03762.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lujstn/langchainjs-gpt4all/8a90c9c1c6a2e2e8590523106db3c533724d6ceb/langchain-gpt4all/src/document_loaders/tests/example_data/1706.03762.pdf -------------------------------------------------------------------------------- /langchain-gpt4all/src/document_loaders/tests/example_data/attention.docx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lujstn/langchainjs-gpt4all/8a90c9c1c6a2e2e8590523106db3c533724d6ceb/langchain-gpt4all/src/document_loaders/tests/example_data/attention.docx 
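The loaders exercised by these tests all implement the DocumentLoader interface from document_loaders/base.ts above, so any of them can be paired with a text splitter. A sketch, with entrypoint paths assumed and run inside an ES module so top-level await is available:

import { EPubLoader } from "langchain-gpt4all/document_loaders/fs/epub";
import { RecursiveCharacterTextSplitter } from "langchain-gpt4all/text_splitter";

const loader = new EPubLoader("./attention.epub");

// loadAndSplit() falls back to a default RecursiveCharacterTextSplitter when
// called with no argument; an explicit one is passed here to show the knobs.
const chunks = await loader.loadAndSplit(
  new RecursiveCharacterTextSplitter({ chunkSize: 500, chunkOverlap: 50 })
);
console.log(chunks.length);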
-------------------------------------------------------------------------------- /langchain-gpt4all/src/document_loaders/tests/example_data/attention.epub: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lujstn/langchainjs-gpt4all/8a90c9c1c6a2e2e8590523106db3c533724d6ceb/langchain-gpt4all/src/document_loaders/tests/example_data/attention.epub -------------------------------------------------------------------------------- /langchain-gpt4all/src/document_loaders/tests/example_data/complex.json: -------------------------------------------------------------------------------- 1 | { 2 | "1": { 3 | "body": "BD 2023 SUMMER", 4 | "from": "LinkedIn Job", 5 | "labels": ["IMPORTANT", "CATEGORY_UPDATES", "INBOX"] 6 | }, 7 | "2": { 8 | "body": "Intern, Treasury and other roles are available", 9 | "from": "LinkedIn Job2", 10 | "labels": ["IMPORTANT"], 11 | "other": { 12 | "name": "plop", 13 | "surname": "bob" 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/document_loaders/tests/example_data/example.txt: -------------------------------------------------------------------------------- 1 | Foo 2 | Bar 3 | Baz 4 | 5 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/document_loaders/tests/github.int.test.ts: -------------------------------------------------------------------------------- 1 | import { test } from "@jest/globals"; 2 | import { GithubRepoLoader } from "../web/github.js"; 3 | 4 | test("Test GithubRepoLoader", async () => { 5 | const loader = new GithubRepoLoader( 6 | "https://github.com/lujstn/langchainjs-gpt4all", 7 | { branch: "main", recursive: false, unknown: "warn" } 8 | ); 9 | const documents = await loader.load(); 10 | console.log(documents[0].pageContent); 11 | }); 12 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/document_loaders/tests/hn.int.test.ts: -------------------------------------------------------------------------------- 1 | import { test } from "@jest/globals"; 2 | import { HNLoader } from "../web/hn.js"; 3 | 4 | test("Test Hacker News loader", async () => { 5 | const loader = new HNLoader("https://news.ycombinator.com/item?id=34817881"); 6 | await loader.load(); 7 | }); 8 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/document_loaders/tests/imsdb.test.ts: -------------------------------------------------------------------------------- 1 | import { test } from "@jest/globals"; 2 | import { IMSDBLoader } from "../web/imsdb.js"; 3 | 4 | test("Test IMSDB loader", async () => { 5 | const loader = new IMSDBLoader( 6 | "https://imsdb.com/scripts/BlacKkKlansman.html" 7 | ); 8 | await loader.load(); 9 | }); 10 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/document_loaders/tests/jsonl.test.ts: -------------------------------------------------------------------------------- 1 | import * as url from "node:url"; 2 | import * as path from "node:path"; 3 | import { test, expect } from "@jest/globals"; 4 | import { JSONLinesLoader } from "../fs/json.js"; 5 | import { Document } from "../../document.js"; 6 | 7 | test("Test JSON loader from file", async () => { 8 | const filePath = path.resolve( 9 | path.dirname(url.fileURLToPath(import.meta.url)), 10 | 
"./example_data/Star_Wars_The_Clone_Wars_S06E07_Crisis_at_the_Heart.jsonl" 11 | ); 12 | const loader = new JSONLinesLoader(filePath, "/html"); 13 | const docs = await loader.load(); 14 | expect(docs.length).toBe(32); 15 | expect(docs[0]).toEqual( 16 | new Document({ 17 | metadata: { source: filePath, line: 1 }, 18 | pageContent: 19 | "Corruption discovered at the core of the Banking Clan!", 20 | }) 21 | ); 22 | }); 23 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/document_loaders/tests/notion.test.ts: -------------------------------------------------------------------------------- 1 | import { test, expect } from "@jest/globals"; 2 | import * as url from "node:url"; 3 | import * as path from "node:path"; 4 | import { NotionLoader } from "../fs/notion.js"; 5 | 6 | test("Test Notion Loader", async () => { 7 | const directoryPath = path.resolve( 8 | path.dirname(url.fileURLToPath(import.meta.url)), 9 | "./example_data" 10 | ); 11 | const loader = new NotionLoader(directoryPath); 12 | const docs = await loader.load(); 13 | 14 | expect(docs.length).toBe(1); 15 | expect(docs[0].pageContent).toContain("Testing the notion markdownloader"); 16 | }); 17 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/document_loaders/tests/pdf.test.ts: -------------------------------------------------------------------------------- 1 | import { test, expect } from "@jest/globals"; 2 | import * as url from "node:url"; 3 | import * as path from "node:path"; 4 | import { PDFLoader } from "../fs/pdf.js"; 5 | 6 | test("Test PDF loader from file", async () => { 7 | const filePath = path.resolve( 8 | path.dirname(url.fileURLToPath(import.meta.url)), 9 | "./example_data/1706.03762.pdf" 10 | ); 11 | const loader = new PDFLoader(filePath); 12 | const docs = await loader.load(); 13 | 14 | expect(docs.length).toBe(15); 15 | expect(docs[0].pageContent).toContain("Attention Is All You Need"); 16 | }); 17 | 18 | test("Test PDF loader from file to single document", async () => { 19 | const filePath = path.resolve( 20 | path.dirname(url.fileURLToPath(import.meta.url)), 21 | "./example_data/1706.03762.pdf" 22 | ); 23 | const loader = new PDFLoader(filePath, { splitPages: false }); 24 | const docs = await loader.load(); 25 | 26 | expect(docs.length).toBe(1); 27 | expect(docs[0].pageContent).toContain("Attention Is All You Need"); 28 | }); 29 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/document_loaders/tests/playwright_web.int.test.ts: -------------------------------------------------------------------------------- 1 | import { expect, test } from "@jest/globals"; 2 | import { PlaywrightWebBaseLoader } from "../web/playwright.js"; 3 | 4 | test("Test playwright web scraper loader", async () => { 5 | const loader = new PlaywrightWebBaseLoader("https://www.google.com/"); 6 | const result = await loader.load(); 7 | 8 | expect(result).toBeDefined(); 9 | expect(result.length).toBe(1); 10 | }, 20_000); 11 | 12 | test("Test playwright web scraper loader with evaluate options", async () => { 13 | let nrTimesCalled = 0; 14 | const loader = new PlaywrightWebBaseLoader("https://www.google.com/", { 15 | launchOptions: { 16 | headless: true, 17 | }, 18 | gotoOptions: { 19 | waitUntil: "domcontentloaded", 20 | }, 21 | async evaluate(page) { 22 | nrTimesCalled += 1; 23 | return page.content(); 24 | }, 25 | }); 26 | const result = await loader.load(); 27 | 28 | 
expect(nrTimesCalled).toBe(1); 29 | expect(result).toBeDefined(); 30 | expect(result.length).toBe(1); 31 | }, 20_000); 32 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/document_loaders/tests/srt-blob.test.ts: -------------------------------------------------------------------------------- 1 | import * as url from "node:url"; 2 | import * as path from "node:path"; 3 | import * as fs from "node:fs/promises"; 4 | import { test, expect } from "@jest/globals"; 5 | import { SRTLoader } from "../fs/srt.js"; 6 | 7 | test("Test SRT loader from blob", async () => { 8 | const filePath = path.resolve( 9 | path.dirname(url.fileURLToPath(import.meta.url)), 10 | "./example_data/Star_Wars_The_Clone_Wars_S06E07_Crisis_at_the_Heart.srt" 11 | ); 12 | const loader = new SRTLoader( 13 | new Blob([await fs.readFile(filePath)], { type: "application/x-subrip" }) 14 | ); 15 | const docs = await loader.load(); 16 | expect(docs.length).toBe(1); 17 | expect(docs[0].metadata).toMatchInlineSnapshot(` 18 | { 19 | "blobType": "application/x-subrip", 20 | "source": "blob", 21 | } 22 | `); 23 | expect(docs[0].pageContent).toContain("Corruption discovered"); 24 | }); 25 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/document_loaders/tests/srt.test.ts: -------------------------------------------------------------------------------- 1 | import * as url from "node:url"; 2 | import * as path from "node:path"; 3 | import { test, expect } from "@jest/globals"; 4 | import { SRTLoader } from "../fs/srt.js"; 5 | 6 | test("Test SRT loader from file", async () => { 7 | const filePath = path.resolve( 8 | path.dirname(url.fileURLToPath(import.meta.url)), 9 | "./example_data/Star_Wars_The_Clone_Wars_S06E07_Crisis_at_the_Heart.srt" 10 | ); 11 | const loader = new SRTLoader(filePath); 12 | const docs = await loader.load(); 13 | expect(docs.length).toBe(1); 14 | expect(docs[0].metadata).toMatchInlineSnapshot(` 15 | { 16 | "source": "${filePath}", 17 | } 18 | `); 19 | expect(docs[0].pageContent).toContain("Corruption discovered"); 20 | }); 21 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/document_loaders/tests/text-blob.test.ts: -------------------------------------------------------------------------------- 1 | import { test, expect } from "@jest/globals"; 2 | import { TextLoader } from "../fs/text.js"; 3 | 4 | test("Test Text loader from blob", async () => { 5 | const loader = new TextLoader( 6 | new Blob(["Hello, world!"], { type: "text/plain" }) 7 | ); 8 | const docs = await loader.load(); 9 | 10 | expect(docs.length).toBe(1); 11 | expect(docs[0].pageContent).toBe("Hello, world!"); 12 | expect(docs[0].metadata).toMatchInlineSnapshot(` 13 | { 14 | "blobType": "text/plain", 15 | "source": "blob", 16 | } 17 | `); 18 | }); 19 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/document_loaders/tests/text.test.ts: -------------------------------------------------------------------------------- 1 | import { test, expect } from "@jest/globals"; 2 | import { TextLoader } from "../fs/text.js"; 3 | 4 | test("Test Text loader from file", async () => { 5 | const loader = new TextLoader( 6 | "../examples/src/document_loaders/example_data/example.txt" 7 | ); 8 | const docs = await loader.load(); 9 | 10 | expect(docs.length).toBe(1); 11 | expect(docs[0].pageContent).toMatchInlineSnapshot(` 12 | "Foo 13 | Bar 14 | Baz 15 | 
16 | " 17 | `); 18 | expect(docs[0].metadata).toMatchInlineSnapshot(` 19 | { 20 | "source": "../examples/src/document_loaders/example_data/example.txt", 21 | } 22 | `); 23 | }); 24 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/document_loaders/web/college_confidential.ts: -------------------------------------------------------------------------------- 1 | import { Document } from "../../document.js"; 2 | import { CheerioWebBaseLoader } from "./cheerio.js"; 3 | 4 | export class CollegeConfidentialLoader extends CheerioWebBaseLoader { 5 | constructor(webPath: string) { 6 | super(webPath); 7 | } 8 | 9 | public async load(): Promise { 10 | const $ = await this.scrape(); 11 | const text = $("main[class='skin-handler']").text(); 12 | const metadata = { source: this.webPath }; 13 | return [new Document({ pageContent: text, metadata })]; 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/document_loaders/web/imsdb.ts: -------------------------------------------------------------------------------- 1 | import { Document } from "../../document.js"; 2 | import { CheerioWebBaseLoader } from "./cheerio.js"; 3 | 4 | export class IMSDBLoader extends CheerioWebBaseLoader { 5 | constructor(public webPath: string) { 6 | super(webPath); 7 | } 8 | 9 | public async load(): Promise { 10 | const $ = await this.scrape(); 11 | const text = $("td[class='scrtext']").text().trim(); 12 | const metadata = { source: this.webPath }; 13 | return [new Document({ pageContent: text, metadata })]; 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/embeddings/base.ts: -------------------------------------------------------------------------------- 1 | import { AsyncCaller, AsyncCallerParams } from "../util/async_caller.js"; 2 | 3 | export type EmbeddingsParams = AsyncCallerParams; 4 | 5 | export abstract class Embeddings { 6 | /** 7 | * The async caller should be used by subclasses to make any async calls, 8 | * which will thus benefit from the concurrency and retry logic. 9 | */ 10 | caller: AsyncCaller; 11 | 12 | constructor(params: EmbeddingsParams) { 13 | this.caller = new AsyncCaller(params ?? {}); 14 | } 15 | 16 | abstract embedDocuments(documents: string[]): Promise; 17 | 18 | abstract embedQuery(document: string): Promise; 19 | } 20 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/embeddings/fake.ts: -------------------------------------------------------------------------------- 1 | import { Embeddings, EmbeddingsParams } from "./base.js"; 2 | 3 | export class FakeEmbeddings extends Embeddings { 4 | constructor(params?: EmbeddingsParams) { 5 | super(params ?? {}); 6 | } 7 | 8 | embedDocuments(documents: string[]): Promise { 9 | return Promise.resolve(documents.map(() => [0.1, 0.2, 0.3, 0.4])); 10 | } 11 | 12 | embedQuery(_: string): Promise { 13 | return Promise.resolve([0.1, 0.2, 0.3, 0.4]); 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/embeddings/index.ts: -------------------------------------------------------------------------------- 1 | /* #__PURE__ */ console.error( 2 | "[WARN] Importing from 'langchain-gpt4all/embeddings' is deprecated. Import from eg. 'langchain-gpt4all/embeddings/openai' instead. 
-------------------------------------------------------------------------------- /langchain-gpt4all/src/embeddings/index.ts: -------------------------------------------------------------------------------- 1 | /* #__PURE__ */ console.error( 2 | "[WARN] Importing from 'langchain-gpt4all/embeddings' is deprecated. Import from eg. 'langchain-gpt4all/embeddings/openai' instead. See https://js.langchain-gpt4all.com/docs/getting-started/install#updating-from-0052 for upgrade instructions." 3 | ); 4 | 5 | export { OpenAIEmbeddings } from "./openai.js"; 6 | export { CohereEmbeddings } from "./cohere.js"; 7 | export { Embeddings } from "./base.js"; 8 | export { FakeEmbeddings } from "./fake.js"; 9 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/embeddings/tests/cohere.int.test.ts: -------------------------------------------------------------------------------- 1 | import { test, expect } from "@jest/globals"; 2 | import { CohereEmbeddings } from "../cohere.js"; 3 | 4 | test("Test CohereEmbeddings.embedQuery", async () => { 5 | const embeddings = new CohereEmbeddings(); 6 | const res = await embeddings.embedQuery("Hello world"); 7 | expect(typeof res[0]).toBe("number"); 8 | }); 9 | 10 | test("Test CohereEmbeddings.embedDocuments", async () => { 11 | const embeddings = new CohereEmbeddings(); 12 | const res = await embeddings.embedDocuments(["Hello world", "Bye bye"]); 13 | expect(res).toHaveLength(2); 14 | expect(typeof res[0][0]).toBe("number"); 15 | expect(typeof res[1][0]).toBe("number"); 16 | }); 17 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/embeddings/tests/openai.int.test.ts: -------------------------------------------------------------------------------- 1 | import { test, expect } from "@jest/globals"; 2 | import { OpenAIEmbeddings } from "../openai.js"; 3 | 4 | test("Test OpenAIEmbeddings.embedQuery", async () => { 5 | const embeddings = new OpenAIEmbeddings(); 6 | const res = await embeddings.embedQuery("Hello world"); 7 | expect(typeof res[0]).toBe("number"); 8 | }); 9 | 10 | test("Test OpenAIEmbeddings.embedDocuments", async () => { 11 | const embeddings = new OpenAIEmbeddings(); 12 | const res = await embeddings.embedDocuments(["Hello world", "Bye bye"]); 13 | expect(res).toHaveLength(2); 14 | expect(typeof res[0][0]).toBe("number"); 15 | expect(typeof res[1][0]).toBe("number"); 16 | }); 17 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/experimental/autogpt/index.ts: -------------------------------------------------------------------------------- 1 | export { AutoGPTPrompt, AutoGPTPromptInput } from "./prompt.js"; 2 | 3 | export { AutoGPTOutputParser, preprocessJsonInput } from "./output_parser.js"; 4 | 5 | export { AutoGPT, AutoGPTInput } from "./agent.js"; 6 | 7 | export { AutoGPTAction } from "./schema.js"; 8 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/experimental/autogpt/schema.ts: -------------------------------------------------------------------------------- 1 | import { StructuredTool } from "../../tools/base.js"; 2 | 3 | export type ObjectTool = StructuredTool; 4 | 5 | export const FINISH_NAME = "finish"; 6 | 7 | export interface AutoGPTAction { 8 | name: string; 9 | // eslint-disable-next-line @typescript-eslint/no-explicit-any 10 | args: Record<string, any>; 11 | } 12 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/experimental/babyagi/index.ts: -------------------------------------------------------------------------------- 1 | export { TaskCreationChain } from "./task_creation.js"; 2 | export { TaskExecutionChain } from "./task_execution.js"; 3 | export { TaskPrioritizationChain } from "./task_prioritization.js"; 4 | export { BabyAGI, Task, BabyAGIInputs } from "./agent.js"; 5 | 
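The BabyAGI agent is assembled from the three task chains re-exported above. A sketch of wiring them up by hand, assuming TaskCreationChain exposes the same fromLLM factory shape as the two chains shown next (the entrypoint paths are also assumptions):

import { OpenAI } from "langchain-gpt4all/llms/openai";
import {
  TaskCreationChain,
  TaskExecutionChain,
  TaskPrioritizationChain,
} from "langchain-gpt4all/experimental/babyagi";

const llm = new OpenAI({ temperature: 0 });

// Each factory takes the LLMChain inputs minus the prompt,
// which the chain supplies itself.
const creationChain = TaskCreationChain.fromLLM({ llm });
const executionChain = TaskExecutionChain.fromLLM({ llm });
const prioritizationChain = TaskPrioritizationChain.fromLLM({ llm });
// The BabyAGI class exported above wires these three together internally.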
"./agent.js"; 5 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/experimental/babyagi/task_execution.ts: -------------------------------------------------------------------------------- 1 | import { LLMChain, LLMChainInput } from "../../chains/llm_chain.js"; 2 | import { PromptTemplate } from "../../prompts/prompt.js"; 3 | 4 | /** Chain to execute tasks. */ 5 | export class TaskExecutionChain extends LLMChain { 6 | static fromLLM(fields: Omit): LLMChain { 7 | const executionTemplate = 8 | `You are an AI who performs one task based on the following objective: ` + 9 | `{objective}.` + 10 | `Take into account these previously completed tasks: {context}.` + 11 | ` Your task: {task}. Response:`; 12 | const prompt = new PromptTemplate({ 13 | template: executionTemplate, 14 | inputVariables: ["objective", "context", "task"], 15 | }); 16 | return new TaskExecutionChain({ prompt, ...fields }); 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/experimental/babyagi/task_prioritization.ts: -------------------------------------------------------------------------------- 1 | import { LLMChain, LLMChainInput } from "../../chains/llm_chain.js"; 2 | import { PromptTemplate } from "../../prompts/prompt.js"; 3 | 4 | /** Chain to prioritize tasks. */ 5 | export class TaskPrioritizationChain extends LLMChain { 6 | static fromLLM(fields: Omit): LLMChain { 7 | const taskPrioritizationTemplate = 8 | `You are a task prioritization AI tasked with cleaning the formatting of ` + 9 | `and reprioritizing the following tasks: {task_names}.` + 10 | ` Consider the ultimate objective of your team: {objective}.` + 11 | ` Do not remove any tasks. Return the result as a numbered list, like:` + 12 | ` #. First task` + 13 | ` #. Second task` + 14 | ` Start the task list with number {next_task_id}.`; 15 | const prompt = new PromptTemplate({ 16 | template: taskPrioritizationTemplate, 17 | inputVariables: ["task_names", "next_task_id", "objective"], 18 | }); 19 | return new TaskPrioritizationChain({ prompt, ...fields }); 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/index.ts: -------------------------------------------------------------------------------- 1 | /* #__PURE__ */ console.error( 2 | "[WARN] Importing from 'langchain-gpt4all' is deprecated. See https://js.langchain-gpt4all.com/docs/getting-started/install#updating-from-0052 for upgrade instructions." 3 | ); 4 | 5 | export { 6 | PromptTemplate, 7 | BasePromptTemplate, 8 | FewShotPromptTemplate, 9 | } from "./prompts/index.js"; 10 | export { LLMChain } from "./chains/llm_chain.js"; 11 | export { OpenAI } from "./llms/openai.js"; 12 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/llms/index.ts: -------------------------------------------------------------------------------- 1 | /* #__PURE__ */ console.error( 2 | "[WARN] Importing from 'langchain-gpt4all/llms' is deprecated. Import from eg. 'langchain-gpt4all/llms/openai' instead. See https://js.langchain-gpt4all.com/docs/getting-started/install#updating-from-0052 for upgrade instructions." 
-------------------------------------------------------------------------------- /langchain-gpt4all/src/index.ts: -------------------------------------------------------------------------------- 1 | /* #__PURE__ */ console.error( 2 | "[WARN] Importing from 'langchain-gpt4all' is deprecated. See https://js.langchain-gpt4all.com/docs/getting-started/install#updating-from-0052 for upgrade instructions." 3 | ); 4 | 5 | export { 6 | PromptTemplate, 7 | BasePromptTemplate, 8 | FewShotPromptTemplate, 9 | } from "./prompts/index.js"; 10 | export { LLMChain } from "./chains/llm_chain.js"; 11 | export { OpenAI } from "./llms/openai.js"; 12 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/llms/index.ts: -------------------------------------------------------------------------------- 1 | /* #__PURE__ */ console.error( 2 | "[WARN] Importing from 'langchain-gpt4all/llms' is deprecated. Import from eg. 'langchain-gpt4all/llms/openai' instead. See https://js.langchain-gpt4all.com/docs/getting-started/install#updating-from-0052 for upgrade instructions." 3 | ); 4 | 5 | export { BaseLLM, BaseLLMParams, LLM, SerializedLLM } from "./base.js"; 6 | export { OpenAI, PromptLayerOpenAI } from "./openai.js"; 7 | export { OpenAIChat } from "./openai-chat.js"; 8 | export { Cohere } from "./cohere.js"; 9 | export { HuggingFaceInference } from "./hf.js"; 10 | export { Replicate } from "./replicate.js"; 11 | export { GPT4All } from "./gpt4all.js"; 12 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/llms/load.ts: -------------------------------------------------------------------------------- 1 | import { FileLoader, loadFromFile } from "../util/load.js"; 2 | import { BaseLanguageModel } from "../base_language/index.js"; 3 | import { parseFileConfig } from "../util/parse.js"; 4 | 5 | /** 6 | * Load an LLM from a local file. 7 | * 8 | * @example 9 | * ```ts 10 | * import { loadLLM } from "langchain-gpt4all/llms/load"; 11 | * const model = await loadLLM("/path/to/llm.json"); 12 | * ``` 13 | */ 14 | const loader: FileLoader<BaseLanguageModel> = (file: string, path: string) => 15 | BaseLanguageModel.deserialize(parseFileConfig(file, path)); 16 | 17 | export const loadLLM = (uri: string): Promise<BaseLanguageModel> => 18 | loadFromFile(uri, loader); 19 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/llms/tests/cohere.int.test.ts: -------------------------------------------------------------------------------- 1 | import { test } from "@jest/globals"; 2 | import { Cohere } from "../cohere.js"; 3 | 4 | test("Test Cohere", async () => { 5 | const model = new Cohere({ maxTokens: 20 }); 6 | const res = await model.call("1 + 1 ="); 7 | console.log(res); 8 | }, 50000); 9 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/llms/tests/gpt4all.int.test.ts: -------------------------------------------------------------------------------- 1 | import { test, expect } from "@jest/globals"; 2 | import { GPT4All } from "../gpt4all.js"; 3 | 4 | // GPT4All will likely need to download the model, which may take a couple of minutes 5 | test( 6 | "Test GPT4All", 7 | async () => { 8 | const startTime = performance.now(); 9 | const model = new GPT4All({ 10 | model: "gpt4all-lora-quantized", 11 | }); 12 | const endTime = performance.now(); 13 | const timeElapsed = endTime - startTime; 14 | console.log(`GPT4All: Time elapsed: ${timeElapsed} milliseconds`); 15 | 16 | const res = await model.call("Hello, my name is "); 17 | 18 | expect(typeof res).toBe("string"); 19 | }, 20 | 600 * 1000 21 | ); 22 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/llms/tests/huggingface_hub.int.test.ts: -------------------------------------------------------------------------------- 1 | import { test } from "@jest/globals"; 2 | import { HuggingFaceInference } from "../hf.js"; 3 | 4 | test("Test HuggingFace", async () => { 5 | const model = new HuggingFaceInference({ temperature: 0.1, topP: 0.5 }); 6 | const res = await model.call("1 + 1 ="); 7 | console.log(res); 8 | }, 50000); 9 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/llms/tests/replicate.int.test.ts: -------------------------------------------------------------------------------- 1 | import { test, expect } from "@jest/globals"; 2 | import { Replicate } from "../replicate.js"; 3 | 4 | // Test skipped because Replicate appears to be timing out often when called 5 | test.skip("Test Replicate", async () => { 
const model = new Replicate({ 7 | model: 8 | "daanelson/flan-t5:04e422a9b85baed86a4f24981d7f9953e20c5fd82f6103b74ebc431588e1cec8", 9 | input: { 10 | max_length: 10, 11 | }, 12 | }); 13 | 14 | const res = await model.call("Hello, my name is "); 15 | 16 | expect(typeof res).toBe("string"); 17 | }); 18 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/memory/index.ts: -------------------------------------------------------------------------------- 1 | export { BufferMemory, BufferMemoryInput } from "./buffer_memory.js"; 2 | export { BaseMemory, getInputValue, getBufferString } from "./base.js"; 3 | export { 4 | ConversationSummaryMemory, 5 | ConversationSummaryMemoryInput, 6 | } from "./summary.js"; 7 | export { 8 | BufferWindowMemory, 9 | BufferWindowMemoryInput, 10 | } from "./buffer_window_memory.js"; 11 | export { BaseChatMemory, BaseChatMemoryInput } from "./chat_memory.js"; 12 | export { ChatMessageHistory } from "../stores/message/in_memory.js"; 13 | export { MotorheadMemory, MotorheadMemoryInput } from "./motorhead_memory.js"; 14 | export { 15 | VectorStoreRetrieverMemory, 16 | VectorStoreRetrieverMemoryParams, 17 | } from "./vector_store.js"; 18 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/output_parsers/index.ts: -------------------------------------------------------------------------------- 1 | export { ListOutputParser, CommaSeparatedListOutputParser } from "./list.js"; 2 | export { RegexParser } from "./regex.js"; 3 | export { 4 | StructuredOutputParser, 5 | JsonMarkdownStructuredOutputParser, 6 | JsonMarkdownFormatInstructionsOptions, 7 | JsonMarkdownStructuredOutputParserInput, 8 | } from "./structured.js"; 9 | export { OutputFixingParser } from "./fix.js"; 10 | export { CombiningOutputParser } from "./combining.js"; 11 | export { RouterOutputParser, RouterOutputParserInput } from "./router.js"; 12 | export { CustomListOutputParser } from "./list.js"; 13 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/output_parsers/prompts.ts: -------------------------------------------------------------------------------- 1 | import { PromptTemplate } from "../prompts/prompt.js"; 2 | 3 | export const NAIVE_FIX_TEMPLATE = `Instructions: 4 | -------------- 5 | {instructions} 6 | -------------- 7 | Completion: 8 | -------------- 9 | {completion} 10 | -------------- 11 | 12 | Above, the Completion did not satisfy the constraints given in the Instructions. 13 | Error: 14 | -------------- 15 | {error} 16 | -------------- 17 | 18 | Please try again. 
Please only respond with an answer that satisfies the constraints laid out in the Instructions:`; 19 | 20 | export const NAIVE_FIX_PROMPT = 21 | /* #__PURE__ */ PromptTemplate.fromTemplate(NAIVE_FIX_TEMPLATE); 22 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/prompts/tests/load.int.test.ts: -------------------------------------------------------------------------------- 1 | import { expect, test } from "@jest/globals"; 2 | import * as path from "node:path"; 3 | import { fileURLToPath } from "node:url"; 4 | import { loadPrompt } from "../load.js"; 5 | 6 | test("Load Hello World Prompt", async () => { 7 | const helloWorld = path.join( 8 | path.join(path.dirname(fileURLToPath(import.meta.url)), "prompts"), 9 | "hello_world.yaml" 10 | ); 11 | const prompt = await loadPrompt(helloWorld); 12 | expect(prompt._getPromptType()).toBe("prompt"); 13 | expect(await prompt.format({})).toBe("Say hello world."); 14 | }); 15 | 16 | test("Load hub prompt", async () => { 17 | const prompt = await loadPrompt( 18 | "lc@abb92d8://prompts/hello-world/prompt.yaml" 19 | ); 20 | expect(prompt._getPromptType()).toBe("prompt"); 21 | expect(await prompt.format({})).toBe("Say hello world."); 22 | }); 23 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/prompts/tests/prompts/hello_world.yaml: -------------------------------------------------------------------------------- 1 | input_variables: [] 2 | output_parser: null 3 | template: "Say hello world." 4 | template_format: f-string 5 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/prompts/tests/template.test.ts: -------------------------------------------------------------------------------- 1 | import { expect, test, describe } from "@jest/globals"; 2 | import { interpolateFString } from "../template.js"; 3 | 4 | describe.each([ 5 | ["{foo}", { foo: "bar" }, "bar"], 6 | ["pre{foo}post", { foo: "bar" }, "prebarpost"], 7 | ["{{pre{foo}post}}", { foo: "bar" }, "{prebarpost}"], 8 | ["text", {}, "text"], 9 | ["}}{{", {}, "}{"], 10 | ["{first}_{second}", { first: "foo", second: "bar" }, "foo_bar"], 11 | ])("Valid f-string", (template, variables, result) => { 12 | test(`Interpolation works: ${template}`, () => { 13 | expect(interpolateFString(template, variables)).toBe(result); 14 | }); 15 | }); 16 | 17 | describe.each([ 18 | ["{", {}], 19 | ["}", {}], 20 | ["{foo", {}], 21 | ["foo}", {}], 22 | ])("Invalid f-string", (template, variables) => { 23 | test(`Interpolation throws: ${template}`, () => { 24 | expect(() => interpolateFString(template, variables)).toThrow(); 25 | }); 26 | }); 27 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/retrievers/contextual_compression.ts: -------------------------------------------------------------------------------- 1 | import { BaseDocumentCompressor } from "./document_compressors/index.js"; 2 | import { Document } from "../document.js"; 3 | import { BaseRetriever } from "../schema/index.js"; 4 | 5 | export interface ContextualCompressionRetrieverArgs { 6 | baseCompressor: BaseDocumentCompressor; 7 | baseRetriever: BaseRetriever; 8 | } 9 | 10 | export class ContextualCompressionRetriever extends BaseRetriever { 11 | baseCompressor: BaseDocumentCompressor; 12 | 13 | baseRetriever: BaseRetriever; 14 | 15 | constructor({ 16 | baseCompressor, 17 | baseRetriever, 18 | }: ContextualCompressionRetrieverArgs) { 19 | 
super(); 20 | 21 | this.baseCompressor = baseCompressor; 22 | this.baseRetriever = baseRetriever; 23 | } 24 | 25 | async getRelevantDocuments(query: string): Promise<Document[]> { 26 | const docs = await this.baseRetriever.getRelevantDocuments(query); 27 | const compressedDocs = await this.baseCompressor.compressDocuments( 28 | docs, 29 | query 30 | ); 31 | return compressedDocs; 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/retrievers/document_compressors/chain_extract_prompt.ts: -------------------------------------------------------------------------------- 1 | export const PROMPT_TEMPLATE = ( 2 | noOutputStr: string 3 | ) => `Given the following question and context, extract any part of the context *AS IS* that is relevant to answer the question. If none of the context is relevant return ${noOutputStr}. 4 | 5 | Remember, *DO NOT* edit the extracted parts of the context. 6 | 7 | > Question: {question} 8 | > Context: 9 | >>> 10 | {context} 11 | >>> 12 | Extracted relevant parts:`; 13 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/retrievers/document_compressors/index.ts: -------------------------------------------------------------------------------- 1 | import { Document } from "../../document.js"; 2 | 3 | /** 4 | * Base Document Compression class. All compressors should extend this class. 5 | */ 6 | export abstract class BaseDocumentCompressor { 7 | abstract compressDocuments( 8 | documents: Document[], 9 | query: string 10 | ): Promise<Document[]>; 11 | } 12 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/retrievers/index.ts: -------------------------------------------------------------------------------- 1 | /* #__PURE__ */ console.error( 2 | "[WARN] Importing from 'langchain-gpt4all/retrievers' is deprecated. Import from eg. 'langchain-gpt4all/retrievers/remote' instead. See https://js.langchain-gpt4all.com/docs/getting-started/install#updating-from-0052 for upgrade instructions." 3 | ); 4 | 5 | export { RemoteRetriever } from "./remote/base.js"; 6 | export { ChatGPTPluginRetriever } from "./remote/chatgpt-plugin.js"; 7 | export { 8 | SupabaseHybridSearch, 9 | SupabaseHybridSearchParams, 10 | } from "./supabase.js"; 11 | export { RemoteLangChainRetriever } from "./remote/remote-retriever.js"; 12 | export { MetalRetriever } from "./metal.js"; 13 | 
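A sketch of the contextual-compression flow defined above, pairing a base retriever with a deliberately trivial BaseDocumentCompressor subclass. The keyword filter is illustrative only (it is not the library's chain-extraction compressor), and the import paths are assumptions:

import { Document } from "langchain-gpt4all/document";
import { BaseDocumentCompressor } from "langchain-gpt4all/retrievers/document_compressors";
import { ContextualCompressionRetriever } from "langchain-gpt4all/retrievers/contextual_compression";
import { BaseRetriever } from "langchain-gpt4all/schema";

// Toy compressor: keep only documents that mention the query verbatim.
class KeywordCompressor extends BaseDocumentCompressor {
  async compressDocuments(documents: Document[], query: string): Promise<Document[]> {
    return documents.filter((doc) =>
      doc.pageContent.toLowerCase().includes(query.toLowerCase())
    );
  }
}

declare const baseRetriever: BaseRetriever; // e.g. a vector store retriever

const retriever = new ContextualCompressionRetriever({
  baseCompressor: new KeywordCompressor(),
  baseRetriever,
});
const docs = await retriever.getRelevantDocuments("gradient descent");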
-------------------------------------------------------------------------------- /langchain-gpt4all/src/retrievers/metal.ts: -------------------------------------------------------------------------------- 1 | import { BaseRetriever } from "../schema/index.js"; 2 | import { Document } from "../document.js"; 3 | 4 | export interface MetalRetrieverFields { 5 | client: import("@getmetal/metal-sdk").default; 6 | } 7 | 8 | interface ResponseItem { 9 | text: string; 10 | [key: string]: unknown; 11 | } 12 | 13 | export class MetalRetriever extends BaseRetriever { 14 | private client: import("@getmetal/metal-sdk").default; 15 | 16 | constructor(fields: MetalRetrieverFields) { 17 | super(); 18 | 19 | this.client = fields.client; 20 | } 21 | 22 | async getRelevantDocuments(query: string): Promise<Document[]> { 23 | const res = await this.client.search({ text: query }); 24 | 25 | const items = ("data" in res ? res.data : res) as ResponseItem[]; 26 | return items.map( 27 | ({ text, metadata }) => 28 | new Document({ 29 | pageContent: text, 30 | metadata: metadata as Record<string, unknown>, 31 | }) 32 | ); 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/retrievers/remote/index.ts: -------------------------------------------------------------------------------- 1 | export { 2 | RemoteRetriever, 3 | RemoteRetrieverParams, 4 | RemoteRetrieverAuth, 5 | RemoteRetrieverValues, 6 | } from "./base.js"; 7 | export { 8 | ChatGPTPluginRetriever, 9 | ChatGPTPluginRetrieverFilter, 10 | ChatGPTPluginRetrieverParams, 11 | } from "./chatgpt-plugin.js"; 12 | export { 13 | RemoteLangChainRetriever, 14 | RemoteLangChainRetrieverParams, 15 | } from "./remote-retriever.js"; 16 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/retrievers/tests/metal.int.test.ts: -------------------------------------------------------------------------------- 1 | /* eslint-disable no-process-env */ 2 | /* eslint-disable @typescript-eslint/no-non-null-assertion */ 3 | import { test, expect } from "@jest/globals"; 4 | import Metal from "@getmetal/metal-sdk"; 5 | 6 | import { MetalRetriever } from "../metal.js"; 7 | 8 | test("MetalRetriever", async () => { 9 | const MetalSDK = Metal.default; 10 | const client = new MetalSDK( 11 | process.env.METAL_API_KEY!, 12 | process.env.METAL_CLIENT_ID!, 13 | process.env.METAL_INDEX_ID 14 | ); 15 | const retriever = new MetalRetriever({ client }); 16 | 17 | const docs = await retriever.getRelevantDocuments("hello"); 18 | 19 | expect(docs.length).toBeGreaterThan(0); 20 | 21 | console.log(docs); 22 | }); 23 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/retrievers/tests/supabase.int.test.ts: -------------------------------------------------------------------------------- 1 | /* eslint-disable no-process-env */ 2 | /* eslint-disable @typescript-eslint/no-non-null-assertion */ 3 | import { test, expect } from "@jest/globals"; 4 | import { createClient } from "@supabase/supabase-js"; 5 | import { OpenAIEmbeddings } from "../../embeddings/openai.js"; 6 | import { SupabaseHybridSearch } from "../supabase.js"; 7 | 8 | test("Supabase hybrid keyword search", async () => { 9 | const client = createClient( 10 | process.env.SUPABASE_URL!, 11 | process.env.SUPABASE_PRIVATE_KEY! 
12 | ); 13 | 14 | const embeddings = new OpenAIEmbeddings(); 15 | 16 | const retriever = new SupabaseHybridSearch(embeddings, { 17 | client, 18 | similarityK: 2, 19 | keywordK: 2, 20 | }); 21 | 22 | expect(retriever).toBeDefined(); 23 | 24 | const results = await retriever.getRelevantDocuments("hello bye"); 25 | 26 | expect(results.length).toBeGreaterThan(0); 27 | }); 28 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/stores/file/in_memory.ts: -------------------------------------------------------------------------------- 1 | import { BaseFileStore } from "../../schema/index.js"; 2 | 3 | export class InMemoryFileStore extends BaseFileStore { 4 | private files: Map<string, string> = new Map(); 5 | 6 | async readFile(path: string): Promise<string> { 7 | const contents = this.files.get(path); 8 | if (contents === undefined) { 9 | throw new Error(`File not found: ${path}`); 10 | } 11 | return contents; 12 | } 13 | 14 | async writeFile(path: string, contents: string): Promise<void> { 15 | this.files.set(path, contents); 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/stores/file/node.ts: -------------------------------------------------------------------------------- 1 | import * as fs from "node:fs/promises"; 2 | import { mkdtempSync } from "node:fs"; 3 | import { join } from "node:path"; 4 | 5 | import { BaseFileStore } from "../../schema/index.js"; 6 | 7 | export class NodeFileStore extends BaseFileStore { 8 | constructor(public basePath: string = mkdtempSync("langchain-gpt4all-")) { 9 | super(); 10 | } 11 | 12 | async readFile(path: string): Promise<string> { 13 | return await fs.readFile(join(this.basePath, path), "utf8"); 14 | } 15 | 16 | async writeFile(path: string, contents: string): Promise<void> { 17 | await fs.writeFile(join(this.basePath, path), contents, "utf8"); 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/stores/message/in_memory.ts: -------------------------------------------------------------------------------- 1 | import { 2 | BaseChatMessage, 3 | BaseListChatMessageHistory, 4 | } from "../../schema/index.js"; 5 | 6 | export class ChatMessageHistory extends BaseListChatMessageHistory { 7 | private messages: BaseChatMessage[] = []; 8 | 9 | constructor(messages?: BaseChatMessage[]) { 10 | super(); 11 | this.messages = messages ?? []; 12 | } 13 | 14 | async getMessages(): Promise<BaseChatMessage[]> { 15 | return this.messages; 16 | } 17 | 18 | async addMessage(message: BaseChatMessage) { 19 | this.messages.push(message); 20 | } 21 | 22 | async clear() { 23 | this.messages = []; 24 | } 25 | } 26 | 
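A sketch of seeding the in-memory chat history above and handing it to a memory class; the chatHistory field and the message class names are assumed to match this version's memory and schema exports:

import { BufferMemory } from "langchain-gpt4all/memory";
import { ChatMessageHistory } from "langchain-gpt4all/stores/message/in_memory";
import { HumanChatMessage, AIChatMessage } from "langchain-gpt4all/schema";

// Pre-populate the history, e.g. when restoring a saved conversation.
const history = new ChatMessageHistory([
  new HumanChatMessage("hi"),
  new AIChatMessage("hello, how can I help?"),
]);

// Pass `memory` to a chain or agent constructor to carry the conversation.
const memory = new BufferMemory({ chatHistory: history });
console.log(await history.getMessages()); // the two seeded messages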
-------------------------------------------------------------------------------- /langchain-gpt4all/src/tools/calculator.ts: -------------------------------------------------------------------------------- 1 | import { Parser } from "expr-eval"; 2 | 3 | import { Tool } from "./base.js"; 4 | 5 | export class Calculator extends Tool { 6 | name = "calculator"; 7 | 8 | /** @ignore */ 9 | async _call(input: string) { 10 | try { 11 | return Parser.evaluate(input).toString(); 12 | } catch (error) { 13 | return "I don't know how to do that."; 14 | } 15 | } 16 | 17 | description = `Useful for getting the result of a math expression. The input to this tool should be a valid mathematical expression that could be executed by a simple calculator.`; 18 | } 19 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/tools/chain.ts: -------------------------------------------------------------------------------- 1 | import { DynamicTool, DynamicToolInput } from "./dynamic.js"; 2 | import { BaseChain } from "../chains/base.js"; 3 | 4 | export interface ChainToolInput extends Omit<DynamicToolInput, "func"> { 5 | chain: BaseChain; 6 | } 7 | 8 | export class ChainTool extends DynamicTool { 9 | chain: BaseChain; 10 | 11 | constructor({ chain, ...rest }: ChainToolInput) { 12 | super({ 13 | ...rest, 14 | func: async (input, runManager) => 15 | chain.run(input, runManager?.getChild()), 16 | }); 17 | this.chain = chain; 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/types/type-utils.ts: -------------------------------------------------------------------------------- 1 | // Utility for marking only some keys of an interface as optional 2 | // Compare to Partial which marks all keys as optional 3 | export type Optional<T, K extends keyof T> = Omit<T, K> & Partial<Pick<T, K>>; 4 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/util/axios-fetch-adapter.d.ts: -------------------------------------------------------------------------------- 1 | // eslint-disable-next-line import/no-extraneous-dependencies 2 | import { AxiosRequestConfig, AxiosPromise } from "axios"; 3 | 4 | export default function fetchAdapter(config: AxiosRequestConfig): AxiosPromise; 5 | 
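A quick illustration of the Optional utility from type-utils.ts above; unlike Partial, it relaxes only the named keys. The import path is an internal module and assumed here for illustration:

import type { Optional } from "langchain-gpt4all/types/type-utils";

interface LoaderConfig {
  path: string;
  encoding: string;
  recursive: boolean;
}

// encoding and recursive become optional; path stays required.
type LoaderConfigInput = Optional<LoaderConfig, "encoding" | "recursive">;

const ok: LoaderConfigInput = { path: "./docs" };
// const bad: LoaderConfigInput = {}; // type error: path is missing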
-------------------------------------------------------------------------------- /langchain-gpt4all/src/util/axios-types.ts: -------------------------------------------------------------------------------- 1 | import type { AxiosRequestConfig } from "axios"; 2 | import type { EventSourceMessage } from "./event-source-parse.js"; 3 | 4 | export interface StreamingAxiosRequestConfig extends AxiosRequestConfig { 5 | responseType: "stream"; 6 | 7 | /** 8 | * Called when a message is received. NOTE: Unlike the default browser 9 | * EventSource.onmessage, this callback is called for _all_ events, 10 | * even ones with a custom `event` field. 11 | */ 12 | onmessage?: (ev: EventSourceMessage) => void; 13 | } 14 | 15 | export type StreamingAxiosConfiguration = 16 | | StreamingAxiosRequestConfig 17 | | AxiosRequestConfig; 18 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/util/chunk.ts: -------------------------------------------------------------------------------- 1 | export const chunkArray = <T>(arr: T[], chunkSize: number) => 2 | arr.reduce((chunks, elem, index) => { 3 | const chunkIndex = Math.floor(index / chunkSize); 4 | const chunk = chunks[chunkIndex] || []; 5 | // eslint-disable-next-line no-param-reassign 6 | chunks[chunkIndex] = chunk.concat([elem]); 7 | return chunks; 8 | }, [] as T[][]); 9 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/util/env.ts: -------------------------------------------------------------------------------- 1 | import { 2 | isBrowser, 3 | isNode, 4 | isWebWorker, 5 | isJsDom, 6 | isDeno, 7 | } from "browser-or-node"; 8 | 9 | export const getEnv = () => { 10 | let env: string; 11 | if (isBrowser) { 12 | env = "browser"; 13 | } else if (isNode) { 14 | env = "node"; 15 | } else if (isWebWorker) { 16 | env = "webworker"; 17 | } else if (isJsDom) { 18 | env = "jsdom"; 19 | } else if (isDeno) { 20 | env = "deno"; 21 | } else { 22 | env = "other"; 23 | } 24 | 25 | return env; 26 | }; 27 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/util/extname.ts: -------------------------------------------------------------------------------- 1 | export const extname = (path: string) => `.${path.split(".").pop()}`; 2 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/util/load.ts: -------------------------------------------------------------------------------- 1 | // eslint-disable-next-line @typescript-eslint/no-explicit-any 2 | export type LoadValues = Record<string, any>; 3 | 4 | export type FileLoader<T> = ( 5 | text: string, 6 | filePath: string, 7 | values: LoadValues 8 | ) => Promise<T>; 9 | 10 | export const loadFromFile = async <T>( 11 | uri: string, 12 | loader: FileLoader<T>, 13 | values: LoadValues = {} 14 | ): Promise<T> => { 15 | try { 16 | const fs = await import("node:fs/promises"); 17 | return loader(await fs.readFile(uri, { encoding: "utf-8" }), uri, values); 18 | } catch (e) { 19 | console.error(e); 20 | throw new Error(`Could not load file at ${uri}`); 21 | } 22 | }; 23 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/util/parse.ts: -------------------------------------------------------------------------------- 1 | import * as yaml from "yaml"; 2 | import { extname } from "./extname.js"; 3 | 4 | export const loadFileContents = (contents: string, format: string) => { 5 | switch (format) { 6 | case ".json": 7 | return JSON.parse(contents); 8 | case ".yml": 9 | case ".yaml": 10 | return yaml.parse(contents); 11 | default: 12 | throw new Error(`Unsupported filetype ${format}`); 13 | } 14 | }; 15 | 16 | export const parseFileConfig = ( 17 | text: string, 18 | path: string, 19 | supportedTypes?: string[] 20 | ) => { 21 | const suffix = extname(path); 22 | 23 | if ( 24 | ![".json", ".yaml"].includes(suffix) || 25 | (supportedTypes && !supportedTypes.includes(suffix)) 26 | ) { 27 | throw new Error(`Unsupported filetype ${suffix}`); 28 | } 29 | 30 | return loadFileContents(text, suffix); 31 | }; 32 | 
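A sketch of the config-parsing helpers above in action: parseFileConfig dispatches on the file extension and can be narrowed further with supportedTypes (the internal util import path is assumed):

import { parseFileConfig } from "langchain-gpt4all/util/parse";

const config = parseFileConfig('{"template": "Say {foo}."}', "prompt.json");
console.log(config.template); // "Say {foo}."

// Narrowing to YAML makes a .json path throw "Unsupported filetype .json".
try {
  parseFileConfig("template: hi", "prompt.yaml", [".yaml"]); // fine
  parseFileConfig("{}", "prompt.json", [".yaml"]); // throws
} catch (e) {
  console.error((e as Error).message);
}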
-------------------------------------------------------------------------------- /langchain-gpt4all/src/util/set.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Source: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Set#implementing_basic_set_operations 3 | */ 4 | 5 | /** 6 | * returns intersection of two sets 7 | */ 8 | export function intersection<T>(setA: Set<T>, setB: Set<T>) { 9 | const _intersection = new Set<T>(); 10 | for (const elem of setB) { 11 | if (setA.has(elem)) { 12 | _intersection.add(elem); 13 | } 14 | } 15 | return _intersection; 16 | } 17 | 18 | /** 19 | * returns union of two sets 20 | */ 21 | export function union<T>(setA: Set<T>, setB: Set<T>) { 22 | const _union = new Set<T>(setA); 23 | for (const elem of setB) { 24 | _union.add(elem); 25 | } 26 | return _union; 27 | } 28 | 29 | /** 30 | * returns difference of two sets 31 | */ 32 | export function difference<T>(setA: Set<T>, setB: Set<T>) { 33 | const _difference = new Set<T>(setA); 34 | for (const elem of setB) { 35 | _difference.delete(elem); 36 | } 37 | return _difference; 38 | } 39 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/util/tests/set.test.ts: -------------------------------------------------------------------------------- 1 | import { test, expect } from "@jest/globals"; 2 | import { difference, intersection, union } from "../set.js"; 3 | 4 | test("difference", () => { 5 | const set1 = new Set(["a", "b"]); 6 | const set2 = new Set(["b", "c"]); 7 | 8 | const resultSet = difference(set1, set2); 9 | expect(resultSet).toMatchInlineSnapshot(` 10 | Set { 11 | "a", 12 | } 13 | `); 14 | }); 15 | 16 | test("intersection", () => { 17 | const set1 = new Set(["a", "b", "c", "d"]); 18 | const set2 = new Set(["b", "c", "e"]); 19 | 20 | const resultSet = intersection(set1, set2); 21 | expect(resultSet).toMatchInlineSnapshot(` 22 | Set { 23 | "b", 24 | "c", 25 | } 26 | `); 27 | }); 28 | 29 | test("union", () => { 30 | const set1 = new Set(["a", "b"]); 31 | const set2 = new Set(["c", "d"]); 32 | 33 | const resultSet = union(set1, set2); 34 | expect(resultSet).toMatchInlineSnapshot(` 35 | Set { 36 | "a", 37 | "b", 38 | "c", 39 | "d", 40 | } 41 | `); 42 | }); 43 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/vectorstores/index.ts: -------------------------------------------------------------------------------- 1 | /* #__PURE__ */ console.error( 2 | "[WARN] Importing from 'langchain-gpt4all/vectorstores' is deprecated. Import from eg. 'langchain-gpt4all/vectorstores/pinecone' instead. See https://js.langchain-gpt4all.com/docs/getting-started/install#updating-from-0052 for upgrade instructions." 3 | ); 4 | 5 | export { HNSWLib } from "./hnswlib.js"; 6 | export { Chroma } from "./chroma.js"; 7 | export { PineconeStore } from "./pinecone.js"; 8 | export { VectorStore, SaveableVectorStore } from "./base.js"; 9 | export { SupabaseVectorStore } from "./supabase.js"; 10 | export { PrismaVectorStore } from "./prisma.js"; 11 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/vectorstores/tests/chroma.test.ts: -------------------------------------------------------------------------------- 1 | import { test, expect } from "@jest/globals"; 2 | 3 | import { Chroma } from "../chroma.js"; 4 | 5 | // We'd want a much more thorough test here, 6 | // but sadly Chroma isn't very easy to test locally at the moment. 
7 | test("Chroma imports correctly", async () => { 8 | const { ChromaClient } = await Chroma.imports(); 9 | 10 | expect(ChromaClient).toBeDefined(); 11 | }); 12 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/vectorstores/tests/memory.int.test.ts: -------------------------------------------------------------------------------- 1 | import { test, expect } from "@jest/globals"; 2 | 3 | import { OpenAIEmbeddings } from "../../embeddings/openai.js"; 4 | import { Document } from "../../document.js"; 5 | import { MemoryVectorStore } from "../memory.js"; 6 | 7 | test("MemoryVectorStore with external ids", async () => { 8 | const embeddings = new OpenAIEmbeddings(); 9 | 10 | const store = new MemoryVectorStore(embeddings); 11 | 12 | expect(store).toBeDefined(); 13 | 14 | await store.addDocuments([ 15 | { pageContent: "hello", metadata: { a: 1 } }, 16 | { pageContent: "hi", metadata: { a: 1 } }, 17 | { pageContent: "bye", metadata: { a: 1 } }, 18 | { pageContent: "what's this", metadata: { a: 1 } }, 19 | ]); 20 | 21 | const results = await store.similaritySearch("hello", 1); 22 | 23 | expect(results).toHaveLength(1); 24 | 25 | expect(results).toEqual([ 26 | new Document({ metadata: { a: 1 }, pageContent: "hello" }), 27 | ]); 28 | }); 29 | -------------------------------------------------------------------------------- /langchain-gpt4all/src/vectorstores/tests/weaviate.test.ts: -------------------------------------------------------------------------------- 1 | import { test, expect } from "@jest/globals"; 2 | 3 | import { flattenObjectForWeaviate } from "../weaviate.js"; 4 | 5 | test("flattenObjectForWeaviate", () => { 6 | expect( 7 | flattenObjectForWeaviate({ 8 | array2: [{}, "a"], 9 | deep: { 10 | string: "deep string", 11 | array: ["1", 2], 12 | array3: [1, 3], 13 | deepdeep: { 14 | string: "even a deeper string", 15 | }, 16 | }, 17 | }) 18 | ).toMatchInlineSnapshot(` 19 | { 20 | "deep_array3": [ 21 | 1, 22 | 3, 23 | ], 24 | "deep_deepdeep_string": "even a deeper string", 25 | "deep_string": "deep string", 26 | } 27 | `); 28 | }); 29 | -------------------------------------------------------------------------------- /langchain-gpt4all/tsconfig.cjs.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "./tsconfig.json", 3 | "compilerOptions": { 4 | "module": "commonjs", 5 | "declaration": false 6 | }, 7 | "exclude": [ 8 | "node_modules", 9 | "dist", 10 | "docs", 11 | "**/tests" 12 | ] 13 | } -------------------------------------------------------------------------------- /scripts/docker-ci-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euxo pipefail 4 | 5 | export CI=true 6 | 7 | cp -r ../package/* . 
8 | 9 | # Replace the workspace dependency with the local copy, and install all others 10 | yarn add ../langchain-gpt4all 11 | 12 | # Check the build command completes successfully 13 | yarn build 14 | 15 | # Check the test command completes successfully 16 | yarn test 17 | -------------------------------------------------------------------------------- /scripts/release-branch.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | if [[ $(git branch --show-current) == "main" ]]; then 4 | git checkout -B release 5 | git push -u origin release 6 | fi 7 | -------------------------------------------------------------------------------- /test-exports-cf/.gitignore: -------------------------------------------------------------------------------- 1 | .dev.vars 2 | -------------------------------------------------------------------------------- /test-exports-cf/README.md: -------------------------------------------------------------------------------- 1 | # test-exports-cf 2 | 3 | This package was generated with `wrangler init` to test compatibility with Cloudflare Workers. 4 | -------------------------------------------------------------------------------- /test-exports-cf/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "test-exports-cf", 3 | "version": "0.0.0", 4 | "devDependencies": { 5 | "@cloudflare/workers-types": "^4.20230321.0", 6 | "typescript": "^5.0.3", 7 | "vitest": "^0.29.8", 8 | "wrangler": "2.14.0" 9 | }, 10 | "dependencies": { 11 | "langchain-gpt4all": "workspace:*" 12 | }, 13 | "private": true, 14 | "scripts": { 15 | "start": "wrangler dev", 16 | "deploy": "wrangler publish", 17 | "build": "wrangler publish --dry-run --outdir=dist", 18 | "test": "vitest run **/*.unit.test.ts", 19 | "test:integration": "vitest run **/*.int.test.ts" 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /test-exports-cf/src/index.int.test.ts: -------------------------------------------------------------------------------- 1 | import { unstable_dev } from "wrangler"; 2 | import type { UnstableDevWorker } from "wrangler"; 3 | import { describe, expect, it, beforeAll, afterAll } from "vitest"; 4 | 5 | describe("Worker", () => { 6 | let worker: UnstableDevWorker; 7 | 8 | beforeAll(async () => { 9 | worker = await unstable_dev("src/index.ts", { 10 | experimental: { disableExperimentalWarning: true }, 11 | }); 12 | }); 13 | 14 | afterAll(async () => { 15 | await worker.stop(); 16 | }); 17 | 18 | it("should return Hello World", async () => { 19 | const resp = await worker.fetch(); 20 | expect(resp.ok).toBe(true); 21 | if (resp) { 22 | const text = await resp.text(); 23 | expect(text.startsWith("Hello")).toBe(true); 24 | } 25 | }, 30000); 26 | }); 27 | -------------------------------------------------------------------------------- /test-exports-cf/src/index.unit.test.ts: -------------------------------------------------------------------------------- 1 | import { unstable_dev } from "wrangler"; 2 | import type { UnstableDevWorker } from "wrangler"; 3 | import { describe, expect, it, beforeAll, afterAll } from "vitest"; 4 | 5 | describe("Worker", () => { 6 | let worker: UnstableDevWorker; 7 | 8 | beforeAll(async () => { 9 | worker = await unstable_dev("src/index.ts", { 10 | experimental: { disableExperimentalWarning: true }, 11 | }); 12 | }, 30000); 13 | 14 | afterAll(async () => { 15 | await worker.stop(); 16 | }); 17 
| 18 | it("should start", async () => { 19 | expect(true).toBe(true); 20 | }); 21 | }); 22 | -------------------------------------------------------------------------------- /test-exports-cf/wrangler.toml: -------------------------------------------------------------------------------- 1 | name = "test-exports-cf" 2 | main = "src/index.ts" 3 | compatibility_date = "2023-04-05" 4 | -------------------------------------------------------------------------------- /test-exports-cjs/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "test-exports-cjs", 3 | "version": "0.0.0", 4 | "private": true, 5 | "description": "CJS Tests for the things exported by the langchain-gpt4all package", 6 | "main": "./index.mjs", 7 | "scripts": { 8 | "build": "tsc", 9 | "test": "npm run test:esm && npm run test:cjs && npm run test:cjs:import && npm run test:entrypoints && npm run test:ts", 10 | "test:esm": "node src/index.mjs", 11 | "test:cjs": "node src/require.js", 12 | "test:cjs:import": "node src/import.js", 13 | "test:entrypoints": "node src/entrypoints.js", 14 | "test:ts": "node dist/index.js", 15 | "format": "prettier --write src", 16 | "format:check": "prettier --check src" 17 | }, 18 | "author": "LangChain", 19 | "license": "MIT", 20 | "dependencies": { 21 | "d3-dsv": "2", 22 | "hnswlib-node": "^1.4.2", 23 | "langchain-gpt4all": "workspace:*" 24 | }, 25 | "devDependencies": { 26 | "@tsconfig/recommended": "^1.0.2", 27 | "@types/node": "^18.15.11", 28 | "prettier": "^2.8.3", 29 | "typescript": "^5.0.0" 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /test-exports-cjs/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "@tsconfig/recommended", 3 | "compilerOptions": { 4 | "outDir": "./dist", 5 | "rootDir": "./src", 6 | "lib": [ 7 | "ES2021", 8 | "ES2022.Object", 9 | "DOM" 10 | ], 11 | "target": "ES2021", 12 | }, 13 | "include": [ 14 | "src/**/*" 15 | ] 16 | } 17 | -------------------------------------------------------------------------------- /test-exports-cra/.gitignore: -------------------------------------------------------------------------------- 1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files. 
2 | 3 | # dependencies 4 | /node_modules 5 | /.pnp 6 | .pnp.js 7 | 8 | # testing 9 | /coverage 10 | 11 | # production 12 | /build 13 | 14 | # misc 15 | .DS_Store 16 | .env.local 17 | .env.development.local 18 | .env.test.local 19 | .env.production.local 20 | 21 | npm-debug.log* 22 | yarn-debug.log* 23 | yarn-error.log* 24 | -------------------------------------------------------------------------------- /test-exports-cra/public/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lujstn/langchainjs-gpt4all/8a90c9c1c6a2e2e8590523106db3c533724d6ceb/test-exports-cra/public/favicon.ico -------------------------------------------------------------------------------- /test-exports-cra/public/logo192.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lujstn/langchainjs-gpt4all/8a90c9c1c6a2e2e8590523106db3c533724d6ceb/test-exports-cra/public/logo192.png -------------------------------------------------------------------------------- /test-exports-cra/public/logo512.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lujstn/langchainjs-gpt4all/8a90c9c1c6a2e2e8590523106db3c533724d6ceb/test-exports-cra/public/logo512.png -------------------------------------------------------------------------------- /test-exports-cra/public/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "short_name": "React App", 3 | "name": "Create React App Sample", 4 | "icons": [ 5 | { 6 | "src": "favicon.ico", 7 | "sizes": "64x64 32x32 24x24 16x16", 8 | "type": "image/x-icon" 9 | }, 10 | { 11 | "src": "logo192.png", 12 | "type": "image/png", 13 | "sizes": "192x192" 14 | }, 15 | { 16 | "src": "logo512.png", 17 | "type": "image/png", 18 | "sizes": "512x512" 19 | } 20 | ], 21 | "start_url": ".", 22 | "display": "standalone", 23 | "theme_color": "#000000", 24 | "background_color": "#ffffff" 25 | } 26 | -------------------------------------------------------------------------------- /test-exports-cra/public/robots.txt: -------------------------------------------------------------------------------- 1 | # https://www.robotstxt.org/robotstxt.html 2 | User-agent: * 3 | Disallow: 4 | -------------------------------------------------------------------------------- /test-exports-cra/src/App.test.js: -------------------------------------------------------------------------------- 1 | import { render, screen } from "@testing-library/react"; 2 | import App from "./App"; 3 | 4 | test("renders learn react link", () => { 5 | render(<App />); 6 | const linkElement = screen.getByText(/learn react/i); 7 | expect(linkElement).toBeInTheDocument(); 8 | }); 9 | -------------------------------------------------------------------------------- /test-exports-cra/src/index.css: -------------------------------------------------------------------------------- 1 | body { 2 | margin: 0; 3 | font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen', 4 | 'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue', 5 | sans-serif; 6 | -webkit-font-smoothing: antialiased; 7 | -moz-osx-font-smoothing: grayscale; 8 | } 9 | 10 | code { 11 | font-family: source-code-pro, Menlo, Monaco, Consolas, 'Courier New', 12 | monospace; 13 | } 14 | -------------------------------------------------------------------------------- /test-exports-cra/src/index.js:
-------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import ReactDOM from 'react-dom/client'; 3 | import './index.css'; 4 | import App from './App'; 5 | import reportWebVitals from './reportWebVitals'; 6 | 7 | const root = ReactDOM.createRoot(document.getElementById('root')); 8 | root.render( 9 | <React.StrictMode> 10 | <App /> 11 | </React.StrictMode> 12 | ); 13 | 14 | // If you want to start measuring performance in your app, pass a function 15 | // to log results (for example: reportWebVitals(console.log)) 16 | // or send to an analytics endpoint. Learn more: https://bit.ly/CRA-vitals 17 | reportWebVitals(); 18 | -------------------------------------------------------------------------------- /test-exports-cra/src/reportWebVitals.js: -------------------------------------------------------------------------------- 1 | const reportWebVitals = onPerfEntry => { 2 | if (onPerfEntry && onPerfEntry instanceof Function) { 3 | import('web-vitals').then(({ getCLS, getFID, getFCP, getLCP, getTTFB }) => { 4 | getCLS(onPerfEntry); 5 | getFID(onPerfEntry); 6 | getFCP(onPerfEntry); 7 | getLCP(onPerfEntry); 8 | getTTFB(onPerfEntry); 9 | }); 10 | } 11 | }; 12 | 13 | export default reportWebVitals; 14 | -------------------------------------------------------------------------------- /test-exports-cra/src/setupTests.js: -------------------------------------------------------------------------------- 1 | // jest-dom adds custom jest matchers for asserting on DOM nodes. 2 | // allows you to do things like: 3 | // expect(element).toHaveTextContent(/react/i) 4 | // learn more: https://github.com/testing-library/jest-dom 5 | import '@testing-library/jest-dom'; 6 | -------------------------------------------------------------------------------- /test-exports-esbuild/.gitignore: -------------------------------------------------------------------------------- 1 | dist-esm 2 | dist-cjs 3 | -------------------------------------------------------------------------------- /test-exports-esbuild/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euxo pipefail 4 | 5 | DIR=$1 6 | 7 | for file in $DIR/*; do 8 | if [[ $file == 'dist-esm/require.js' ]]; then 9 | continue; 10 | fi 11 | node $file; 12 | done 13 | -------------------------------------------------------------------------------- /test-exports-esbuild/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "@tsconfig/recommended", 3 | "compilerOptions": { 4 | "outDir": "./dist", 5 | "rootDir": "./src", 6 | "lib": [ 7 | "ES2021", 8 | "ES2022.Object", 9 | "DOM" 10 | ], 11 | "target": "ES2021", 12 | "module": "nodenext", 13 | }, 14 | "include": [ 15 | "src/**/*" 16 | ] 17 | } 18 | -------------------------------------------------------------------------------- /test-exports-esm/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "@tsconfig/recommended", 3 | "compilerOptions": { 4 | "outDir": "./dist", 5 | "rootDir": "./src", 6 | "lib": [ 7 | "ES2021", 8 | "ES2022.Object", 9 | "DOM" 10 | ], 11 | "target": "ES2021", 12 | "module": "nodenext", 13 | }, 14 | "include": [ 15 | "src/**/*" 16 | ] 17 | } 18 | -------------------------------------------------------------------------------- /test-exports-vercel/.eslintrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "next/core-web-vitals" 3 | } 4
| -------------------------------------------------------------------------------- /test-exports-vercel/.gitignore: -------------------------------------------------------------------------------- 1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files. 2 | 3 | # dependencies 4 | /node_modules 5 | /.pnp 6 | .pnp.js 7 | 8 | # testing 9 | /coverage 10 | 11 | # next.js 12 | /.next/ 13 | /out/ 14 | 15 | # production 16 | /build 17 | 18 | # misc 19 | .DS_Store 20 | *.pem 21 | 22 | # debug 23 | npm-debug.log* 24 | yarn-debug.log* 25 | yarn-error.log* 26 | .pnpm-debug.log* 27 | 28 | # local env files 29 | .env*.local 30 | 31 | # vercel 32 | .vercel 33 | 34 | # typescript 35 | *.tsbuildinfo 36 | next-env.d.ts 37 | -------------------------------------------------------------------------------- /test-exports-vercel/next.config.js: -------------------------------------------------------------------------------- 1 | /** @type {import('next').NextConfig} */ 2 | const nextConfig = { 3 | reactStrictMode: true, 4 | webpack(config) { 5 | config.experiments = { 6 | asyncWebAssembly: true, 7 | layers: true, 8 | }; 9 | 10 | return config; 11 | }, 12 | }; 13 | 14 | module.exports = nextConfig; 15 | -------------------------------------------------------------------------------- /test-exports-vercel/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "test-exports-vercel", 3 | "version": "0.1.0", 4 | "private": true, 5 | "scripts": { 6 | "dev": "next dev", 7 | "build": "next build", 8 | "start": "next start", 9 | "test": "next lint" 10 | }, 11 | "dependencies": { 12 | "@types/node": "18.15.11", 13 | "@types/react": "18.0.33", 14 | "@types/react-dom": "18.0.11", 15 | "eslint": "8.37.0", 16 | "eslint-config-next": "13.3.0", 17 | "langchain-gpt4all": "workspace:*", 18 | "next": "13.3.0", 19 | "react": "18.2.0", 20 | "react-dom": "18.2.0", 21 | "typescript": "^5.0.0" 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /test-exports-vercel/public/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lujstn/langchainjs-gpt4all/8a90c9c1c6a2e2e8590523106db3c533724d6ceb/test-exports-vercel/public/favicon.ico -------------------------------------------------------------------------------- /test-exports-vercel/public/vercel.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /test-exports-vercel/src/pages/_app.tsx: -------------------------------------------------------------------------------- 1 | import '@/styles/globals.css' 2 | import type { AppProps } from 'next/app' 3 | 4 | export default function App({ Component, pageProps }: AppProps) { 5 | return <Component {...pageProps} /> 6 | } 7 | -------------------------------------------------------------------------------- /test-exports-vercel/src/pages/_document.tsx: -------------------------------------------------------------------------------- 1 | import { Html, Head, Main, NextScript } from 'next/document' 2 | 3 | export default function Document() { 4 | return ( 5 | <Html lang="en"> 6 | <Head /> 7 | <body> 8 | <Main />
9 | <NextScript /> 10 | </body> 11 | </Html> 12 | ) 13 | } 14 | -------------------------------------------------------------------------------- /test-exports-vercel/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "es5", 4 | "lib": ["dom", "dom.iterable", "esnext"], 5 | "allowJs": true, 6 | "skipLibCheck": true, 7 | "strict": true, 8 | "forceConsistentCasingInFileNames": true, 9 | "noEmit": true, 10 | "esModuleInterop": true, 11 | "module": "esnext", 12 | "moduleResolution": "node", 13 | "resolveJsonModule": true, 14 | "isolatedModules": true, 15 | "jsx": "preserve", 16 | "incremental": true, 17 | "paths": { 18 | "@/*": ["./src/*"] 19 | } 20 | }, 21 | "include": ["next-env.d.ts", "**/*.ts", "**/*.tsx"], 22 | "exclude": ["node_modules"] 23 | } 24 | -------------------------------------------------------------------------------- /test-exports-vite/.gitignore: -------------------------------------------------------------------------------- 1 | # Logs 2 | logs 3 | *.log 4 | npm-debug.log* 5 | yarn-debug.log* 6 | yarn-error.log* 7 | pnpm-debug.log* 8 | lerna-debug.log* 9 | 10 | node_modules 11 | dist 12 | dist-ssr 13 | *.local 14 | 15 | # Editor directories and files 16 | .vscode/* 17 | !.vscode/extensions.json 18 | .idea 19 | .DS_Store 20 | *.suo 21 | *.ntvs* 22 | *.njsproj 23 | *.sln 24 | *.sw? 25 | -------------------------------------------------------------------------------- /test-exports-vite/index.html: -------------------------------------------------------------------------------- 1 | <!DOCTYPE html> 2 | <html lang="en"> 3 | <head> 4 | <meta charset="UTF-8" /> 5 | <link rel="icon" type="image/svg+xml" href="/vite.svg" /> 6 | <meta name="viewport" content="width=device-width, initial-scale=1.0" /> 7 | <title>Vite + TS</title> 8 | </head> 9 | <body> 10 | <div id="app"></div>
11 | <script type="module" src="/src/main.ts"></script> 12 | </body> 13 | </html> 14 | -------------------------------------------------------------------------------- /test-exports-vite/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "test-exports-vite", 3 | "private": true, 4 | "version": "0.0.0", 5 | "type": "module", 6 | "scripts": { 7 | "dev": "vite", 8 | "build": "vite build", 9 | "preview": "vite preview", 10 | "test": "tsc" 11 | }, 12 | "dependencies": { 13 | "langchain-gpt4all": "workspace:*" 14 | }, 15 | "devDependencies": { 16 | "typescript": "^5.0.0", 17 | "vite": "^4.2.0", 18 | "vite-plugin-top-level-await": "^1.3.0", 19 | "vite-plugin-wasm": "^3.2.2" 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /test-exports-vite/src/main.ts: -------------------------------------------------------------------------------- 1 | // import all entrypoints to test, do not do this in your own app 2 | import "./entrypoints.js"; 3 | 4 | import "./style.css"; 5 | import typescriptLogo from "./typescript.svg"; 6 | import viteLogo from "/vite.svg"; 7 | import { setupChain } from "./chain"; 8 | 9 | document.querySelector<HTMLDivElement>("#app")!.innerHTML = ` 10 | <div>
11 | <a href="https://vitejs.dev" target="_blank"> 12 | <img src="${viteLogo}" class="logo" alt="Vite logo" /> 13 | </a> 14 | <a href="https://www.typescriptlang.org/" target="_blank"> 15 | <img src="${typescriptLogo}" class="logo vanilla" alt="TypeScript logo" /> 16 | </a> 17 | <h1>Vite + TypeScript</h1> 18 | <div class="card"> 19 | <pre id="chain"></pre> 20 | </div> 21 | <p class="read-the-docs"> 22 | Click on the Vite and TypeScript logos to learn more 23 | </p> 24 | </div>
25 | `; 26 | 27 | setupChain(document.querySelector("#chain")!); 28 | -------------------------------------------------------------------------------- /test-exports-vite/src/vite-env.d.ts: -------------------------------------------------------------------------------- 1 | /// <reference types="vite/client" /> 2 | -------------------------------------------------------------------------------- /test-exports-vite/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "ESNext", 4 | "useDefineForClassFields": true, 5 | "module": "ESNext", 6 | "lib": ["ESNext", "DOM"], 7 | "moduleResolution": "Node", 8 | "strict": true, 9 | "resolveJsonModule": true, 10 | "isolatedModules": true, 11 | "esModuleInterop": true, 12 | "noEmit": true, 13 | "noUnusedLocals": true, 14 | "noUnusedParameters": true, 15 | "noImplicitReturns": true, 16 | "skipLibCheck": true 17 | }, 18 | "include": ["src"] 19 | } 20 | -------------------------------------------------------------------------------- /test-exports-vite/vite.config.js: -------------------------------------------------------------------------------- 1 | import wasm from "vite-plugin-wasm"; 2 | import topLevelAwait from "vite-plugin-top-level-await"; 3 | import { defineConfig } from "vite"; 4 | 5 | export default defineConfig({ 6 | plugins: [wasm(), topLevelAwait()], 7 | }); 8 | -------------------------------------------------------------------------------- /turbo.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://turbo.build/schema.json", 3 | "globalDependencies": [ 4 | "**/.env" 5 | ], 6 | "pipeline": { 7 | "build": { 8 | "dependsOn": [ 9 | "^build" 10 | ], 11 | "outputs": [ 12 | "dist/**", 13 | "dist-cjs/**", 14 | "*.js", 15 | "*.cjs", 16 | "*.d.ts" 17 | ], 18 | "inputs": [ 19 | "src/**", 20 | "scripts/**", 21 | "package.json", 22 | "tsconfig.json" 23 | ] 24 | }, 25 | "lint": { 26 | "outputs": [] 27 | }, 28 | "format": { 29 | "outputs": [] 30 | }, 31 | "format:check": { 32 | "outputs": [] 33 | }, 34 | "test": { 35 | "outputs": [], 36 | "dependsOn": [ 37 | "^build" 38 | ] 39 | }, 40 | "test:integration": { 41 | "outputs": [], 42 | "dependsOn": [ 43 | "^build" 44 | ] 45 | }, 46 | "precommit": {}, 47 | "start": { 48 | "cache": false 49 | } 50 | } 51 | } 52 | --------------------------------------------------------------------------------