├── static
├── .nojekyll
└── img
│ ├── favicon.ico
│ ├── docusaurus.png
│ ├── HeliconeKeys.png
│ ├── favicon-16x16.png
│ ├── favicon-32x32.png
│ ├── parrot-icon.png
│ ├── DataberryDashboard.png
│ ├── HeliconeDashboard.png
│ ├── apple-touch-icon.png
│ ├── android-chrome-192x192.png
│ ├── android-chrome-512x512.png
│ ├── parrot-chainlink-icon.png
│ └── hmbd.js
├── examples
├── src
│ ├── document_loaders
│ │ ├── example_data
│ │ │ ├── example.txt
│ │ │ └── bitcoin.pdf
│ │ ├── hn.ts
│ │ ├── pdf.ts
│ │ ├── text.ts
│ │ ├── imsdb.ts
│ │ ├── cheerio_web.ts
│ │ ├── unstructured.ts
│ │ ├── srt.ts
│ │ ├── unstructured_directory.ts
│ │ ├── figma.ts
│ │ ├── notion_markdown.ts
│ │ ├── github.ts
│ │ ├── notiondb.ts
│ │ ├── college_confidential.ts
│ │ ├── github_ignore_paths.ts
│ │ ├── s3.ts
│ │ ├── gitbook.ts
│ │ ├── confluence.ts
│ │ ├── pdf_directory.ts
│ │ └── puppeteer_web.ts
│ ├── indexes
│ │ ├── vector_stores
│ │ │ ├── prisma_vectorstore
│ │ │ │ ├── .gitignore
│ │ │ │ ├── .env.example
│ │ │ │ ├── prisma
│ │ │ │ │ ├── migrations
│ │ │ │ │ │ ├── migration_lock.toml
│ │ │ │ │ │ └── 00_init
│ │ │ │ │ │ │ └── migration.sql
│ │ │ │ │ └── schema.prisma
│ │ │ │ └── docker-compose.example.yml
│ │ │ ├── redis
│ │ │ │ ├── docker-compose.yml
│ │ │ │ └── redis.ts
│ │ │ ├── typeorm_vectorstore
│ │ │ │ ├── docker-compose.example.yml
│ │ │ │ └── typeorm.ts
│ │ │ ├── hnswlib.ts
│ │ │ ├── milvus.ts
│ │ │ ├── faiss.ts
│ │ │ ├── memory.ts
│ │ │ ├── hnswlib_filter.ts
│ │ │ ├── faiss_loadfrompython.ts
│ │ │ ├── chroma
│ │ │ │ ├── search.ts
│ │ │ │ └── fromDocs.ts
│ │ │ ├── memory_custom_similarity.ts
│ │ │ ├── hnswlib_fromdocs.ts
│ │ │ ├── faiss_fromdocs.ts
│ │ │ ├── qdrant
│ │ │ │ ├── fromExisting.ts
│ │ │ │ └── fromDocs.ts
│ │ │ ├── memory_fromdocs.ts
│ │ │ ├── opensearch
│ │ │ │ └── opensearch.ts
│ │ │ ├── mongo_search.ts
│ │ │ ├── mongo_fromTexts.ts
│ │ │ ├── hnswlib_saveload.ts
│ │ │ ├── faiss_saveload.ts
│ │ │ ├── myscale_search.ts
│ │ │ ├── myscale_fromTexts.ts
│ │ │ ├── singlestore.ts
│ │ │ ├── weaviate_fromTexts.ts
│ │ │ ├── tigris
│ │ │ │ ├── search.ts
│ │ │ │ └── fromDocs.ts
│ │ │ ├── supabase.ts
│ │ │ └── supabase_with_metadata_filter.ts
│ │ ├── recursive_text_splitter.ts
│ │ ├── text_splitter.ts
│ │ ├── token_text_splitter.ts
│ │ └── python_text_splitter.ts
│ ├── models
│ │ ├── chat
│ │ │ ├── integration_googlevertexai.ts
│ │ │ ├── integration_openai.ts
│ │ │ ├── integration_anthropic.ts
│ │ │ ├── chat_timeout.ts
│ │ │ ├── chat_quick_start.ts
│ │ │ ├── chat_cancellation.ts
│ │ │ ├── integration_azure_openai.ts
│ │ │ ├── integration_googlevertexai-examples.ts
│ │ │ ├── chat_streaming_stdout.ts
│ │ │ └── chat_streaming.ts
│ │ ├── llm
│ │ │ ├── openai_basePath.ts
│ │ │ ├── llm_promptlayer.ts
│ │ │ ├── llm_timeout.ts
│ │ │ ├── ai21.ts
│ │ │ ├── openai_userid.ts
│ │ │ ├── llm_quick_start.ts
│ │ │ ├── llm_cancellation.ts
│ │ │ ├── llm_with_tracing.ts
│ │ │ ├── llm_streaming.ts
│ │ │ └── llm_streaming_stdout.ts
│ │ └── embeddings
│ │ │ ├── googlevertexai.ts
│ │ │ ├── cohere.ts
│ │ │ ├── openai.ts
│ │ │ ├── openai_timeout.ts
│ │ │ └── tensorflow.ts
│ ├── chains
│ │ ├── load_from_hub.ts
│ │ ├── conversation_chain.ts
│ │ ├── summarization.ts
│ │ ├── question_answering_map_reduce.ts
│ │ ├── llm_chain_stream.ts
│ │ ├── llm_chain.ts
│ │ ├── sql_db.ts
│ │ ├── advanced_subclass.ts
│ │ ├── llm_chain_chat.ts
│ │ ├── sql_db_sql_output.ts
│ │ ├── llm_chain_cancellation.ts
│ │ ├── question_answering.ts
│ │ └── retrieval_qa_with_remote.ts
│ ├── prompts
│ │ ├── load_from_hub.ts
│ │ └── regex_parser.ts
│ ├── callbacks
│ │ ├── docs_constructor_callbacks.ts
│ │ ├── docs_request_callbacks.ts
│ │ ├── docs_verbose.ts
│ │ ├── custom_handler.ts
│ │ └── console_handler.ts
│ ├── embeddings
│ │ ├── cohere.ts
│ │ ├── openai.ts
│ │ └── max_concurrency.ts
│ ├── llms
│ │ ├── cohere.ts
│ │ ├── hf.ts
│ │ ├── openai.ts
│ │ ├── replicate.ts
│ │ ├── openai-chat.ts
│ │ └── googlevertexai.ts
│ ├── retrievers
│ │ ├── chatgpt-plugin.ts
│ │ ├── zep.ts
│ │ ├── databerry.ts
│ │ ├── metal.ts
│ │ ├── vespa.ts
│ │ ├── supabase_hybrid.ts
│ │ └── hyde.ts
│ ├── cache
│ │ └── momento.ts
│ ├── agents
│ │ ├── plan_and_execute.ts
│ │ ├── mrkl.ts
│ │ ├── aiplugin-tool.ts
│ │ ├── zapier_mrkl.ts
│ │ ├── mrkl_with_tracing.ts
│ │ ├── load_from_hub.ts
│ │ ├── chat_mrkl.ts
│ │ ├── json.ts
│ │ ├── agent_timeout.ts
│ │ └── custom_tool.ts
│ ├── README.md
│ ├── chat
│ │ ├── llm_chain.ts
│ │ └── memory.ts
│ └── memory
│ │ ├── buffer.ts
│ │ ├── zep.ts
│ │ ├── buffer_window.ts
│ │ ├── entity.ts
│ │ ├── redis-advanced.ts
│ │ ├── redis.ts
│ │ └── dynamodb-store.ts
├── .yarn
│ └── install-state.gz
├── tsconfig.json
└── .eslintrc.cjs
├── babel.config.js
├── src
├── pages
│ ├── markdown-page.md
│ ├── index.js
│ └── index.module.css
├── components
│ └── HomepageFeatures
│ │ └── styles.module.css
└── theme
│ └── SearchBar.js
├── blog
├── 2021-08-26-welcome
│ ├── docusaurus-plushie-banner.jpeg
│ └── index.md
├── 2019-05-28-first-blog-post.md
├── authors.yml
└── 2021-08-01-mdx-blog-post.mdx
├── docs
├── modules
│ ├── memory
│ │ └── examples
│ │ │ ├── index.mdx
│ │ │ ├── buffer_memory_chat.mdx
│ │ │ ├── vector_store_memory.mdx
│ │ │ ├── entity_memory.mdx
│ │ │ ├── dynamodb.mdx
│ │ │ ├── zep_memory.mdx
│ │ │ ├── conversation_summary.mdx
│ │ │ ├── momento.mdx
│ │ │ ├── buffer_window_memory.md
│ │ │ └── redis.mdx
│ ├── indexes
│ │ ├── text_splitters
│ │ │ └── examples
│ │ │ │ ├── index.mdx
│ │ │ │ ├── character.mdx
│ │ │ │ ├── token.mdx
│ │ │ │ └── code.mdx
│ │ ├── vector_stores
│ │ │ └── integrations
│ │ │ │ ├── index.mdx
│ │ │ │ ├── weaviate.mdx
│ │ │ │ ├── chroma.mdx
│ │ │ │ ├── myscale.mdx
│ │ │ │ ├── memory.mdx
│ │ │ │ ├── tigris.mdx
│ │ │ │ ├── singlestore.mdx
│ │ │ │ ├── redis.mdx
│ │ │ │ └── typeorm.mdx
│ │ ├── document_loaders
│ │ │ └── examples
│ │ │ │ ├── index.mdx
│ │ │ │ ├── web_loaders
│ │ │ │ ├── index.mdx
│ │ │ │ ├── notiondb.mdx
│ │ │ │ ├── hn.md
│ │ │ │ ├── imsdb.md
│ │ │ │ ├── figma.mdx
│ │ │ │ ├── college_confidential.md
│ │ │ │ ├── confluence.mdx
│ │ │ │ ├── s3.mdx
│ │ │ │ ├── github.mdx
│ │ │ │ └── gitbook.md
│ │ │ │ └── file_loaders
│ │ │ │ ├── index.mdx
│ │ │ │ ├── text.md
│ │ │ │ ├── docx.md
│ │ │ │ ├── subtitles.md
│ │ │ │ ├── notion_markdown.mdx
│ │ │ │ ├── epub.md
│ │ │ │ ├── unstructured.mdx
│ │ │ │ └── jsonlines.md
│ │ ├── retrievers
│ │ │ ├── vectorstore.md
│ │ │ ├── databerry-retriever.mdx
│ │ │ ├── remote-retriever.mdx
│ │ │ ├── index.mdx
│ │ │ ├── metal-retriever.mdx
│ │ │ ├── chatgpt-retriever-plugin.mdx
│ │ │ ├── chroma-self-query.mdx
│ │ │ ├── pinecone-self-query.mdx
│ │ │ ├── contextual-compression-retriever.mdx
│ │ │ ├── zep-retriever.mdx
│ │ │ ├── hyde.mdx
│ │ │ ├── time-weighted-retriever.mdx
│ │ │ └── vespa-retriever.mdx
│ │ └── index.mdx
│ ├── schema
│ │ ├── index.mdx
│ │ ├── example.md
│ │ ├── document.md
│ │ └── chat-messages.md
│ ├── agents
│ │ ├── agents
│ │ │ ├── action
│ │ │ │ ├── index.mdx
│ │ │ │ ├── chat_mrkl.mdx
│ │ │ │ ├── llm_mrkl.mdx
│ │ │ │ └── structured_chat.mdx
│ │ │ ├── custom
│ │ │ │ ├── index.mdx
│ │ │ │ ├── custom_agent_chat.mdx
│ │ │ │ ├── custom_agent_llm.mdx
│ │ │ │ └── custom_prompt_chat.mdx
│ │ │ ├── plan_execute
│ │ │ │ └── index.mdx
│ │ │ └── index.mdx
│ │ ├── toolkits
│ │ │ ├── sql.mdx
│ │ │ ├── vectorstore.mdx
│ │ │ └── index.mdx
│ │ ├── executor
│ │ │ ├── index.mdx
│ │ │ └── getting-started.mdx
│ │ └── tools
│ │ │ ├── zapier_agent.mdx
│ │ │ ├── webbrowser.mdx
│ │ │ └── index.mdx
│ ├── chains
│ │ ├── other_chains
│ │ │ ├── index.mdx
│ │ │ ├── api_chain.mdx
│ │ │ ├── multi_prompt_chain.mdx
│ │ │ ├── analyze_document.mdx
│ │ │ ├── multi_retrieval_qa_chain.mdx
│ │ │ ├── constitutional_chain.mdx
│ │ │ ├── moderation_chain.mdx
│ │ │ ├── summarization.mdx
│ │ │ └── sql.mdx
│ │ ├── index_related_chains
│ │ │ ├── index.mdx
│ │ │ ├── retrieval_qa.mdx
│ │ │ └── document_qa.mdx
│ │ ├── sequential_chain.mdx
│ │ ├── index.mdx
│ │ └── prompt_selectors
│ │ │ └── index.mdx
│ ├── prompts
│ │ ├── index.mdx
│ │ └── prompt_templates
│ │ │ ├── prompt_composition.mdx
│ │ │ └── index.mdx
│ └── models
│ │ ├── llms
│ │ └── index.mdx
│ │ ├── index.mdx
│ │ └── chat
│ │ └── index.mdx
├── use_cases
│ ├── autonomous_agents
│ │ ├── index.mdx
│ │ ├── auto_gpt.mdx
│ │ └── baby_agi.mdx
│ ├── agent_simulations
│ │ ├── index.mdx
│ │ └── generative_agents.mdx
│ ├── summarization.mdx
│ ├── api.mdx
│ ├── personal_assistants.mdx
│ ├── tabular.mdx
│ └── question_answering.mdx
├── production
│ ├── callbacks
│ │ ├── creating-subclasses.mdx
│ │ └── create-handlers.mdx
│ └── deployment.md
└── ecosystem
│ └── databerry.md
├── .gitignore
└── README.md
/static/.nojekyll:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/examples/src/document_loaders/example_data/example.txt:
--------------------------------------------------------------------------------
1 | Foo
2 | Bar
3 | Baz
4 |
5 |
--------------------------------------------------------------------------------
/examples/src/indexes/vector_stores/prisma_vectorstore/.gitignore:
--------------------------------------------------------------------------------
1 | data
2 | docker-compose.yml
--------------------------------------------------------------------------------
/static/img/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/liteli1987gmail/js-langchain-CN/HEAD/static/img/favicon.ico
--------------------------------------------------------------------------------
/babel.config.js:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | presets: [require.resolve('@docusaurus/core/lib/babel/preset')],
3 | };
4 |
--------------------------------------------------------------------------------
/static/img/docusaurus.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/liteli1987gmail/js-langchain-CN/HEAD/static/img/docusaurus.png
--------------------------------------------------------------------------------
/static/img/HeliconeKeys.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/liteli1987gmail/js-langchain-CN/HEAD/static/img/HeliconeKeys.png
--------------------------------------------------------------------------------
/static/img/favicon-16x16.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/liteli1987gmail/js-langchain-CN/HEAD/static/img/favicon-16x16.png
--------------------------------------------------------------------------------
/static/img/favicon-32x32.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/liteli1987gmail/js-langchain-CN/HEAD/static/img/favicon-32x32.png
--------------------------------------------------------------------------------
/static/img/parrot-icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/liteli1987gmail/js-langchain-CN/HEAD/static/img/parrot-icon.png
--------------------------------------------------------------------------------
/examples/.yarn/install-state.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/liteli1987gmail/js-langchain-CN/HEAD/examples/.yarn/install-state.gz
--------------------------------------------------------------------------------
/static/img/DataberryDashboard.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/liteli1987gmail/js-langchain-CN/HEAD/static/img/DataberryDashboard.png
--------------------------------------------------------------------------------
/static/img/HeliconeDashboard.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/liteli1987gmail/js-langchain-CN/HEAD/static/img/HeliconeDashboard.png
--------------------------------------------------------------------------------
/static/img/apple-touch-icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/liteli1987gmail/js-langchain-CN/HEAD/static/img/apple-touch-icon.png
--------------------------------------------------------------------------------
/static/img/android-chrome-192x192.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/liteli1987gmail/js-langchain-CN/HEAD/static/img/android-chrome-192x192.png
--------------------------------------------------------------------------------
/static/img/android-chrome-512x512.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/liteli1987gmail/js-langchain-CN/HEAD/static/img/android-chrome-512x512.png
--------------------------------------------------------------------------------
/static/img/parrot-chainlink-icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/liteli1987gmail/js-langchain-CN/HEAD/static/img/parrot-chainlink-icon.png
--------------------------------------------------------------------------------
/src/pages/markdown-page.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Markdown page example
3 | ---
4 |
5 | # Markdown page example
6 |
7 | You don't need React to write simple standalone pages.
8 |
--------------------------------------------------------------------------------
/blog/2021-08-26-welcome/docusaurus-plushie-banner.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/liteli1987gmail/js-langchain-CN/HEAD/blog/2021-08-26-welcome/docusaurus-plushie-banner.jpeg
--------------------------------------------------------------------------------
/examples/src/document_loaders/example_data/bitcoin.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/liteli1987gmail/js-langchain-CN/HEAD/examples/src/document_loaders/example_data/bitcoin.pdf
--------------------------------------------------------------------------------
/examples/src/indexes/vector_stores/prisma_vectorstore/.env.example:
--------------------------------------------------------------------------------
1 | # Add DATABASE_URL to .env file in this directory
2 | DATABASE_URL=postgresql://[USERNAME]:[PASSWORD]@[ADDR]/[DBNAME]
--------------------------------------------------------------------------------
/docs/modules/memory/examples/index.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | sidebar_label: Examples
3 | ---
4 |
5 | import DocCardList from "@theme/DocCardList";
6 |
7 |
8 | # Examples: Memory
9 |
10 | <DocCardList />
11 |
12 |
13 |
--------------------------------------------------------------------------------
/examples/src/indexes/vector_stores/redis/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3'
2 | services:
3 | redis:
4 | container_name: redis-stack
5 | image: redis/redis-stack:latest
6 | ports:
7 | - 6379:6379
--------------------------------------------------------------------------------
/examples/src/models/chat/integration_googlevertexai.ts:
--------------------------------------------------------------------------------
1 | import { ChatGoogleVertexAI } from "langchain/chat_models/googlevertexai";
2 |
3 | const model = new ChatGoogleVertexAI({
4 | temperature: 0.7,
5 | });
6 |
--------------------------------------------------------------------------------
/docs/modules/indexes/text_splitters/examples/index.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | sidebar_label: Examples
3 | ---
4 |
5 | import DocCardList from "@theme/DocCardList";
6 |
7 |
8 | # Text Splitters: Examples
9 |
10 | <DocCardList />
11 |
12 |
13 |
--------------------------------------------------------------------------------
/docs/modules/indexes/vector_stores/integrations/index.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | sidebar_label: Integrations
3 | ---
4 |
5 | import DocCardList from "@theme/DocCardList";
6 |
7 |
8 | # Vector Stores: Integrations
9 |
10 | <DocCardList />
11 |
12 |
13 |
--------------------------------------------------------------------------------
/docs/modules/schema/index.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | sidebar_position: 1
3 | ---
4 |
5 | import DocCardList from "@theme/DocCardList";
6 |
7 |
8 | # Schema
9 |
10 | This section covers the interfaces used throughout the library.
11 |
12 | <DocCardList />
13 |
14 |
15 |
--------------------------------------------------------------------------------
/examples/src/indexes/vector_stores/prisma_vectorstore/prisma/migrations/migration_lock.toml:
--------------------------------------------------------------------------------
1 | # Please do not edit this file manually
2 | # It should be added in your version-control system (i.e. Git)
3 | provider = "postgresql"
--------------------------------------------------------------------------------
/src/components/HomepageFeatures/styles.module.css:
--------------------------------------------------------------------------------
1 | .features {
2 | display: flex;
3 | align-items: center;
4 | padding: 2rem 0;
5 | width: 100%;
6 | }
7 |
8 | .featureSvg {
9 | height: 200px;
10 | width: 200px;
11 | }
12 |
--------------------------------------------------------------------------------
/docs/modules/agents/agents/action/index.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | sidebar_position: 1
4 | ---
5 |
6 | import DocCardList from "@theme/DocCardList";
7 |
8 |
9 | # Action Agents
10 |
11 | <DocCardList />
12 |
13 |
14 |
--------------------------------------------------------------------------------
/docs/modules/indexes/document_loaders/examples/index.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | sidebar_label: Examples
3 | hide_table_of_contents: true
4 | ---
5 |
6 | import DocCardList from "@theme/DocCardList";
7 |
8 |
9 | # Examples: Document Loaders
10 |
11 | <DocCardList />
12 |
13 |
14 |
--------------------------------------------------------------------------------
/docs/modules/chains/other_chains/index.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | sidebar_label: Other Chains
4 | ---
5 |
6 | import DocCardList from "@theme/DocCardList";
7 |
8 |
9 | # Other Chains
10 |
11 | This section covers examples of other chains that exist.
12 |
13 | <DocCardList />
14 |
15 |
16 |
--------------------------------------------------------------------------------
/examples/src/models/chat/integration_openai.ts:
--------------------------------------------------------------------------------
1 | import { ChatOpenAI } from "langchain/chat_models/openai";
2 |
3 | const model = new ChatOpenAI({
4 | temperature: 0.9,
5 | openAIApiKey: "YOUR-API-KEY", // In Node.js defaults to process.env.OPENAI_API_KEY
6 | });
7 |
--------------------------------------------------------------------------------
/examples/src/models/chat/integration_anthropic.ts:
--------------------------------------------------------------------------------
1 | import { ChatAnthropic } from "langchain/chat_models/anthropic";
2 |
3 | const model = new ChatAnthropic({
4 | temperature: 0.9,
5 | apiKey: "YOUR-API-KEY", // In Node.js defaults to process.env.ANTHROPIC_API_KEY
6 | });
7 |
--------------------------------------------------------------------------------
/examples/src/chains/load_from_hub.ts:
--------------------------------------------------------------------------------
1 | import { loadChain } from "langchain/chains/load";
2 |
3 | export const run = async () => {
4 | const chain = await loadChain("lc://chains/hello-world/chain.json");
5 |   const res = await chain.call({ topic: "foo" });
6 | console.log(res);
7 | };
8 |
--------------------------------------------------------------------------------
/examples/src/prompts/load_from_hub.ts:
--------------------------------------------------------------------------------
1 | import { loadPrompt } from "langchain/prompts/load";
2 |
3 | export const run = async () => {
4 | const prompt = await loadPrompt("lc://prompts/hello-world/prompt.yaml");
5 | const res = await prompt.format({});
6 | console.log({ res });
7 | };
8 |
--------------------------------------------------------------------------------
/docs/modules/indexes/retrievers/vectorstore.md:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | ---
4 |
5 | # Vector Store
6 |
7 | Once you've created a [vector store](../vector_stores/), it's very easy to use it as a retriever:
8 |
9 | ```typescript
10 | const vectorStore = ...;
11 | const retriever = vectorStore.asRetriever();
12 | ```
13 |
--------------------------------------------------------------------------------
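The vector store construction is elided in the snippet above. Below is a runnable sketch of the same pattern using the in-memory `MemoryVectorStore`; any vector store exposing `asRetriever()` works the same way (the store choice and sample texts here are illustrative):

```typescript
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { OpenAIEmbeddings } from "langchain/embeddings/openai";

// Any vector store works here; MemoryVectorStore is used only for illustration.
const vectorStore = await MemoryVectorStore.fromTexts(
  ["Hello world", "Bye bye", "hello nice world"],
  [{ id: 2 }, { id: 1 }, { id: 3 }],
  new OpenAIEmbeddings()
);

// Wrap the store as a retriever and fetch documents relevant to a query.
const retriever = vectorStore.asRetriever();
const docs = await retriever.getRelevantDocuments("hello world");
console.log(docs);
```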
/examples/src/document_loaders/hn.ts:
--------------------------------------------------------------------------------
1 | import { HNLoader } from "langchain/document_loaders/web/hn";
2 |
3 | export const run = async () => {
4 | const loader = new HNLoader("https://news.ycombinator.com/item?id=34817881");
5 | const docs = await loader.load();
6 | console.log({ docs });
7 | };
8 |
--------------------------------------------------------------------------------
/docs/modules/indexes/document_loaders/examples/web_loaders/index.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | sidebar_label: Web Loaders
3 | hide_table_of_contents: true
4 | ---
5 |
6 | # Web Loaders
7 |
8 | These loaders are used to load web resources.
9 |
10 | import DocCardList from "@theme/DocCardList";
11 |
12 | <DocCardList />
13 |
14 |
15 |
16 |
--------------------------------------------------------------------------------
/docs/modules/agents/agents/custom/index.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | ---
4 |
5 | import DocCardList from "@theme/DocCardList";
6 |
7 |
8 | # Custom Agents
9 |
10 | See the examples below for ways to create custom agents.
11 |
12 | The first example only uses a custom prompt prefix and suffix, which is easier to start with. The other two use fully custom prompts and output parsers.
13 |
14 | <DocCardList />
15 |
16 |
17 |
--------------------------------------------------------------------------------
/examples/src/document_loaders/pdf.ts:
--------------------------------------------------------------------------------
1 | import { PDFLoader } from "langchain/document_loaders/fs/pdf";
2 |
3 | export const run = async () => {
4 | const loader = new PDFLoader("src/document_loaders/example_data/bitcoin.pdf");
5 |
6 | const docs = await loader.load();
7 |
8 | console.log({ docs });
9 | };
10 |
--------------------------------------------------------------------------------
/static/img/hmbd.js:
--------------------------------------------------------------------------------
1 | (function() {
2 | var _hmt = _hmt || [];
3 | (function() {
4 | var hm = document.createElement("script");
5 | hm.src = "https://hm.baidu.com/hm.js?e60fb290e204e04c5cb6f79b0ac1e697";
6 | var s = document.getElementsByTagName("script")[0];
7 | s.parentNode.insertBefore(hm, s);
8 | })();
9 | })();
--------------------------------------------------------------------------------
/examples/src/document_loaders/text.ts:
--------------------------------------------------------------------------------
1 | import { TextLoader } from "langchain/document_loaders/fs/text";
2 |
3 | export const run = async () => {
4 | const loader = new TextLoader(
5 | "src/document_loaders/example_data/example.txt"
6 | );
7 | const docs = await loader.load();
8 | console.log({ docs });
9 | };
10 |
--------------------------------------------------------------------------------
/examples/src/document_loaders/imsdb.ts:
--------------------------------------------------------------------------------
1 | import { IMSDBLoader } from "langchain/document_loaders/web/imsdb";
2 |
3 | export const run = async () => {
4 | const loader = new IMSDBLoader(
5 | "https://imsdb.com/scripts/BlacKkKlansman.html"
6 | );
7 | const docs = await loader.load();
8 | console.log({ docs });
9 | };
10 |
--------------------------------------------------------------------------------
/examples/src/indexes/vector_stores/prisma_vectorstore/docker-compose.example.yml:
--------------------------------------------------------------------------------
1 | services:
2 | db:
3 | image: ankane/pgvector
4 | ports:
5 | - 5432:5432
6 | volumes:
7 | - ./data:/var/lib/postgresql/data
8 | environment:
9 | - POSTGRES_PASSWORD=
10 | - POSTGRES_USER=
11 | - POSTGRES_DB=
--------------------------------------------------------------------------------
/examples/src/callbacks/docs_constructor_callbacks.ts:
--------------------------------------------------------------------------------
1 | import { ConsoleCallbackHandler } from "langchain/callbacks";
2 | import { OpenAI } from "langchain/llms/openai";
3 |
4 | const llm = new OpenAI({
5 | temperature: 0,
6 | // This handler will be used for all calls made with this LLM.
7 | callbacks: [new ConsoleCallbackHandler()],
8 | });
9 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Dependencies
2 | /node_modules
3 |
4 | # Production
5 | /build
6 |
7 | # Generated files
8 | .docusaurus
9 | .cache-loader
10 |
11 | # Misc
12 | .DS_Store
13 | .env.local
14 | .env.development.local
15 | .env.test.local
16 | .env.production.local
17 |
18 | npm-debug.log*
19 | yarn-debug.log*
20 | yarn-error.log*
21 |
22 | .vercel
23 |
--------------------------------------------------------------------------------
/docs/modules/chains/other_chains/api_chain.mdx:
--------------------------------------------------------------------------------
1 | import CodeBlock from "@theme/CodeBlock";
2 |
3 | import APIExample from "!!raw-loader!@examples/chains/api_chain.ts";
4 |
5 |
6 | # `APIChain`
7 |
8 | APIChain can be used to interact with APIs using an LLM in order to retrieve relevant information. Construct the chain by providing documentation for the API in question; questions can then be asked against it.
9 |
10 |
11 | <CodeBlock language="typescript">{APIExample}</CodeBlock>
12 |
13 |
--------------------------------------------------------------------------------
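A minimal sketch of the pattern described in the page above; the `apiDocs` string is a hypothetical placeholder you would replace with documentation for a real API:

```typescript
import { OpenAI } from "langchain/llms/openai";
import { APIChain } from "langchain/chains";

// Hypothetical API documentation; substitute the docs of a real API.
const apiDocs = `BASE URL: https://api.example.com/

GET /weather?city={city}

Returns the current weather for the given city.`;

const model = new OpenAI({ temperature: 0 });
const chain = APIChain.fromLLMAndAPIDocs(model, apiDocs);

const res = await chain.call({
  question: "What is the weather like in Berlin?",
});
console.log({ res });
```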
/docs/use_cases/autonomous_agents/index.mdx:
--------------------------------------------------------------------------------
1 | import DocCardList from "@theme/DocCardList";
2 |
3 |
4 | # Autonomous Agents
5 |
6 | Autonomous agents are agents designed to run for longer stretches of time. You give them one or multiple long-term goals, and they independently execute towards those goals. These applications combine tool usage and long-term memory.
7 |
8 | At the moment, autonomous agents are fairly experimental and based on other open-source projects. By implementing these open-source projects in LangChain primitives we get the benefits of LangChain: easy switching and experimenting with multiple LLMs, use of different vector stores as memory, and use of LangChain's collection of tools.
9 |
10 | <DocCardList />
11 |
12 |
13 |
--------------------------------------------------------------------------------
/examples/src/indexes/vector_stores/typeorm_vectorstore/docker-compose.example.yml:
--------------------------------------------------------------------------------
1 | services:
2 | db:
3 | image: ankane/pgvector
4 | ports:
5 | - 5432:5432
6 | volumes:
7 | - ./data:/var/lib/postgresql/data
8 | environment:
9 | - POSTGRES_PASSWORD=ChangeMe
10 | - POSTGRES_USER=myuser
11 | - POSTGRES_DB=api
--------------------------------------------------------------------------------
/docs/modules/agents/toolkits/sql.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | ---
4 |
5 | # SQL Agent Toolkit
6 |
7 | This example shows how to load and use an agent with the SQL toolkit.
8 |
9 | import CodeBlock from "@theme/CodeBlock";
10 |
11 | import Example from "!!raw-loader!@examples/agents/sql.ts";
12 |
13 |
14 |
15 | <CodeBlock language="typescript">{Example}</CodeBlock>
16 |
17 |
--------------------------------------------------------------------------------
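The referenced example file (`@examples/agents/sql.ts`) is not reproduced in this listing; below is a minimal sketch of loading an agent from the SQL toolkit, assuming a local SQLite file named `Chinook.db` exists:

```typescript
import { OpenAI } from "langchain/llms/openai";
import { SqlDatabase } from "langchain/sql_db";
import { createSqlAgent, SqlToolkit } from "langchain/agents/toolkits/sql";
import { DataSource } from "typeorm";

// Assumes a SQLite database file exists at this path.
const datasource = new DataSource({
  type: "sqlite",
  database: "Chinook.db",
});
const db = await SqlDatabase.fromDataSourceParams({
  appDataSource: datasource,
});

const model = new OpenAI({ temperature: 0 });
const executor = createSqlAgent(model, new SqlToolkit(db));

const result = await executor.call({ input: "How many employees are there?" });
console.log(result.output);

await datasource.destroy();
```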
/examples/src/document_loaders/cheerio_web.ts:
--------------------------------------------------------------------------------
1 | import { CheerioWebBaseLoader } from "langchain/document_loaders/web/cheerio";
2 |
3 | export const run = async () => {
4 | const loader = new CheerioWebBaseLoader(
5 | "https://news.ycombinator.com/item?id=34817881"
6 | );
7 | const docs = await loader.load();
8 | console.log({ docs });
9 | };
10 |
--------------------------------------------------------------------------------
/examples/src/document_loaders/unstructured.ts:
--------------------------------------------------------------------------------
1 | import { UnstructuredLoader } from "langchain/document_loaders/fs/unstructured";
2 |
3 | const options = {
4 | apiKey: "MY_API_KEY",
5 | };
6 |
7 | const loader = new UnstructuredLoader(
8 | "src/document_loaders/example_data/notion.md",
9 | options
10 | );
11 | const docs = await loader.load();
12 |
--------------------------------------------------------------------------------
/examples/src/embeddings/cohere.ts:
--------------------------------------------------------------------------------
1 | import { CohereEmbeddings } from "langchain/embeddings/cohere";
2 |
3 | export const run = async () => {
4 | const model = new CohereEmbeddings();
5 | const res = await model.embedQuery(
6 | "What would be a good company name a company that makes colorful socks?"
7 | );
8 | console.log({ res });
9 | };
10 |
--------------------------------------------------------------------------------
/examples/src/embeddings/openai.ts:
--------------------------------------------------------------------------------
1 | import { OpenAIEmbeddings } from "langchain/embeddings/openai";
2 |
3 | export const run = async () => {
4 | const model = new OpenAIEmbeddings();
5 | const res = await model.embedQuery(
6 | "What would be a good company name a company that makes colorful socks?"
7 | );
8 | console.log({ res });
9 | };
10 |
--------------------------------------------------------------------------------
/docs/modules/schema/example.md:
--------------------------------------------------------------------------------
1 |
2 | # Example
3 |
4 | Examples are input/output pairs that represent inputs to a function and the expected output. They can be used in both training and evaluation of models.
5 |
6 | ```typescript
7 | type Example = Record<string, string>;
8 | ```
9 |
10 |
11 | ## Creating an Example
12 |
13 | You can create an example like this:
14 |
15 | ```typescript
16 | const example = {
17 |   input: "foo",
18 |   output: "bar",
19 | };
20 | ```
21 |
--------------------------------------------------------------------------------
/examples/src/models/llm/openai_basePath.ts:
--------------------------------------------------------------------------------
1 | import { OpenAI } from "langchain/llms/openai";
2 |
3 | const model = new OpenAI(
4 | { temperature: 0 },
5 | { basePath: "https://oai.hconeai.com/v1" }
6 | );
7 |
8 | const res = await model.call(
9 | "What would be a good company name a company that makes colorful socks?"
10 | );
11 | console.log(res);
12 |
--------------------------------------------------------------------------------
/examples/src/document_loaders/srt.ts:
--------------------------------------------------------------------------------
1 | import { SRTLoader } from "langchain/document_loaders/fs/srt";
2 |
3 | export const run = async () => {
4 | const loader = new SRTLoader(
5 | "src/document_loaders/example_data/Star_Wars_The_Clone_Wars_S06E07_Crisis_at_the_Heart.srt"
6 | );
7 | const docs = await loader.load();
8 | console.log({ docs });
9 | };
10 |
--------------------------------------------------------------------------------
/docs/production/callbacks/creating-subclasses.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | sidebar_label: Callbacks in custom Chains
3 | ---
4 |
5 | # Callbacks in custom Chains/Agents
6 |
7 | LangChain is designed to be extensible. You can add your own custom Chains and Agents to the library. This page will show you how to add callbacks to your custom Chains and Agents.
8 |
9 | ## Adding callbacks to custom Chains
10 |
11 | When you create a custom chain, you can easily set it up to use the same callback system as all the built-in chains. See this guide for more information on how to [create custom chains and use callbacks inside them](../../modules/chains#subclassing-basechain).
--------------------------------------------------------------------------------
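A minimal sketch of the subclassing pattern the guide points to: the `runManager` passed into `_call` can be forwarded (via `getChild()`) to inner LLM calls so their events flow through the same callback system. The class and key names here are illustrative, not part of the library:

```typescript
import { BaseChain } from "langchain/chains";
import { ChainValues } from "langchain/schema";
import { CallbackManagerForChainRun } from "langchain/callbacks";
import { BaseLLM } from "langchain/llms/base";

// Illustrative custom chain: wraps a single LLM call.
class SimpleCustomChain extends BaseChain {
  constructor(private llm: BaseLLM) {
    super();
  }

  get inputKeys() {
    return ["input"];
  }

  get outputKeys() {
    return ["output"];
  }

  _chainType() {
    return "simple_custom_chain";
  }

  async _call(
    values: ChainValues,
    runManager?: CallbackManagerForChainRun
  ): Promise<ChainValues> {
    // Forward the child callback manager so events from the inner LLM call
    // are reported through the same callback system as built-in chains.
    const output = await this.llm.call(
      values.input,
      undefined,
      runManager?.getChild()
    );
    return { output };
  }
}
```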
/examples/src/models/llm/llm_promptlayer.ts:
--------------------------------------------------------------------------------
1 | import { PromptLayerOpenAI } from "langchain/llms/openai";
2 |
3 | export const run = async () => {
4 | const model = new PromptLayerOpenAI({ temperature: 0.9 });
5 | const res = await model.call(
6 | "What would be a good company name a company that makes colorful socks?"
7 | );
8 | console.log({ res });
9 | };
10 |
--------------------------------------------------------------------------------
/docs/modules/agents/agents/custom/custom_agent_chat.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | ---
4 |
5 | import CodeBlock from "@theme/CodeBlock";
6 |
7 | import Example from "!!raw-loader!@examples/agents/custom_llm_agent_chat.ts";
8 |
9 |
10 | # Custom Chat Agent
11 |
12 | This example covers how to create a custom agent powered by a chat model.
13 |
14 |
15 | <CodeBlock language="typescript">{Example}</CodeBlock>
16 |
17 |
--------------------------------------------------------------------------------
/docs/modules/agents/agents/custom/custom_agent_llm.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | ---
4 |
5 | import CodeBlock from "@theme/CodeBlock";
6 |
7 | import Example from "!!raw-loader!@examples/agents/custom_llm_agent.ts";
8 |
9 |
10 | # Custom LLM Agent
11 |
12 | This example covers how to create a custom agent powered by an LLM.
13 |
14 |
15 | <CodeBlock language="typescript">{Example}</CodeBlock>
16 |
17 |
--------------------------------------------------------------------------------
/docs/modules/indexes/index.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | sidebar_position: 4
3 | hide_table_of_contents: true
4 | ---
5 |
6 | import DocCardList from "@theme/DocCardList";
7 |
8 |
9 | # Indexes
10 |
11 | :::info
12 | [Conceptual Guide](https://docs.langchain.com/docs/components/indexing)
13 | :::
14 |
15 | This section deals with everything related to bringing your own data into LangChain, indexing it, and making it available to LLMs / chat models.
16 |
17 | <DocCardList />
18 |
19 |
20 |
--------------------------------------------------------------------------------
/examples/src/models/llm/llm_timeout.ts:
--------------------------------------------------------------------------------
1 | import { OpenAI } from "langchain/llms/openai";
2 |
3 | const model = new OpenAI({ temperature: 1 });
4 |
5 | const resA = await model.call(
6 | "What would be a good company name a company that makes colorful socks?",
7 | { timeout: 1000 } // 1s timeout
8 | );
9 |
10 | console.log({ resA });
11 | // { resA: '\n\nSocktastic Colors' }
12 |
--------------------------------------------------------------------------------
/docs/modules/agents/toolkits/vectorstore.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | ---
4 |
5 | import CodeBlock from "@theme/CodeBlock";
6 |
7 | import Example from "!!raw-loader!@examples/agents/vectorstore.ts";
8 |
9 |
10 | # VectorStore Agent Toolkit
11 |
12 | This example shows how to load and use an agent with the VectorStore toolkit.
13 |
14 |
15 | <CodeBlock language="typescript">{Example}</CodeBlock>
16 |
17 |
--------------------------------------------------------------------------------
/docs/modules/prompts/index.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | sidebar_position: 3
3 | hide_table_of_contents: true
4 | sidebar_label: Prompts
5 | ---
6 |
7 | import DocCardList from "@theme/DocCardList";
8 |
9 |
10 | # Prompts
11 |
12 | :::info
13 | [Conceptual Guide](https://docs.langchain.com/docs/components/prompts)
14 | :::
15 |
16 | LangChain provides several utilities to help manage prompts for language models, including chat models.
17 |
18 | <DocCardList />
19 |
20 |
21 |
--------------------------------------------------------------------------------
/docs/use_cases/agent_simulations/index.mdx:
--------------------------------------------------------------------------------
1 | import DocCardList from "@theme/DocCardList";
2 |
3 |
4 | # Agent Simulations
5 |
6 | Agent simulations involve taking multiple agents and having them interact with each other.
7 |
8 | They tend to use a simulated environment, with an LLM as their "core", and helper classes that prompt them to take in certain inputs, such as prebuilt "observations", and react to new stimuli.
9 |
10 | They also benefit from long-term memory so that they can preserve state between interactions.
11 |
12 | Like autonomous agents, agent simulations are still experimental and based on papers such as [this one](https://arxiv.org/abs/2304.03442).
13 |
14 | <DocCardList />
15 |
16 |
17 |
--------------------------------------------------------------------------------
/examples/src/indexes/vector_stores/prisma_vectorstore/prisma/migrations/00_init/migration.sql:
--------------------------------------------------------------------------------
1 | -- CreateTable
2 | CREATE EXTENSION IF NOT EXISTS vector;
3 | CREATE TABLE "Document" (
4 | "id" TEXT NOT NULL,
5 | "content" TEXT NOT NULL,
6 | "namespace" TEXT DEFAULT 'default',
7 | "vector" vector,
8 |
9 | CONSTRAINT "Document_pkey" PRIMARY KEY ("id")
10 | );
11 |
--------------------------------------------------------------------------------
/examples/src/document_loaders/unstructured_directory.ts:
--------------------------------------------------------------------------------
1 | import { UnstructuredDirectoryLoader } from "langchain/document_loaders/fs/unstructured";
2 |
3 | const options = {
4 | apiKey: "MY_API_KEY",
5 | };
6 |
7 | const loader = new UnstructuredDirectoryLoader(
8 | "langchain/src/document_loaders/tests/example_data",
9 | options
10 | );
11 | const docs = await loader.load();
12 |
--------------------------------------------------------------------------------
/docs/modules/indexes/document_loaders/examples/file_loaders/index.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | sidebar_label: File Loaders
3 | hide_table_of_contents: true
4 | sidebar_class_name: node-only-category
5 | ---
6 |
7 | # File Loaders
8 |
9 | :::tip Compatibility
10 | Only available on Node.js.
11 | :::
12 |
13 | These loaders are used to load files given a filesystem path or a Blob object.
14 |
15 | import DocCardList from "@theme/DocCardList";
16 |
17 | <DocCardList />
18 |
19 |
20 |
21 |
--------------------------------------------------------------------------------
/examples/src/document_loaders/figma.ts:
--------------------------------------------------------------------------------
1 | import { FigmaFileLoader } from "langchain/document_loaders/web/figma";
2 |
3 | const loader = new FigmaFileLoader({
4 | accessToken: "FIGMA_ACCESS_TOKEN", // or load it from process.env.FIGMA_ACCESS_TOKEN
5 | nodeIds: ["id1", "id2", "id3"],
6 | fileKey: "key",
7 | });
8 | const docs = await loader.load();
9 |
10 | console.log({ docs });
11 |
--------------------------------------------------------------------------------
/docs/modules/indexes/retrievers/databerry-retriever.mdx:
--------------------------------------------------------------------------------
1 | # Databerry Retriever
2 |
3 | This example shows how to use the Databerry Retriever in a `RetrievalQAChain` to retrieve documents from a Databerry.ai datastore.
4 |
5 | ## Usage
6 |
7 | import CodeBlock from "@theme/CodeBlock";
8 |
9 | import Example from "!!raw-loader!@examples/retrievers/databerry.ts";
10 |
11 |
12 |
13 | <CodeBlock language="typescript">{Example}</CodeBlock>
14 |
15 |
--------------------------------------------------------------------------------
/examples/src/callbacks/docs_request_callbacks.ts:
--------------------------------------------------------------------------------
1 | import { ConsoleCallbackHandler } from "langchain/callbacks";
2 | import { OpenAI } from "langchain/llms/openai";
3 |
4 | const llm = new OpenAI({
5 | temperature: 0,
6 | });
7 |
8 | // This handler will be used only for this call.
9 | const response = await llm.call("1 + 1 =", undefined, [
10 | new ConsoleCallbackHandler(),
11 | ]);
12 |
--------------------------------------------------------------------------------
/examples/src/document_loaders/notion_markdown.ts:
--------------------------------------------------------------------------------
1 | import { NotionLoader } from "langchain/document_loaders/fs/notion";
2 |
3 | export const run = async () => {
4 | /** Provide the directory path of your notion folder */
5 | const directoryPath = "Notion_DB";
6 | const loader = new NotionLoader(directoryPath);
7 | const docs = await loader.load();
8 | console.log({ docs });
9 | };
10 |
--------------------------------------------------------------------------------
/examples/src/models/embeddings/googlevertexai.ts:
--------------------------------------------------------------------------------
1 | import { GoogleVertexAIEmbeddings } from "langchain/embeddings/googlevertexai";
2 |
3 | export const run = async () => {
4 | const model = new GoogleVertexAIEmbeddings();
5 | const res = await model.embedQuery(
6 | "What would be a good company name for a company that makes colorful socks?"
7 | );
8 | console.log({ res });
9 | };
10 |
--------------------------------------------------------------------------------
/examples/src/document_loaders/github.ts:
--------------------------------------------------------------------------------
1 | import { GithubRepoLoader } from "langchain/document_loaders/web/github";
2 |
3 | export const run = async () => {
4 | const loader = new GithubRepoLoader(
5 | "https://github.com/hwchase17/langchainjs",
6 | { branch: "main", recursive: false, unknown: "warn" }
7 | );
8 | const docs = await loader.load();
9 | console.log({ docs });
10 | };
11 |
--------------------------------------------------------------------------------
/examples/src/document_loaders/notiondb.ts:
--------------------------------------------------------------------------------
1 | import { NotionDBLoader } from "langchain/document_loaders/web/notiondb";
2 |
3 | const loader = new NotionDBLoader({
4 | pageSizeLimit: 10,
5 | databaseId: "databaseId",
6 | notionIntegrationToken: "", // Or set as process.env.NOTION_INTEGRATION_TOKEN
7 | });
8 | const docs = await loader.load();
9 |
10 | console.log({ docs });
11 |
--------------------------------------------------------------------------------
/examples/src/embeddings/max_concurrency.ts:
--------------------------------------------------------------------------------
1 | import { OpenAIEmbeddings } from "langchain/embeddings/openai";
2 |
3 | export const run = async () => {
4 | const model = new OpenAIEmbeddings({
5 | maxConcurrency: 1,
6 | });
7 | const res = await model.embedQuery(
8 | "What would be a good company name a company that makes colorful socks?"
9 | );
10 | console.log({ res });
11 | };
12 |
--------------------------------------------------------------------------------
/examples/src/models/llm/ai21.ts:
--------------------------------------------------------------------------------
1 | import { AI21 } from "langchain/llms/ai21";
2 |
3 | const model = new AI21({
4 | ai21ApiKey: "YOUR_AI21_API_KEY", // Or set as process.env.AI21_API_KEY
5 | });
6 |
7 | const res = await model.call(`Translate "I love programming" into German.`);
8 |
9 | console.log({ res });
10 |
11 | /*
12 | {
13 | res: "\nIch liebe das Programmieren."
14 | }
15 | */
16 |
--------------------------------------------------------------------------------
/examples/src/document_loaders/college_confidential.ts:
--------------------------------------------------------------------------------
1 | import { CollegeConfidentialLoader } from "langchain/document_loaders/web/college_confidential";
2 |
3 | export const run = async () => {
4 | const loader = new CollegeConfidentialLoader(
5 | "https://www.collegeconfidential.com/colleges/brown-university/"
6 | );
7 | const docs = await loader.load();
8 | console.log({ docs });
9 | };
10 |
--------------------------------------------------------------------------------
/examples/src/models/llm/openai_userid.ts:
--------------------------------------------------------------------------------
1 | import { OpenAI } from "langchain/llms/openai";
2 |
3 | const model = new OpenAI({ temperature: 0 });
4 |
5 | const res = await model.call(
6 | "What would be a good company name a company that makes colorful socks?",
7 | {
8 | options: {
9 | headers: {
10 | "User-Id": "123",
11 | },
12 | },
13 | }
14 | );
15 | console.log(res);
16 |
--------------------------------------------------------------------------------
/docs/modules/chains/index_related_chains/index.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | sidebar_label: Index-Related Chains
4 | sidebar_position: 2
5 | ---
6 |
7 | import DocCardList from "@theme/DocCardList";
8 |
9 |
10 | # Index-Related Chains
11 |
12 | :::info
13 | [Conceptual Guide](https://docs.langchain.com/docs/components/chains/index_related_chains)
14 | :::
15 |
16 | Chains related to working with unstructured data stored in indexes.
17 |
18 | <DocCardList />
19 |
20 |
21 |
--------------------------------------------------------------------------------
/docs/modules/agents/agents/custom/custom_prompt_chat.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | sidebar_label: Custom chat prompt
4 | sidebar_position: 1
5 | ---
6 |
7 | import CodeBlock from "@theme/CodeBlock";
8 |
9 | import Example from "!!raw-loader!@examples/chat/agent.ts";
10 |
11 |
12 | # Custom prompt, with a chat model
13 |
14 | This example covers how to create a custom prompt for a chat model agent.
15 |
16 |
17 | <CodeBlock language="typescript">{Example}</CodeBlock>
18 |
19 |
--------------------------------------------------------------------------------
/docs/modules/indexes/retrievers/remote-retriever.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | ---
4 |
5 | # Remote Retriever
6 |
7 | This example shows how to use a remote retriever in a `RetrievalQAChain` to retrieve documents from a remote server.
8 |
9 | ## Usage
10 |
11 | import CodeBlock from "@theme/CodeBlock";
12 |
13 | import Example from "!!raw-loader!@examples/chains/retrieval_qa_with_remote.ts";
14 |
15 |
16 |
17 | <CodeBlock language="typescript">{Example}</CodeBlock>
18 |
19 |
--------------------------------------------------------------------------------
/examples/src/llms/cohere.ts:
--------------------------------------------------------------------------------
1 | import { Cohere } from "langchain/llms/cohere";
2 |
3 | export const run = async () => {
4 | const model = new Cohere({
5 | temperature: 0.7,
6 | maxTokens: 20,
7 | maxRetries: 5,
8 | });
9 | const res = await model.call(
10 | "Question: What would be a good company name a company that makes colorful socks?\nAnswer:"
11 | );
12 | console.log({ res });
13 | };
14 |
--------------------------------------------------------------------------------
/docs/modules/chains/other_chains/multi_prompt_chain.mdx:
--------------------------------------------------------------------------------
1 | import CodeBlock from "@theme/CodeBlock";
2 |
3 | import MultiPromptExample from "!!raw-loader!@examples/chains/multi_prompt.ts";
4 |
5 |
6 |
7 | ## `MultiPromptChain`
8 |
9 |
10 | `MultiPromptChain` allows an LLM to select from multiple prompts. Construct the chain by providing a set of templates/prompts along with their corresponding names and descriptions. The chain takes a string as input, selects an appropriate prompt, and then passes the input to the selected prompt.
11 |
12 | <CodeBlock language="typescript">{MultiPromptExample}</CodeBlock>
13 |
14 |
--------------------------------------------------------------------------------
/src/pages/index.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (c) Meta Platforms, Inc. and affiliates.
3 | *
4 | * This source code is licensed under the MIT license found in the
5 | * LICENSE file in the root directory of this source tree.
6 | *
7 | * @format
8 | */
9 |
10 | import React from "react";
11 | import { Redirect } from "@docusaurus/router";
12 |
13 | export default function Home() {
14 |   return <Redirect to="/docs/" />; // assumed redirect target
15 | }
16 |
--------------------------------------------------------------------------------
/docs/modules/indexes/document_loaders/examples/file_loaders/text.md:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | ---
4 |
5 | # Text files
6 |
7 | This example goes over how to load data from text files.
8 |
9 | ```typescript
10 | import { TextLoader } from "langchain/document_loaders/fs/text";
11 |
12 | const loader = new TextLoader("src/document_loaders/example_data/example.txt");
13 |
14 | const docs = await loader.load();
15 | ```
16 |
--------------------------------------------------------------------------------
/docs/modules/indexes/retrievers/index.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | sidebar_position: 4
4 | ---
5 |
6 | import DocCardList from "@theme/DocCardList";
7 |
8 |
9 | # Retrievers
10 |
11 | :::info
12 | [Conceptual Guide](https://docs.langchain.com/docs/components/indexing/retriever)
13 | :::
14 |
15 | A way of storing data such that it can be queried by a language model. The only interface this object must expose is a `getRelevantDocuments` method, which takes a string query and returns a list of documents.
16 |
17 | <DocCardList />
18 |
19 |
20 |
--------------------------------------------------------------------------------
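A minimal sketch of that contract: anything with a conforming `getRelevantDocuments` method can be used where a retriever is expected. The class below is illustrative, not part of the library:

```typescript
import { Document } from "langchain/document";

// Illustrative retriever: returns canned documents for any query.
class StaticRetriever {
  async getRelevantDocuments(query: string): Promise<Document[]> {
    return [
      new Document({ pageContent: `You asked: ${query}`, metadata: {} }),
    ];
  }
}

const retriever = new StaticRetriever();
const docs = await retriever.getRelevantDocuments("hello world");
console.log(docs);
```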
/examples/src/llms/hf.ts:
--------------------------------------------------------------------------------
1 | import { HuggingFaceInference } from "langchain/llms/hf";
2 |
3 | export const run = async () => {
4 | const model = new HuggingFaceInference({
5 | model: "gpt2",
6 | temperature: 0.7,
7 | maxTokens: 50,
8 | });
9 | const res = await model.call(
10 | "Question: What would be a good company name a company that makes colorful socks?\nAnswer:"
11 | );
12 | console.log({ res });
13 | };
14 |
--------------------------------------------------------------------------------
/docs/modules/memory/examples/buffer_memory_chat.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | ---
4 |
5 | import CodeBlock from "@theme/CodeBlock";
6 |
7 | import Example from "!!raw-loader!@examples/chat/memory.ts";
8 |
9 |
10 | # Using Buffer Memory with Chat Models
11 |
12 | This example covers how to use chat-specific memory classes with chat models.
13 | The key thing to note is that setting `returnMessages: true` makes the memory return a list of chat messages instead of a string.
14 |
15 |
16 | <CodeBlock language="typescript">{Example}</CodeBlock>
17 |
18 |
--------------------------------------------------------------------------------
/examples/src/callbacks/docs_verbose.ts:
--------------------------------------------------------------------------------
1 | import { PromptTemplate } from "langchain/prompts";
2 | import { LLMChain } from "langchain/chains";
3 | import { OpenAI } from "langchain/llms/openai";
4 |
5 | const chain = new LLMChain({
6 | llm: new OpenAI({ temperature: 0 }),
7 | prompt: PromptTemplate.fromTemplate("Hello, world!"),
8 | // This will enable logging of all Chain *and* LLM events to the console.
9 | verbose: true,
10 | });
11 |
--------------------------------------------------------------------------------
/examples/src/llms/openai.ts:
--------------------------------------------------------------------------------
1 | import { OpenAI } from "langchain/llms/openai";
2 |
3 | export const run = async () => {
4 | const model = new OpenAI({
5 | modelName: "gpt-4",
6 | temperature: 0.7,
7 | maxTokens: 1000,
8 | maxRetries: 5,
9 | });
10 | const res = await model.call(
11 | "Question: What would be a good company name a company that makes colorful socks?\nAnswer:"
12 | );
13 | console.log({ res });
14 | };
15 |
--------------------------------------------------------------------------------
/docs/modules/chains/other_chains/analyze_document.mdx:
--------------------------------------------------------------------------------
1 | import CodeBlock from "@theme/CodeBlock";
2 |
3 | import AnalyzeDocumentExample from "!!raw-loader!@examples/chains/analyze_document_chain_summarize.ts";
4 |
5 |
6 | # `AnalyzeDocumentChain`
7 |
8 | You can use the `AnalyzeDocumentChain`, which takes a single piece of text as input and operates over it.
9 | This chain takes care of splitting the text and then passing it to a `MapReduceDocumentsChain` to generate a summary.
10 |
11 |
12 | <CodeBlock language="typescript">{AnalyzeDocumentExample}</CodeBlock>
13 |
14 |
--------------------------------------------------------------------------------
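A minimal sketch of the flow described above, using a summarization chain as the combine step; the input text is a placeholder:

```typescript
import { OpenAI } from "langchain/llms/openai";
import { AnalyzeDocumentChain, loadSummarizationChain } from "langchain/chains";

const model = new OpenAI({ temperature: 0 });

// The combine-documents chain receives the split pieces of the input text.
const combineDocsChain = loadSummarizationChain(model);
const chain = new AnalyzeDocumentChain({
  combineDocumentsChain: combineDocsChain,
});

const text = "..."; // any long document as a single string
const res = await chain.call({ input_document: text });
console.log({ res });
```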
/docs/modules/indexes/document_loaders/examples/web_loaders/notiondb.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | ---
4 |
5 | # Notion Database
6 |
7 | This example demonstrates how to load data from a Notion database.
8 | You will need your Notion integration token and the `databaseId` of the resource you want to access.
9 | Don't forget to add your integration to the database!
10 |
11 | import CodeBlock from "@theme/CodeBlock";
12 |
13 | import Example from "!!raw-loader!@examples/document_loaders/notiondb.ts";
14 |
15 |
16 |
17 | <CodeBlock language="typescript">{Example}</CodeBlock>
18 |
19 |
--------------------------------------------------------------------------------
/docs/use_cases/summarization.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | sidebar_position: 6
4 | ---
5 |
6 | # Summarization
7 |
8 | :::info
9 | [Conceptual Guide](https://docs.langchain.com/docs/use-cases/summarization)
10 | :::
11 |
12 | A common use case is wanting to summarize long documents.
13 | This naturally runs into the context window limitations.
14 | Unlike in question answering, you can't just use semantic-search tricks to only select the chunks of text most relevant to the question
15 | (because, in this case, there is no particular question: you want to summarize everything).
16 |
17 | To get started, we recommend checking out the summarization chain, which attacks this problem in a recursive manner.
18 |
19 | - [Summarization Chain](../modules/chains/other_chains/summarization)
20 |
21 |
--------------------------------------------------------------------------------
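A minimal sketch of that recursive approach: split the document, then let a `map_reduce` summarization chain summarize the chunks and combine the partial summaries (the input text is a placeholder):

```typescript
import { OpenAI } from "langchain/llms/openai";
import { loadSummarizationChain } from "langchain/chains";
import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";

const model = new OpenAI({ temperature: 0 });
const splitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000 });

const longText = "..."; // the document you want to summarize
const docs = await splitter.createDocuments([longText]);

// map_reduce summarizes each chunk, then combines the partial summaries.
const chain = loadSummarizationChain(model, { type: "map_reduce" });
const res = await chain.call({ input_documents: docs });
console.log({ res });
```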
/docs/modules/agents/agents/action/chat_mrkl.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | sidebar_position: 2
4 | ---
5 |
6 | import CodeBlock from "@theme/CodeBlock";
7 |
8 | import Example from "!!raw-loader!@examples/agents/chat_mrkl.ts";
9 |
10 |
11 | # MRKL Agent for Chat Models
12 |
13 | This example covers how to use an agent that uses the ReAct framework (based on tool descriptions) to decide what action to take. The agent is optimized for use with chat models. If you want to use it with an LLM, see the [LLM MRKL agent](./llm_mrkl).
14 |
15 |
16 | <CodeBlock language="typescript">{Example}</CodeBlock>
17 |
18 |
--------------------------------------------------------------------------------
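A minimal sketch of the setup described above, assuming SerpAPI credentials are available in the environment (the tool set and question are illustrative):

```typescript
import { ChatOpenAI } from "langchain/chat_models/openai";
import { initializeAgentExecutorWithOptions } from "langchain/agents";
import { SerpAPI } from "langchain/tools";
import { Calculator } from "langchain/tools/calculator";

const model = new ChatOpenAI({ temperature: 0 });
const tools = [new SerpAPI(), new Calculator()];

// "chat-zero-shot-react-description" is the ReAct-style agent tuned for chat models.
const executor = await initializeAgentExecutorWithOptions(tools, model, {
  agentType: "chat-zero-shot-react-description",
});

const result = await executor.call({
  input: "What is the current population of Japan, divided by 2?",
});
console.log(result.output);
```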
/examples/src/models/llm/llm_quick_start.ts:
--------------------------------------------------------------------------------
1 | import { OpenAI } from "langchain/llms/openai";
2 |
3 | export const run = async () => {
4 | const model = new OpenAI();
5 | // `call` is a simple string-in, string-out method for interacting with the model.
6 | const resA = await model.call(
7 | "What would be a good company name a company that makes colorful socks?"
8 | );
9 | console.log({ resA });
10 | // { resA: '\n\nSocktastic Colors' }
11 | };
12 |
--------------------------------------------------------------------------------
/docs/modules/chains/other_chains/multi_retrieval_qa_chain.mdx:
--------------------------------------------------------------------------------
1 | import CodeBlock from "@theme/CodeBlock";
2 |
3 | import MultiRetrievalQAExample from "!!raw-loader!@examples/chains/multi_retrieval_qa.ts";
4 |
5 |
6 | # `MultiRetrievalQAChain`
7 |
8 | `MultiRetrievalQAChain` enables an LLM to select from multiple retrievers. Construct the chain by providing a set of vector stores (as retrievers) along with their corresponding names and descriptions. The chain takes a query as input, selects the appropriate retriever, and then feeds the input into the selected retriever.
9 |
10 |
11 | <CodeBlock language="typescript">{MultiRetrievalQAExample}</CodeBlock>
12 |
13 |
--------------------------------------------------------------------------------
/examples/src/indexes/vector_stores/hnswlib.ts:
--------------------------------------------------------------------------------
1 | import { HNSWLib } from "langchain/vectorstores/hnswlib";
2 | import { OpenAIEmbeddings } from "langchain/embeddings/openai";
3 |
4 | const vectorStore = await HNSWLib.fromTexts(
5 | ["Hello world", "Bye bye", "hello nice world"],
6 | [{ id: 2 }, { id: 1 }, { id: 3 }],
7 | new OpenAIEmbeddings()
8 | );
9 |
10 | const resultOne = await vectorStore.similaritySearch("hello world", 1);
11 | console.log(resultOne);
12 |
--------------------------------------------------------------------------------
/examples/src/llms/replicate.ts:
--------------------------------------------------------------------------------
1 | import { Replicate } from "langchain/llms/replicate";
2 |
3 | export const run = async () => {
4 | const model = new Replicate({
5 | model:
6 | "replicate/flan-t5-xl:3ae0799123a1fe11f8c89fd99632f843fc5f7a761630160521c4253149754523",
7 | });
8 | const res = await model.call(
9 | "Question: What would be a good company name a company that makes colorful socks?\nAnswer:"
10 | );
11 | console.log({ res });
12 | };
13 |
--------------------------------------------------------------------------------
/blog/2019-05-28-first-blog-post.md:
--------------------------------------------------------------------------------
1 | ---
2 | slug: first-blog-post
3 | title: First Blog Post
4 | authors:
5 | name: Gao Wei
6 | title: Docusaurus Core Team
7 | url: https://github.com/wgao19
8 | image_url: https://github.com/wgao19.png
9 | tags: [hola, docusaurus]
10 | ---
11 |
12 | Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet
13 |
--------------------------------------------------------------------------------
/docs/modules/chains/other_chains/constitutional_chain.mdx:
--------------------------------------------------------------------------------
1 | import CodeBlock from "@theme/CodeBlock";
2 |
3 | import ConstitutionalChainExample from "!!raw-loader!@examples/chains/constitutional_chain.ts";
4 |
5 |
6 | # `ConstitutionalChain`
7 |
8 | The `ConstitutionalChain` is a chain that ensures the output of a language model adheres to a predefined set of constitutional principles. By incorporating specific rules and guidelines, the `ConstitutionalChain` filters and modifies the generated content to align with these principles, providing more controlled, ethical, and contextually appropriate responses. This mechanism helps maintain the integrity of the output while minimizing the risk of generating content that violates guidelines, is offensive, or deviates from the desired context.
9 |
10 |
11 | <CodeBlock language="typescript">{ConstitutionalChainExample}</CodeBlock>
12 |
13 |
--------------------------------------------------------------------------------
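A minimal sketch of the mechanism described above: an inner `LLMChain` produces a draft answer, and a `ConstitutionalPrinciple` drives the critique-and-revise pass (the principle text and prompts are illustrative):

```typescript
import { OpenAI } from "langchain/llms/openai";
import {
  ConstitutionalChain,
  ConstitutionalPrinciple,
  LLMChain,
} from "langchain/chains";
import { PromptTemplate } from "langchain/prompts";

const model = new OpenAI({ temperature: 0 });

// Inner chain that produces the initial, unfiltered answer.
const qaChain = new LLMChain({
  llm: model,
  prompt: PromptTemplate.fromTemplate("Answer the question: {question}"),
});

// Illustrative principle: critique the draft, then revise it.
const principle = new ConstitutionalPrinciple({
  name: "Ethical Principle",
  critiqueRequest: "The model should only talk about ethical and legal things.",
  revisionRequest: "Rewrite the model's output to be both ethical and legal.",
});

const chain = ConstitutionalChain.fromLLM(model, {
  chain: qaChain,
  constitutionalPrinciples: [principle],
});

const res = await chain.call({ question: "What is the meaning of life?" });
console.log({ res });
```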
/examples/src/document_loaders/github_ignore_paths.ts:
--------------------------------------------------------------------------------
1 | import { GithubRepoLoader } from "langchain/document_loaders/web/github";
2 |
3 | export const run = async () => {
4 | const loader = new GithubRepoLoader(
5 | "https://github.com/hwchase17/langchainjs",
6 | { branch: "main", recursive: false, unknown: "warn", ignorePaths: ["*.md"] }
7 | );
8 | const docs = await loader.load();
9 | console.log({ docs });
10 | // Will not include any .md files
11 | };
12 |
--------------------------------------------------------------------------------
/examples/src/retrievers/chatgpt-plugin.ts:
--------------------------------------------------------------------------------
1 | import { ChatGPTPluginRetriever } from "langchain/retrievers/remote";
2 |
3 | export const run = async () => {
4 | const retriever = new ChatGPTPluginRetriever({
5 | url: "http://0.0.0.0:8000",
6 | auth: {
7 | bearer: "super-secret-jwt-token-with-at-least-32-characters-long",
8 | },
9 | });
10 |
11 | const docs = await retriever.getRelevantDocuments("hello world");
12 |
13 | console.log(docs);
14 | };
15 |
--------------------------------------------------------------------------------
/examples/src/chains/conversation_chain.ts:
--------------------------------------------------------------------------------
1 | import { OpenAI } from "langchain/llms/openai";
2 | import { ConversationChain } from "langchain/chains";
3 |
4 | export const run = async () => {
5 | const model = new OpenAI({});
6 | const chain = new ConversationChain({ llm: model });
7 | const res1 = await chain.call({ input: "Hi! I'm Jim." });
8 | console.log({ res1 });
9 | const res2 = await chain.call({ input: "What's my name?" });
10 | console.log({ res2 });
11 | };
12 |
--------------------------------------------------------------------------------
/docs/modules/indexes/retrievers/metal-retriever.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | ---
4 |
5 | # Metal Retriever
6 |
7 | This example shows how to use the Metal Retriever in a `RetrievalQAChain` to retrieve documents from a Metal index.
8 |
9 | ## Setup
10 |
11 | ```bash npm2yarn
12 | npm i @getmetal/metal-sdk
13 |
14 | ```
15 |
16 |
17 | ## Usage
18 |
19 | import CodeBlock from "@theme/CodeBlock";
20 |
21 | import Example from "!!raw-loader!@examples/retrievers/metal.ts";
22 |
23 |
24 |
25 | {Example}
26 |
27 |
--------------------------------------------------------------------------------
/src/pages/index.module.css:
--------------------------------------------------------------------------------
1 | /**
2 | * CSS files with the .module.css suffix will be treated as CSS modules
3 | * and scoped locally.
4 | */
5 |
6 | .heroBanner {
7 | padding: 4rem 0;
8 | text-align: center;
9 | position: relative;
10 | overflow: hidden;
11 | }
12 |
13 | @media screen and (max-width: 996px) {
14 | .heroBanner {
15 | padding: 2rem;
16 | }
17 | }
18 |
19 | .buttons {
20 | display: flex;
21 | align-items: center;
22 | justify-content: center;
23 | }
24 |
--------------------------------------------------------------------------------
/docs/modules/memory/examples/vector_store_memory.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | ---
4 |
5 | import CodeBlock from "@theme/CodeBlock";
6 |
7 | import Example from "!!raw-loader!@examples/memory/vector_store.ts";
8 |
9 |
10 | # Vector store-backed memory
11 |
12 | `VectorStoreRetrieverMemory` stores memories in a vector database and queries the top-K most "salient" documents every time it is called.
13 |
14 | This differs from most of the other memory classes in that it does not explicitly track the order of interactions.
15 |
16 | In this case, the "documents" are snippets of previous conversation. This can be useful to refer to relevant pieces of information that the AI was told earlier in the conversation.
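A minimal sketch of wiring this memory up (the key names and stored facts are illustrative assumptions; the full example follows):

```typescript
import { OpenAIEmbeddings } from "langchain/embeddings/openai";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { VectorStoreRetrieverMemory } from "langchain/memory";

const vectorStore = new MemoryVectorStore(new OpenAIEmbeddings());
const memory = new VectorStoreRetrieverMemory({
  // Return the single most relevant snippet on each lookup.
  vectorStoreRetriever: vectorStore.asRetriever(1),
  memoryKey: "history",
});

// Save a conversation snippet, then query the most salient memory.
await memory.saveContext(
  { input: "My favorite food is pizza" },
  { output: "that's good to know" }
);
const { history } = await memory.loadMemoryVariables({
  prompt: "what food should I eat?",
});
console.log(history);
```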
17 |
18 |
19 | {Example}
20 |
21 |
--------------------------------------------------------------------------------
/docs/production/callbacks/create-handlers.mdx:
--------------------------------------------------------------------------------
1 | import CodeBlock from "@theme/CodeBlock";
2 |
3 |
4 | # Creating callback handlers
5 |
6 | ## Creating a custom handler
7 |
8 | You can also create your own handler by implementing the `BaseCallbackHandler` interface. This is useful if you want to do something more complex than just logging to the console, e.g. send the events to a logging service. Here is a simple example of a handler that logs to the console:
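In outline, such a handler looks like the following sketch (only a subset of the available callback methods is shown; the imported example below is the authoritative version):

```typescript
import { BaseCallbackHandler } from "langchain/callbacks";
import { ChainValues } from "langchain/schema";

export class MyCallbackHandler extends BaseCallbackHandler {
  name = "MyCallbackHandler";

  // Fired when any chain begins executing.
  async handleChainStart(chain: { name: string }) {
    console.log(`Entering new ${chain.name} chain...`);
  }

  // Fired when the chain finishes.
  async handleChainEnd(_output: ChainValues) {
    console.log("Finished chain.");
  }
}
```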
9 |
10 | import CustomHandlerExample from "!!raw-loader!@examples/callbacks/custom_handler.ts";
11 |
12 |
13 | {CustomHandlerExample}
14 |
15 |
16 | You can then use it as described in the [section](#built-in-handlers) above.
17 |
18 |
--------------------------------------------------------------------------------
/examples/src/models/embeddings/cohere.ts:
--------------------------------------------------------------------------------
1 | import { CohereEmbeddings } from "langchain/embeddings/cohere";
2 |
3 | export const run = async () => {
4 | /* Embed queries */
5 | const embeddings = new CohereEmbeddings();
6 | const res = await embeddings.embedQuery("Hello world");
7 | console.log(res);
8 | /* Embed documents */
9 | const documentRes = await embeddings.embedDocuments([
10 | "Hello world",
11 | "Bye bye",
12 | ]);
13 | console.log({ documentRes });
14 | };
15 |
--------------------------------------------------------------------------------
/examples/src/models/embeddings/openai.ts:
--------------------------------------------------------------------------------
1 | import { OpenAIEmbeddings } from "langchain/embeddings/openai";
2 |
3 | export const run = async () => {
4 | /* Embed queries */
5 | const embeddings = new OpenAIEmbeddings();
6 | const res = await embeddings.embedQuery("Hello world");
7 | console.log(res);
8 | /* Embed documents */
9 | const documentRes = await embeddings.embedDocuments([
10 | "Hello world",
11 | "Bye bye",
12 | ]);
13 | console.log({ documentRes });
14 | };
15 |
--------------------------------------------------------------------------------
/examples/src/models/llm/llm_cancellation.ts:
--------------------------------------------------------------------------------
1 | import { OpenAI } from "langchain/llms/openai";
2 |
3 | const model = new OpenAI({ temperature: 1 });
4 | const controller = new AbortController();
5 |
6 | // Call `controller.abort()` somewhere to cancel the request.
7 |
8 | const res = await model.call(
9 | "What would be a good company name a company that makes colorful socks?",
10 | { signal: controller.signal }
11 | );
12 |
13 | console.log(res);
14 | /*
15 | '\n\nSocktastic Colors'
16 | */
17 |
--------------------------------------------------------------------------------
/docs/modules/indexes/retrievers/chatgpt-retriever-plugin.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | ---
4 |
5 | # ChatGPT Plugin Retriever
6 |
7 | This example shows how to use the ChatGPT Retriever Plugin within LangChain.
8 |
9 | To set up the ChatGPT Retriever Plugin, please follow the instructions [here](https://github.com/openai/chatgpt-retrieval-plugin).
10 |
11 | ## Usage
12 |
13 | import CodeBlock from "@theme/CodeBlock";
14 |
15 | import Example from "!!raw-loader!@examples/retrievers/chatgpt-plugin.ts";
16 |
17 |
18 |
19 | {Example}
20 |
21 |
--------------------------------------------------------------------------------
/examples/src/retrievers/zep.ts:
--------------------------------------------------------------------------------
1 | import { ZepRetriever } from "langchain/retrievers/zep";
2 |
3 | export const run = async () => {
4 | const url = process.env.ZEP_URL || "http://localhost:8000";
5 | const sessionId = "TestSession1232";
6 | console.log(`Session ID: ${sessionId}, URL: ${url}`);
7 |
8 | const retriever = new ZepRetriever({ sessionId, url });
9 |
10 | const query = "hello";
11 | const docs = await retriever.getRelevantDocuments(query);
12 |
13 | console.log(docs);
14 | };
15 |
--------------------------------------------------------------------------------
/docs/modules/agents/agents/action/llm_mrkl.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 |
4 | sidebar_position: 1
5 |
6 | ---
7 |
8 |
9 |
10 | import CodeBlock from "@theme/CodeBlock";
11 |
12 | import Example from "!!raw-loader!@examples/agents/mrkl.ts";
13 |
14 |
15 |
16 | # MRKL agent for LLMs
17 |
18 |
19 | This example covers how to use an agent that uses the ReAct framework (based on the descriptions of tools) to decide what action to take.
20 |
21 | This agent is optimized to be used with LLMs. If you want to use it with a chat model instead, try the [Chat MRKL Agent](./chat_mrkl).
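A minimal sketch of initializing such an agent (the tool choice here is an illustrative assumption; the full example follows):

```typescript
import { OpenAI } from "langchain/llms/openai";
import { initializeAgentExecutorWithOptions } from "langchain/agents";
import { Calculator } from "langchain/tools/calculator";

const model = new OpenAI({ temperature: 0 });
const executor = await initializeAgentExecutorWithOptions(
  [new Calculator()],
  model,
  {
    // "zero-shot-react-description" is the MRKL-style agent type for LLMs.
    agentType: "zero-shot-react-description",
  }
);

const result = await executor.call({ input: "What is 9 to the 0.5 power?" });
console.log(result.output);
```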
22 |
23 | {Example}
24 |
25 |
--------------------------------------------------------------------------------
/docs/modules/agents/toolkits/index.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | sidebar_label: Toolkits
3 | sidebar_position: 4
4 | hide_table_of_contents: true
5 | ---
6 |
7 | # Getting started: toolkits
8 |
9 | :::info
10 | [Conceptual guide](https://docs.langchain.com/docs/components/agents/toolkit)
11 | :::
12 |
13 | Groups of tools that can be used / are needed to solve a particular problem.
14 |
15 | ```typescript
16 | interface Toolkit {
17 |
18 | tools: Tool[];
19 |
20 | }
21 |
22 | ```
23 |
24 |
25 | ## All toolkits
26 |
27 | import DocCardList from "@theme/DocCardList";
28 |
29 |
30 |
31 |
32 |
33 |
--------------------------------------------------------------------------------
/examples/src/models/chat/chat_timeout.ts:
--------------------------------------------------------------------------------
1 | import { ChatOpenAI } from "langchain/chat_models/openai";
2 | import { HumanChatMessage } from "langchain/schema";
3 |
4 | const chat = new ChatOpenAI({ temperature: 1 });
5 |
6 | const response = await chat.call(
7 | [
8 | new HumanChatMessage(
9 | "What is a good name for a company that makes colorful socks?"
10 | ),
11 | ],
12 | { timeout: 1000 } // 1s timeout
13 | );
14 | console.log(response);
15 | // AIChatMessage { text: '\n\nRainbow Sox Co.' }
16 |
--------------------------------------------------------------------------------
/examples/src/indexes/vector_stores/milvus.ts:
--------------------------------------------------------------------------------
1 | import { Milvus } from "langchain/vectorstores/milvus";
2 | import { OpenAIEmbeddings } from "langchain/embeddings/openai";
3 |
4 | export const run = async () => {
5 | const vectorStore = await Milvus.fromTexts(
6 | ["Hello world", "Bye bye", "hello nice world"],
7 | [{ id: 2 }, { id: 1 }, { id: 3 }],
8 | new OpenAIEmbeddings()
9 | );
10 |
11 | const resultOne = await vectorStore.similaritySearch("hello world", 1);
12 | console.log(resultOne);
13 | };
14 |
--------------------------------------------------------------------------------
/examples/src/indexes/vector_stores/faiss.ts:
--------------------------------------------------------------------------------
1 | import { FaissStore } from "langchain/vectorstores/faiss";
2 | import { OpenAIEmbeddings } from "langchain/embeddings/openai";
3 |
4 | export const run = async () => {
5 | const vectorStore = await FaissStore.fromTexts(
6 | ["Hello world", "Bye bye", "hello nice world"],
7 | [{ id: 2 }, { id: 1 }, { id: 3 }],
8 | new OpenAIEmbeddings()
9 | );
10 |
11 | const resultOne = await vectorStore.similaritySearch("hello world", 1);
12 | console.log(resultOne);
13 | };
14 |
--------------------------------------------------------------------------------
/examples/src/retrievers/databerry.ts:
--------------------------------------------------------------------------------
1 | import { DataberryRetriever } from "langchain/retrievers/databerry";
2 |
3 | export const run = async () => {
4 | const retriever = new DataberryRetriever({
5 | datastoreUrl: "https://api.databerry.ai/query/clg1xg2h80000l708dymr0fxc",
6 | apiKey: "DATABERRY_API_KEY", // optional: needed for private datastores
7 | topK: 8, // optional: default value is 3
8 | });
9 |
10 | const docs = await retriever.getRelevantDocuments("hello");
11 |
12 | console.log(docs);
13 | };
14 |
--------------------------------------------------------------------------------
/blog/authors.yml:
--------------------------------------------------------------------------------
1 | endi:
2 | name: Endilie Yacop Sucipto
3 | title: Maintainer of Docusaurus
4 | url: https://github.com/endiliey
5 | image_url: https://github.com/endiliey.png
6 |
7 | yangshun:
8 | name: Yangshun Tay
9 | title: Front End Engineer @ Facebook
10 | url: https://github.com/yangshun
11 | image_url: https://github.com/yangshun.png
12 |
13 | slorber:
14 | name: Sébastien Lorber
15 | title: Docusaurus maintainer
16 | url: https://sebastienlorber.com
17 | image_url: https://github.com/slorber.png
18 |
--------------------------------------------------------------------------------
/examples/src/document_loaders/s3.ts:
--------------------------------------------------------------------------------
1 | import { S3Loader } from "langchain/document_loaders/web/s3";
2 |
3 | const loader = new S3Loader({
4 | bucket: "my-document-bucket-123",
5 | key: "AccountingOverview.pdf",
6 | s3Config: {
7 | region: "us-east-1",
8 | accessKeyId: "AKIAIOSFODNN7EXAMPLE",
9 | secretAccessKey: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
10 | },
11 | unstructuredAPIURL: "http://localhost:8000/general/v0/general",
12 | });
13 |
14 | const docs = await loader.load();
15 |
16 | console.log(docs);
17 |
--------------------------------------------------------------------------------
/examples/src/llms/openai-chat.ts:
--------------------------------------------------------------------------------
1 | import { OpenAIChat } from "langchain/llms/openai";
2 |
3 | export const run = async () => {
4 | const model = new OpenAIChat({
5 | prefixMessages: [
6 | {
7 | role: "system",
8 | content: "You are a helpful assistant that answers in pirate language",
9 | },
10 | ],
11 | maxTokens: 50,
12 | });
13 | const res = await model.call(
14 | "What would be a good company name a company that makes colorful socks?"
15 | );
16 | console.log({ res });
17 | };
18 |
--------------------------------------------------------------------------------
/docs/modules/indexes/retrievers/chroma-self-query.mdx:
--------------------------------------------------------------------------------
1 | # Chroma Self-Query Retriever
2 |
3 | A self-query retriever is one that, as the name suggests, has the ability to query itself. Specifically, given any natural-language query, the retriever uses a query-constructing LLM chain to write a structured query and then applies that structured query to its underlying vector store. This allows the retriever to not only use the user-input query for semantic similarity comparison with the contents of stored documents, but to also extract filters on the metadata of stored documents from the user query and to execute those filters.
4 |
5 | This example uses a Chroma vector store.
6 |
7 | ## Usage
8 |
9 | This example shows how to initialize a `SelfQueryRetriever` with a vector store:
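In rough outline, initialization can look like the following sketch (the import paths, attribute metadata, and document contents are illustrative assumptions; the imported example below is the authoritative version):

```typescript
import { OpenAI } from "langchain/llms/openai";
import { OpenAIEmbeddings } from "langchain/embeddings/openai";
import { Chroma } from "langchain/vectorstores/chroma";
import { SelfQueryRetriever } from "langchain/retrievers/self_query";
import { ChromaTranslator } from "langchain/retrievers/self_query/chroma";
import { AttributeInfo } from "langchain/schema/query_constructor";

// A toy Chroma store of movie summaries (requires a running Chroma server).
const vectorStore = await Chroma.fromTexts(
  ["A bunch of scientists bring back dinosaurs and mayhem breaks loose"],
  [{ year: 1993 }],
  new OpenAIEmbeddings(),
  { collectionName: "movies" }
);

// Describe the metadata fields the LLM may build filters over.
const attributeInfo = [
  new AttributeInfo("year", "number", "The year the movie was released"),
];

const retriever = SelfQueryRetriever.fromLLM({
  llm: new OpenAI(),
  vectorStore,
  documentContents: "Brief summary of a movie",
  attributeInfo,
  structuredQueryTranslator: new ChromaTranslator(),
});

const docs = await retriever.getRelevantDocuments(
  "Which movies were released after 1990?"
);
console.log(docs);
```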
10 |
11 | import CodeBlock from "@theme/CodeBlock";
12 |
13 | import Example from "!!raw-loader!@examples/retrievers/chroma_self_query.ts";
14 |
15 |
16 |
17 | {Example}
18 |
19 |
--------------------------------------------------------------------------------
/examples/src/indexes/vector_stores/memory.ts:
--------------------------------------------------------------------------------
1 | import { MemoryVectorStore } from "langchain/vectorstores/memory";
2 | import { OpenAIEmbeddings } from "langchain/embeddings/openai";
3 |
4 | export const run = async () => {
5 | const vectorStore = await MemoryVectorStore.fromTexts(
6 | ["Hello world", "Bye bye", "hello nice world"],
7 | [{ id: 2 }, { id: 1 }, { id: 3 }],
8 | new OpenAIEmbeddings()
9 | );
10 |
11 | const resultOne = await vectorStore.similaritySearch("hello world", 1);
12 | console.log(resultOne);
13 | };
14 |
--------------------------------------------------------------------------------
/examples/src/models/embeddings/openai_timeout.ts:
--------------------------------------------------------------------------------
1 | import { OpenAIEmbeddings } from "langchain/embeddings/openai";
2 |
3 | export const run = async () => {
4 | const embeddings = new OpenAIEmbeddings({
5 | timeout: 1000, // 1s timeout
6 | });
7 | /* Embed queries */
8 | const res = await embeddings.embedQuery("Hello world");
9 | console.log(res);
10 | /* Embed documents */
11 | const documentRes = await embeddings.embedDocuments([
12 | "Hello world",
13 | "Bye bye",
14 | ]);
15 | console.log({ documentRes });
16 | };
17 |
--------------------------------------------------------------------------------
/blog/2021-08-01-mdx-blog-post.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | slug: mdx-blog-post
3 | title: MDX Blog Post
4 | authors: [slorber]
5 | tags: [docusaurus]
6 | ---
7 |
8 | Blog posts support [Docusaurus Markdown features](https://docusaurus.io/docs/markdown-features), such as [MDX](https://mdxjs.com/).
9 |
10 | :::tip
11 |
12 | Use the power of React to create interactive blog posts.
13 |
14 | ```js
15 |
16 | ```
17 |
18 |
19 |
20 | :::
21 |
--------------------------------------------------------------------------------
/examples/src/indexes/recursive_text_splitter.ts:
--------------------------------------------------------------------------------
1 | import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";
2 |
3 | export const run = async () => {
4 | const text = `Hi.\n\nI'm Harrison.\n\nHow? Are? You?\nOkay then f f f f.
5 | This is a weird text to write, but gotta test the splittingggg some how.\n\n
6 | Bye!\n\n-H.`;
7 | const splitter = new RecursiveCharacterTextSplitter({
8 | chunkSize: 10,
9 | chunkOverlap: 1,
10 | });
11 | const output = splitter.createDocuments([text]);
12 | console.log(output);
13 | };
14 |
--------------------------------------------------------------------------------
/examples/src/document_loaders/gitbook.ts:
--------------------------------------------------------------------------------
1 | import { GitbookLoader } from "langchain/document_loaders/web/gitbook";
2 |
3 | export const run = async () => {
4 | const loader = new GitbookLoader("https://docs.gitbook.com");
5 | const docs = await loader.load(); // load single path
6 | console.log(docs);
7 | const allPathsLoader = new GitbookLoader("https://docs.gitbook.com", {
8 | shouldLoadAllPaths: true,
9 | });
10 | const docsAllPaths = await allPathsLoader.load(); // loads all paths of the given gitbook
11 | console.log(docsAllPaths);
12 | };
13 |
--------------------------------------------------------------------------------
/docs/modules/indexes/document_loaders/examples/web_loaders/hn.md:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | ---
4 |
5 | # Hacker News
6 |
7 | This example goes over how to load data from the Hacker News website, using Cheerio. One document will be created for each page.
8 |
9 | ## Setup
10 |
11 | ```bash npm2yarn
12 | npm install cheerio
13 |
14 | ```
15 |
16 |
17 | ## Usage
18 |
19 | ```typescript
20 |
21 | import { HNLoader } from "langchain/document_loaders/web/hn";
22 |
23 |
24 |
25 | const loader = new HNLoader("https://news.ycombinator.com/item?id=34817881");
26 |
27 |
28 |
29 | const docs = await loader.load();
30 |
31 | ```
32 |
33 |
--------------------------------------------------------------------------------
/examples/src/indexes/vector_stores/prisma_vectorstore/prisma/schema.prisma:
--------------------------------------------------------------------------------
1 | // This is your Prisma schema file,
2 | // learn more about it in the docs: https://pris.ly/d/prisma-schema
3 |
4 | generator client {
5 | provider = "prisma-client-js"
6 | }
7 |
8 | datasource db {
9 | provider = "postgresql"
10 | url = env("DATABASE_URL")
11 | }
12 |
13 | model Document {
14 | id String @id @default(cuid())
15 | content String
16 | namespace String? @default("default")
17 | vector Unsupported("vector")?
18 | }
19 |
--------------------------------------------------------------------------------
/docs/modules/indexes/document_loaders/examples/web_loaders/imsdb.md:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | ---
4 |
5 | # IMSDB
6 |
7 | This example goes over how to load data from the internet movie script database website, using Cheerio. One document will be created for each page.
8 |
9 | ## Setup
10 |
11 | ```bash npm2yarn
12 | npm install cheerio
13 |
14 | ```
15 |
16 |
17 | ## Usage
18 |
19 | ```typescript
20 |
21 | import { IMSDBLoader } from "langchain/document_loaders/web/imsdb";
22 |
23 |
24 |
25 | const loader = new IMSDBLoader("https://imsdb.com/scripts/BlacKkKlansman.html");
26 |
27 |
28 |
29 | const docs = await loader.load();
30 |
31 | ```
32 |
33 |
--------------------------------------------------------------------------------
/docs/modules/chains/other_chains/moderation_chain.mdx:
--------------------------------------------------------------------------------
1 | import CodeBlock from "@theme/CodeBlock";
2 |
3 | import OpenAIModerationExample from "!!raw-loader!@examples/chains/openai_moderation.ts";
4 |
5 |
6 |
7 | ## `OpenAIModerationChain`
8 |
9 |
10 | You can use the `OpenAIModerationChain`, which is responsible for evaluating the input and determining whether or not it violates OpenAI's Terms of Service.
11 |
12 | If the input contains any content that violates the Terms of Service and `throwError` is set to `true`, an error will be thrown and caught. If `throwError` is set to `false`, the chain will return "Text was found that violates OpenAI's content policy."
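A minimal sketch of the chain in use (the input text is an illustrative assumption; the full example follows):

```typescript
import { OpenAIModerationChain } from "langchain/chains";

// With throwError set to false, violating input is flagged instead of throwing.
const moderation = new OpenAIModerationChain({ throwError: false });
const { output } = await moderation.call({ input: "Some user input" });
console.log(output);
```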
13 |
14 | {OpenAIModerationExample}
15 |
16 |
--------------------------------------------------------------------------------
/docs/modules/indexes/retrievers/pinecone-self-query.mdx:
--------------------------------------------------------------------------------
1 | # Pinecone Self-Query Retriever
2 |
3 | A self-query retriever is one that, as the name suggests, has the ability to query itself. Specifically, given any natural-language query, the retriever uses a query-constructing LLM chain to write a structured query and then applies that structured query to its underlying vector store. This allows the retriever to not only use the user-input query for semantic similarity comparison with the contents of stored documents, but to also extract filters on the metadata of stored documents from the user query and to execute those filters.
4 |
5 | This example uses a Pinecone vector store.
6 |
7 | ## Usage
8 |
9 | This example shows how to initialize a `SelfQueryRetriever` with a vector store:
10 |
11 | import CodeBlock from "@theme/CodeBlock";
12 |
13 | import Example from "!!raw-loader!@examples/retrievers/pinecone_self_query.ts";
14 |
15 |
16 |
17 | {Example}
18 |
19 |
--------------------------------------------------------------------------------
/docs/use_cases/agent_simulations/generative_agents.mdx:
--------------------------------------------------------------------------------
1 | import CodeBlock from "@theme/CodeBlock";
2 |
3 |
4 | # Generative Agents
5 |
6 | import GenerativeAgentsScript from "!!raw-loader!@examples/experimental/generative_agents/generative_agents.ts";
7 |
8 |
9 | This script implements a generative agent based on the paper [Generative Agents: Interactive Simulacra of Human Behavior](https://arxiv.org/abs/2304.03442) by Park, et al.
10 |
11 | In it, we leverage a time-weighted memory object backed by a LangChain retriever.
12 | The script below creates two instances of generative agents, Tommy and Eve, and runs a simulation of their interaction with their observations.
13 | Tommy takes on the role of a person moving to a new town who is looking for a job, and Eve takes on the role of a career counselor.
14 |
15 |
16 | {GenerativeAgentsScript}
17 |
18 |
--------------------------------------------------------------------------------
/examples/src/indexes/vector_stores/hnswlib_filter.ts:
--------------------------------------------------------------------------------
1 | import { HNSWLib } from "langchain/vectorstores/hnswlib";
2 | import { OpenAIEmbeddings } from "langchain/embeddings/openai";
3 |
4 | const vectorStore = await HNSWLib.fromTexts(
5 | ["Hello world", "Bye bye", "hello nice world"],
6 | [{ id: 2 }, { id: 1 }, { id: 3 }],
7 | new OpenAIEmbeddings()
8 | );
9 |
10 | const result = await vectorStore.similaritySearch(
11 | "hello world",
12 | 10,
13 | (document) => document.metadata.id === 3
14 | );
15 |
16 | // only "hello nice world" will be returned
17 | console.log(result);
18 |
--------------------------------------------------------------------------------
/docs/modules/indexes/document_loaders/examples/file_loaders/docx.md:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | ---
4 |
5 | # Docx files
6 |
7 | This example goes over how to load data from docx files.
8 |
9 | ## Setup
10 |
11 | ```bash npm2yarn
12 | npm install mammoth
13 |
14 | ```
15 |
16 |
17 | ## Usage
18 |
19 | ```typescript
20 |
21 | import { DocxLoader } from "langchain/document_loaders/fs/docx";
22 |
23 |
24 |
25 | const loader = new DocxLoader(
26 |
27 | "src/document_loaders/tests/example_data/attention.docx"
28 |
29 | );
30 |
31 |
32 |
33 | const docs = await loader.load();
34 |
35 | ```
36 |
37 |
--------------------------------------------------------------------------------
/examples/src/retrievers/metal.ts:
--------------------------------------------------------------------------------
1 | /* eslint-disable @typescript-eslint/no-non-null-assertion */
2 | import Metal from "@getmetal/metal-sdk";
3 | import { MetalRetriever } from "langchain/retrievers/metal";
4 |
5 | export const run = async () => {
6 | const MetalSDK = Metal;
7 |
8 | const client = new MetalSDK(
9 | process.env.METAL_API_KEY!,
10 | process.env.METAL_CLIENT_ID!,
11 | process.env.METAL_INDEX_ID
12 | );
13 | const retriever = new MetalRetriever({ client });
14 |
15 | const docs = await retriever.getRelevantDocuments("hello");
16 |
17 | console.log(docs);
18 | };
19 |
--------------------------------------------------------------------------------
/examples/src/indexes/vector_stores/faiss_loadfrompython.ts:
--------------------------------------------------------------------------------
1 | import { FaissStore } from "langchain/vectorstores/faiss";
2 | import { OpenAIEmbeddings } from "langchain/embeddings/openai";
3 |
4 | // The directory of data saved from Python
5 | const directory = "your/directory/here";
6 |
7 | // Load the vector store from the directory
8 | const loadedVectorStore = await FaissStore.loadFromPython(
9 | directory,
10 | new OpenAIEmbeddings()
11 | );
12 |
13 | // Search for the most similar document
14 | const result = await loadedVectorStore.similaritySearch("test", 2);
15 | console.log("result", result);
16 |
--------------------------------------------------------------------------------
/docs/use_cases/api.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | sidebar_position: 4
4 | ---
5 |
6 | # Interacting with APIs
7 |
8 | :::info
9 | [Conceptual guide](https://docs.langchain.com/docs/use-cases/apis)
10 | :::
11 |
12 | Lots of data and information is stored behind APIs.
13 | This page covers all resources available in LangChain for working with APIs.
14 |
15 | ## Chains
16 |
17 | If you are just getting started and you have a relatively simple API, you should get started with chains.
18 | Chains are a sequence of predetermined steps, so they are good to get started with, as they give you more control and let you
19 | understand what is happening better.
20 |
21 | - [API Chain](../modules/chains/other_chains/api_chain.mdx)
22 |
23 | ## Agents
24 |
25 | Agents are more complex, and involve multiple queries to the LLM to understand what to do.
26 | The downside of agents is that you have less control. The upside is that they are more powerful,
27 | which allows you to use them on larger and more complex schemas.
28 |
29 | - [OpenAPI Agent](../modules/agents/toolkits/openapi.md)
30 |
31 |
--------------------------------------------------------------------------------
/examples/src/models/chat/chat_quick_start.ts:
--------------------------------------------------------------------------------
1 | import { ChatOpenAI } from "langchain/chat_models/openai";
2 | import { HumanChatMessage } from "langchain/schema";
3 |
4 | export const run = async () => {
5 | const chat = new ChatOpenAI();
6 | // Pass in a list of messages to `call` to start a conversation. In this simple example, we only pass in one message.
7 | const response = await chat.call([
8 | new HumanChatMessage(
9 | "What is a good name for a company that makes colorful socks?"
10 | ),
11 | ]);
12 | console.log(response);
13 | // AIChatMessage { text: '\n\nRainbow Sox Co.' }
14 | };
15 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Langchain中文网 JS/TS Tutorials
2 |
3 | Langchain中文网 is the Chinese-language community for Langchain.
4 |
5 |
6 | The goal of Langchain中文网 is to help Chinese speakers read the Langchain documentation.
7 |
8 | If you come across a translation error, please point it out; anyone who cares about the community is welcome to join in translating.
9 |
10 | Contact: 特美丽, WeChat ID abc18601613801.
11 |
12 | For more information on how to use Langchain, see the [documentation](https://python.langchain.com.cn).
13 |
14 | ## Contributing
15 |
16 | If you would like to contribute to Langchain中文网, you can fork our repository on GitHub and create a branch to submit your changes.
17 |
18 | We welcome contributions of any kind, including but not limited to:
19 |
20 | - Fixing spelling, grammar, or documentation errors
21 | - Contributing new code or features
22 | - Adding tests or improving test coverage
23 | - Maintaining or fixing existing code
24 |
25 | ## Community
26 |
27 | ![QR code](https://pic.1zhuan.top/contact-us.png)
28 |
29 |
30 |
31 |
32 |
33 |
--------------------------------------------------------------------------------
/docs/modules/indexes/document_loaders/examples/web_loaders/figma.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | ---
4 |
5 | # Figma
6 |
7 | This example goes over how to load data from a Figma file.
8 | You will need a Figma access token in order to get started.
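A minimal sketch of the loader (the access token, node IDs, and file key below are placeholders; the imported example is the authoritative version):

```typescript
import { FigmaFileLoader } from "langchain/document_loaders/web/figma";

const loader = new FigmaFileLoader({
  accessToken: "FIGMA_ACCESS_TOKEN", // or set process.env.FIGMA_ACCESS_TOKEN
  nodeIds: ["0:1"], // placeholder node IDs from your file's URL
  fileKey: "YOUR_FILE_KEY", // placeholder file key from your file's URL
});

const docs = await loader.load();
console.log(docs);
```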
9 |
10 | import CodeBlock from "@theme/CodeBlock";
11 |
12 | import Example from "!!raw-loader!@examples/document_loaders/figma.ts";
13 |
14 |
15 | {Example}
16 |
17 |
18 | You can find the key and node IDs for a Figma file by opening it in your browser and extracting them from the URL:
19 |
20 | ```
21 |
22 | https://www.figma.com/file//LangChainJS-Test?type=whiteboard&node-id=&t=e6lqWkKecuYQRyRg-0
23 |
24 | ```
25 |
26 |
--------------------------------------------------------------------------------
/examples/src/chains/summarization.ts:
--------------------------------------------------------------------------------
1 | import { OpenAI } from "langchain/llms/openai";
2 | import { loadSummarizationChain } from "langchain/chains";
3 | import { Document } from "langchain/document";
4 |
5 | export const run = async () => {
6 | const model = new OpenAI({});
7 | const chain = loadSummarizationChain(model, { type: "stuff" });
8 | const docs = [
9 | new Document({ pageContent: "harrison went to harvard" }),
10 | new Document({ pageContent: "ankush went to princeton" }),
11 | ];
12 | const res = await chain.call({
13 | input_documents: docs,
14 | });
15 | console.log(res);
16 | };
17 |
--------------------------------------------------------------------------------
/docs/modules/indexes/document_loaders/examples/file_loaders/subtitles.md:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | ---
4 |
5 | # Subtitles
6 |
7 | This example goes over how to load data from subtitle files. One document will be created for each subtitles file.
8 |
9 | ## Setup
10 |
11 | ```bash npm2yarn
12 | npm install srt-parser-2
13 |
14 | ```
15 |
16 |
17 | ## Usage
18 |
19 | ```typescript
20 |
21 | import { SRTLoader } from "langchain/document_loaders/fs/srt";
22 |
23 |
24 |
25 | const loader = new SRTLoader(
26 |
27 | "src/document_loaders/example_data/Star_Wars_The_Clone_Wars_S06E07_Crisis_at_the_Heart.srt"
28 |
29 | );
30 |
31 |
32 |
33 | const docs = await loader.load();
34 |
35 | ```
36 |
37 |
--------------------------------------------------------------------------------
/examples/src/models/chat/chat_cancellation.ts:
--------------------------------------------------------------------------------
1 | import { ChatOpenAI } from "langchain/chat_models/openai";
2 | import { HumanChatMessage } from "langchain/schema";
3 |
4 | const model = new ChatOpenAI({ temperature: 1 });
5 | const controller = new AbortController();
6 |
7 | // Call `controller.abort()` somewhere to cancel the request.
8 |
9 | const res = await model.call(
10 | [
11 | new HumanChatMessage(
12 | "What is a good name for a company that makes colorful socks?"
13 | ),
14 | ],
15 | { signal: controller.signal }
16 | );
17 |
18 | console.log(res);
19 | /*
20 | '\n\nSocktastic Colors'
21 | */
22 |
--------------------------------------------------------------------------------
/examples/src/indexes/vector_stores/chroma/search.ts:
--------------------------------------------------------------------------------
1 | import { Chroma } from "langchain/vectorstores/chroma";
2 | import { OpenAIEmbeddings } from "langchain/embeddings/openai";
3 |
4 | const vectorStore = await Chroma.fromExistingCollection(
5 | new OpenAIEmbeddings(),
6 | { collectionName: "godel-escher-bach" }
7 | );
8 |
9 | const response = await vectorStore.similaritySearch("scared", 2);
10 | console.log(response);
11 | /*
12 | [
13 | Document { pageContent: 'Achilles: Oh, no!', metadata: {} },
14 | Document {
15 | pageContent: 'Achilles: Yiikes! What is that?',
16 | metadata: { id: 1 }
17 | }
18 | ]
19 | */
20 |
--------------------------------------------------------------------------------
/examples/src/models/embeddings/tensorflow.ts:
--------------------------------------------------------------------------------
1 | import "@tensorflow/tfjs-backend-cpu";
2 | import { Document } from "langchain/document";
3 | import { TensorFlowEmbeddings } from "langchain/embeddings/tensorflow";
4 | import { MemoryVectorStore } from "langchain/vectorstores/memory";
5 |
6 | const embeddings = new TensorFlowEmbeddings();
7 | const store = new MemoryVectorStore(embeddings);
8 |
9 | const documents = [
10 | "A document",
11 | "Some other piece of text",
12 | "One more",
13 | "And another",
14 | ];
15 |
16 | await store.addDocuments(
17 | documents.map((pageContent) => new Document({ pageContent }))
18 | );
19 |
--------------------------------------------------------------------------------
/docs/modules/indexes/retrievers/contextual-compression-retriever.mdx:
--------------------------------------------------------------------------------
1 | # Contextual Compression Retriever
2 |
3 |
4 | The Contextual Compression Retriever is meant to improve the answers returned from vector store document similarity searches by better taking into account the context from the query.
5 |
6 |
7 | It wraps another retriever, and uses a document compressor as an intermediate step after the initial similarity search, removing information irrelevant to the initial query from the retrieved documents.
8 | This reduces the amount of distraction a subsequent chain has to deal with when parsing the retrieved documents and making its final judgement.
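In rough outline, the wiring looks like this sketch (the toy document is an illustrative assumption; the full example follows under Usage):

```typescript
import { OpenAI } from "langchain/llms/openai";
import { OpenAIEmbeddings } from "langchain/embeddings/openai";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { ContextualCompressionRetriever } from "langchain/retrievers/contextual_compression";
import { LLMChainExtractor } from "langchain/retrievers/document_compressors/chain_extract";

// A toy vector store standing in for your real one.
const vectorStore = await MemoryVectorStore.fromTexts(
  ["The speaker praised Justice Breyer for his service."],
  [{}],
  new OpenAIEmbeddings()
);

// The compressor extracts only the query-relevant parts of each retrieved document.
const retriever = new ContextualCompressionRetriever({
  baseCompressor: LLMChainExtractor.fromLLM(new OpenAI()),
  baseRetriever: vectorStore.asRetriever(),
});

const docs = await retriever.getRelevantDocuments(
  "What did the speaker say about Justice Breyer?"
);
console.log(docs);
```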
9 |
10 |
11 | ## Usage
12 |
13 |
14 | This example shows how to initialize a `ContextualCompressionRetriever` with a vector store and a document compressor:
15 |
16 |
17 |
18 | import CodeBlock from "@theme/CodeBlock";
19 |
20 | import Example from "!!raw-loader!@examples/retrievers/contextual_compression.ts";
21 |
22 |
23 |
24 | {Example}
25 |
26 |
--------------------------------------------------------------------------------
/docs/modules/indexes/retrievers/zep-retriever.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | ---
4 |
5 | # Zep Retriever
6 |
7 | This example shows how to use the Zep Retriever in a `RetrievalQAChain` to retrieve documents from a Zep memory store.
8 |
9 | ## Setup
10 |
11 | ```bash npm2yarn
12 | npm i @getzep/zep-js
13 |
14 | ```
15 |
16 |
17 | ## Usage
18 |
19 | import CodeBlock from "@theme/CodeBlock";
20 |
21 | import Example from "!!raw-loader!@examples/retrievers/zep.ts";
22 |
23 |
24 |
25 | {Example}
26 |
27 |
--------------------------------------------------------------------------------
/examples/src/models/chat/integration_azure_openai.ts:
--------------------------------------------------------------------------------
1 | import { ChatOpenAI } from "langchain/chat_models/openai";
2 |
3 | const model = new ChatOpenAI({
4 | temperature: 0.9,
5 | azureOpenAIApiKey: "YOUR-API-KEY", // In Node.js defaults to process.env.AZURE_OPENAI_API_KEY
6 | azureOpenAIApiInstanceName: "YOUR-INSTANCE-NAME", // In Node.js defaults to process.env.AZURE_OPENAI_API_INSTANCE_NAME
7 | azureOpenAIApiDeploymentName: "YOUR-DEPLOYMENT-NAME", // In Node.js defaults to process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME
8 | azureOpenAIApiVersion: "YOUR-API-VERSION", // In Node.js defaults to process.env.AZURE_OPENAI_API_VERSION
9 | });
10 |
--------------------------------------------------------------------------------
/docs/modules/indexes/document_loaders/examples/file_loaders/notion_markdown.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | ---
4 |
5 | # Notion markdown export
6 |
7 | This example goes over how to load data from exported Notion pages.
8 |
9 | First, export your Notion pages as **Markdown & CSV** following the official instructions [here](https://www.notion.so/help/export-your-content). Make sure to select `Include subpages` and `Create folders for subpages`.
10 |
11 | Then unzip the downloaded file and move the unzipped folder into your repository. It should contain the markdown files of your pages.
12 |
13 | Once the folder is in your repository, simply run the example below:
14 |
15 | import CodeBlock from "@theme/CodeBlock";
16 |
17 | import Example from "!!raw-loader!@examples/document_loaders/notion_markdown.ts";
18 |
19 |
20 |
21 | {Example}
22 |
23 |
--------------------------------------------------------------------------------
/docs/modules/indexes/retrievers/hyde.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | ---
4 |
5 | # HyDE Retriever
6 |
7 | This example shows how to use the HyDE Retriever, which implements Hypothetical Document Embeddings (HyDE) as described in [this paper](https://arxiv.org/abs/2212.10496).
8 |
9 | At a high level, HyDE is an embedding technique that takes queries, generates hypothetical answers, and then embeds that generated document and uses it as the final example.
10 |
11 | In order to use HyDE, we therefore need to provide a base embedding model, as well as an LLM that can be used to generate those documents. By default, the HyDE class comes with some default prompts to use (see the paper for more details on them), but we can also create our own, which should have a single input variable `{question}`.
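A minimal sketch of this setup (the toy documents and in-memory store are illustrative assumptions; the imported example below is the authoritative version):

```typescript
import { OpenAI } from "langchain/llms/openai";
import { OpenAIEmbeddings } from "langchain/embeddings/openai";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { HydeRetriever } from "langchain/retrievers/hyde";
import { Document } from "langchain/document";

const vectorStore = new MemoryVectorStore(new OpenAIEmbeddings());
await vectorStore.addDocuments(
  ["My favourite food is pizza", "My favourite sport is football"].map(
    (pageContent) => new Document({ pageContent })
  )
);

const retriever = new HydeRetriever({
  vectorStore,
  llm: new OpenAI(),
  k: 1,
});

// The retriever asks the LLM for a hypothetical answer, embeds it,
// and searches the store with that embedding instead of the raw query.
const results = await retriever.getRelevantDocuments(
  "What is my favourite food?"
);
console.log(results);
```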
12 |
13 | ## Usage
14 |
15 | import CodeBlock from "@theme/CodeBlock";
16 |
17 | import Example from "!!raw-loader!@examples/retrievers/hyde.ts";
18 |
19 |
20 |
21 | {Example}
22 |
23 |
--------------------------------------------------------------------------------
/examples/src/indexes/text_splitter.ts:
--------------------------------------------------------------------------------
1 | import { Document } from "langchain/document";
2 | import { CharacterTextSplitter } from "langchain/text_splitter";
3 |
4 | export const run = async () => {
5 | /* Split text */
6 | const text = "foo bar baz 123";
7 | const splitter = new CharacterTextSplitter({
8 | separator: " ",
9 | chunkSize: 7,
10 | chunkOverlap: 3,
11 | });
12 | const output = splitter.createDocuments([text]);
13 | console.log({ output });
14 | /* Split documents */
15 | const docOutput = splitter.splitDocuments([
16 | new Document({ pageContent: text }),
17 | ]);
18 | console.log({ docOutput });
19 | };
20 |
--------------------------------------------------------------------------------
/examples/src/indexes/vector_stores/memory_custom_similarity.ts:
--------------------------------------------------------------------------------
1 | import { MemoryVectorStore } from "langchain/vectorstores/memory";
2 | import { OpenAIEmbeddings } from "langchain/embeddings/openai";
3 | import { similarity } from "ml-distance";
4 |
5 | export const run = async () => {
6 | const vectorStore = await MemoryVectorStore.fromTexts(
7 | ["Hello world", "Bye bye", "hello nice world"],
8 | [{ id: 2 }, { id: 1 }, { id: 3 }],
9 | new OpenAIEmbeddings(),
10 | { similarity: similarity.pearson }
11 | );
12 |
13 | const resultOne = await vectorStore.similaritySearch("hello world", 1);
14 | console.log(resultOne);
15 | };
16 |
--------------------------------------------------------------------------------
/docs/modules/indexes/text_splitters/examples/character.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | ---
4 |
5 | # Character Text Splitter
6 |
7 | Besides the recursive character text splitter, there is also the more standard `CharacterTextSplitter`.
8 |
9 | This splits only on one type of character (defaults to `"\n\n"`). You can use it in exactly the same way.
10 | ```typescript
11 |
12 | import { Document } from "langchain/document";
13 |
14 | import { CharacterTextSplitter } from "langchain/text_splitter";
15 |
16 |
17 |
18 | const text = "foo bar baz 123";
19 |
20 | const splitter = new CharacterTextSplitter({
21 |
22 | separator: " ",
23 |
24 | chunkSize: 7,
25 |
26 | chunkOverlap: 3,
27 |
28 | });
29 |
30 | const output = await splitter.createDocuments([text]);
31 |
32 | ```
33 |
34 |
--------------------------------------------------------------------------------
/docs/modules/agents/agents/plan_execute/index.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | sidebar_position: 2
4 | ---
5 |
6 | import CodeBlock from "@theme/CodeBlock";
7 |
8 | import Example from "!!raw-loader!@examples/agents/plan_and_execute.ts";
9 |
10 |
11 | # Plan-and-execute agents
12 |
13 | This example shows how to use an agent that uses the plan-and-execute framework to answer a query.
14 | This framework works differently from the other currently supported agents (which are all classified as action agents) in that it uses a two-step process:
15 |
16 | 1. First, the agent uses an LLM to create a plan to answer the query with clear steps.
17 | 2. Once it has a plan, it uses an embedded traditional action agent to solve each step.
18 |
19 | The idea is that the planning step keeps the LLM more "on track" by breaking up a larger task into simpler subtasks.
20 | However, this method requires more individual LLM queries and has higher latency compared to action agents.
21 |
22 | **Note**: this agent currently only supports chat models.
23 |
24 |
25 | {Example}
26 |
27 |
--------------------------------------------------------------------------------
/docs/modules/indexes/document_loaders/examples/web_loaders/college_confidential.md:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | ---
4 |
5 | # College Confidential
6 |
7 | This example goes over how to load data from the College Confidential website, using Cheerio. One document will be created for each page.
8 |
9 | ## Setup
10 |
11 | ```bash npm2yarn
12 | npm install cheerio
13 |
14 | ```
15 |
16 |
17 | ## Usage
18 |
19 | ```typescript
20 |
21 | import { CollegeConfidentialLoader } from "langchain/document_loaders/web/college_confidential";
22 |
23 |
24 |
25 | const loader = new CollegeConfidentialLoader(
26 |
27 | "https://www.collegeconfidential.com/colleges/brown-university/"
28 |
29 | );
30 |
31 |
32 |
33 | const docs = await loader.load();
34 |
35 | ```
36 |
37 |
--------------------------------------------------------------------------------
/examples/src/retrievers/vespa.ts:
--------------------------------------------------------------------------------
1 | import { VespaRetriever } from "langchain/retrievers/vespa";
2 |
3 | export const run = async () => {
4 | const url = "https://doc-search.vespa.oath.cloud";
5 | const query_body = {
6 | yql: "select content from paragraph where userQuery()",
7 | hits: 5,
8 | ranking: "documentation",
9 | locale: "en-us",
10 | };
11 | const content_field = "content";
12 |
13 | const retriever = new VespaRetriever({
14 | url,
15 | auth: false,
16 | query_body,
17 | content_field,
18 | });
19 |
20 | const result = await retriever.getRelevantDocuments("what is vespa?");
21 | console.log(result);
22 | };
23 |
--------------------------------------------------------------------------------
/docs/modules/models/llms/index.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | sidebar_label: LLMs
4 | ---
5 |
6 | import CodeBlock from "@theme/CodeBlock";
7 |
8 | import Example from "!!raw-loader!@examples/models/llm/llm_quick_start.ts";
9 |
10 | import DocCardList from "@theme/DocCardList";
11 |
12 |
13 | # Getting started: LLMs
14 |
15 | :::info
16 | [Conceptual guide](https://docs.langchain.com/docs/components/models/language-model)
17 | :::
18 |
19 | LangChain provides a standard interface for using a variety of LLMs.
20 |
21 | To get started, simply use the `call` method of an `LLM` implementation, passing in a `string` input. In this example, we are using the `OpenAI` implementation:
22 |
23 | {Example}
24 |
25 |
26 | ## Dig deeper
27 |
28 |
29 |
30 |
31 |
--------------------------------------------------------------------------------
/docs/use_cases/personal_assistants.mdx:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | hide_table_of_contents: true
4 | sidebar_position: 1
5 | ---
6 |
7 |
8 |
9 | # Personal Assistants
10 |
11 |
12 | :::info
13 |
14 | [Conceptual guide](https://docs.langchain.com/docs/use-cases/personal-assistants)
15 | :::
16 |
17 |
18 |
19 | We use "personal assistants" here in a very broad sense.
20 | Personal assistants have a few characteristics:
21 |
22 |
23 | - They can interact with the outside world
24 | - They have knowledge of your data
25 | - They remember your interactions
26 |
27 |
28 | Really all of the functionality in LangChain is relevant for building a personal assistant.
29 | Highlighting specific parts:
30 |
31 |
32 |
33 | - [Agent documentation](../modules/agents/index.mdx) (for interacting with the outside world)
34 | - [Index documentation](../modules/indexes/index.mdx) (for giving them knowledge of your data)
35 | - [Memory](../modules/memory/index.mdx) (for helping them remember interactions)
36 |
37 |
--------------------------------------------------------------------------------
/docs/modules/prompts/prompt_templates/prompt_composition.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | sidebar_position: 1
4 | ---
5 |
6 | import CodeBlock from "@theme/CodeBlock";
7 |
8 |
9 | # Prompt composition
10 |
11 | A pipeline prompt template lets you combine multiple individual prompt templates together.
12 | This can be useful when you want to reuse parts of individual prompts.
13 |
14 | Instead of taking `inputVariables` as a parameter, a pipeline prompt template requires two new parameters:
15 |
16 | - `pipelinePrompts`: an array of objects containing a string (`name`) and a `PromptTemplate`.
17 |   Each `PromptTemplate` will be formatted and then passed to future prompt templates in the pipeline as an input variable with the same name as the `name`.
18 | - `finalPrompt`: the final prompt that is returned.
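Concretely, a minimal sketch (the template contents are illustrative assumptions; the runnable example referenced below is the authoritative version):

```typescript
import { PromptTemplate, PipelinePromptTemplate } from "langchain/prompts";

const fullPrompt = new PipelinePromptTemplate({
  pipelinePrompts: [
    {
      // The formatted result is exposed to later templates as {introduction}.
      name: "introduction",
      prompt: PromptTemplate.fromTemplate(`You are impersonating {person}.`),
    },
  ],
  finalPrompt: PromptTemplate.fromTemplate(`{introduction}

What's your favorite {topic}?`),
});

const formatted = await fullPrompt.format({ person: "Elon Musk", topic: "car" });
console.log(formatted);
```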
19 |
20 | Here is an example of this in action:
21 |
22 | import Example from "!!raw-loader!@examples/prompts/pipeline_prompt.ts";
23 |
24 |
25 |
26 | {Example}
27 |
28 |
--------------------------------------------------------------------------------
/examples/src/chains/question_answering_map_reduce.ts:
--------------------------------------------------------------------------------
1 | import { OpenAI } from "langchain/llms/openai";
2 | import { loadQAMapReduceChain } from "langchain/chains";
3 | import { Document } from "langchain/document";
4 |
5 | export const run = async () => {
6 | const model = new OpenAI({ temperature: 0 });
7 | const chain = loadQAMapReduceChain(model);
8 | const docs = [
9 | new Document({ pageContent: "harrison went to harvard" }),
10 | new Document({ pageContent: "ankush went to princeton" }),
11 | ];
12 | const res = await chain.call({
13 | input_documents: docs,
14 | question: "Where did harrison go to college",
15 | });
16 | console.log({ res });
17 | };
18 |
--------------------------------------------------------------------------------
/examples/src/indexes/vector_stores/hnswlib_fromdocs.ts:
--------------------------------------------------------------------------------
1 | import { HNSWLib } from "langchain/vectorstores/hnswlib";
2 | import { OpenAIEmbeddings } from "langchain/embeddings/openai";
3 | import { TextLoader } from "langchain/document_loaders/fs/text";
4 |
5 | // Create docs with a loader
6 | const loader = new TextLoader("src/document_loaders/example_data/example.txt");
7 | const docs = await loader.load();
8 |
9 | // Load the docs into the vector store
10 | const vectorStore = await HNSWLib.fromDocuments(docs, new OpenAIEmbeddings());
11 |
12 | // Search for the most similar document
13 | const result = await vectorStore.similaritySearch("hello world", 1);
14 | console.log(result);
15 |
--------------------------------------------------------------------------------
/docs/modules/indexes/text_splitters/examples/token.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | ---
4 |
5 | # TokenTextSplitter
6 |
7 | Finally, the `TokenTextSplitter` splits a raw text string by first converting the text into BPE tokens, then splits these tokens into chunks and converts the tokens within a single chunk back into text.
8 |
9 | ```typescript
10 |
11 | import { Document } from "langchain/document";
12 |
13 | import { TokenTextSplitter } from "langchain/text_splitter";
14 |
15 |
16 |
17 | const text = "foo bar baz 123";
18 |
19 |
20 |
21 | const splitter = new TokenTextSplitter({
22 |
23 | encodingName: "gpt2",
24 |
25 | chunkSize: 10,
26 |
27 | chunkOverlap: 0,
28 |
29 | });
30 |
31 |
32 |
33 | const output = await splitter.createDocuments([text]);
34 |
35 | ```
36 |
37 |
--------------------------------------------------------------------------------
/docs/modules/memory/examples/entity_memory.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | sidebar_label: Entity memory
4 | ---
5 |
6 | import CodeBlock from "@theme/CodeBlock";
7 |
8 |
9 | # Entity memory
10 |
11 | Entity memory remembers given facts about specific entities in a conversation.
12 | It extracts entities (using an LLM) and builds up its knowledge about those entities over time (also using an LLM).
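A minimal sketch of wiring entity memory into a chain (a sketch only; the full examples follow):

```typescript
import { OpenAI } from "langchain/llms/openai";
import { LLMChain } from "langchain/chains";
import {
  EntityMemory,
  ENTITY_MEMORY_CONVERSATION_TEMPLATE,
} from "langchain/memory";

// One LLM extracts entities; the other drives the conversation.
const memory = new EntityMemory({ llm: new OpenAI({ temperature: 0 }) });
const chain = new LLMChain({
  llm: new OpenAI({ temperature: 0.9 }),
  prompt: ENTITY_MEMORY_CONVERSATION_TEMPLATE,
  memory,
});

const res = await chain.call({ input: "Hi! I'm Jim." });
console.log(res);
```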
13 |
14 | ## Usage
15 |
16 | import Example from "!!raw-loader!@examples/memory/entity.ts";
17 |
18 |
19 | {Example}
20 |
21 |
22 | ### Inspecting the memory store
23 |
24 | You can also inspect the memory store directly to see the current summary of each entity:
25 |
26 | import MemoryInspectionExample from "!!raw-loader!@examples/memory/entity_memory_inspection.ts";
27 |
28 |
29 |
30 | {MemoryInspectionExample}
31 |
32 |
--------------------------------------------------------------------------------
/docs/modules/prompts/prompt_templates/index.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | sidebar_label: Prompt templates
4 | sidebar_position: 1
5 | ---
6 |
7 | import CodeBlock from "@theme/CodeBlock";
8 |
9 | import Example from "!!raw-loader!@examples/prompts/prompts.ts";
10 |
11 | import DocCardList from "@theme/DocCardList";
12 |
13 |
14 | # Prompt templates
15 |
16 | :::info
17 | [Conceptual guide](https://docs.langchain.com/docs/components/prompts/prompt-template)
18 | :::
19 |
20 | A `PromptTemplate` lets you generate prompts from a template. This is useful when you want to use the same prompt outline in multiple places, but with certain values changed.
21 | As shown below, `PromptTemplate` supports both LLMs and chat models:
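For instance, a minimal sketch of the template itself:

```typescript
import { PromptTemplate } from "langchain/prompts";

const prompt = PromptTemplate.fromTemplate(
  "What is a good name for a company that makes {product}?"
);
const formatted = await prompt.format({ product: "colorful socks" });
// "What is a good name for a company that makes colorful socks?"
```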
22 |
23 | {Example}
24 |
25 |
26 | ## Dig deeper
27 |
28 |
29 |
30 |
31 |
--------------------------------------------------------------------------------
/docs/use_cases/tabular.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | sidebar_position: 3
4 | ---
5 |
6 | # Tabular question answering
7 |
8 | :::info
9 | [Conceptual guide](https://docs.langchain.com/docs/use-cases/qa-tabular)
10 | :::
11 |
12 | Lots of data and information is stored in tabular data, whether it be csvs, excel sheets, or SQL tables.
13 | This page covers all resources available in LangChain for working with data in this format.
14 |
15 | ## Chains
16 |
17 | If you are just getting started, and you have relatively small/simple tabular data, you should get started with chains.
18 | Chains are a sequence of predetermined steps, so they are good to get started with, as they give you more control and let you understand what is happening better.
19 |
20 | - [SQL Database Chain](../modules/chains/other_chains/sql)
21 |
22 | ## Agents
23 |
24 | Agents are more complex, and involve multiple queries to the LLM to understand what to do. The downside of agents is that you have less control. The upside is that they are more powerful, which allows you to use them on larger databases and more complex schemas.
25 |
26 |
27 |
28 |
29 |
30 | - [SQL Agent](../modules/agents/toolkits/sql.mdx)
31 |
32 |
--------------------------------------------------------------------------------
/examples/src/document_loaders/confluence.ts:
--------------------------------------------------------------------------------
1 | import { ConfluencePagesLoader } from "langchain/document_loaders/web/confluence";
2 |
3 | const username = process.env.CONFLUENCE_USERNAME;
4 | const accessToken = process.env.CONFLUENCE_ACCESS_TOKEN;
5 |
6 | if (username && accessToken) {
7 | const loader = new ConfluencePagesLoader({
8 | baseUrl: "https://example.atlassian.net/wiki",
9 | spaceKey: "~EXAMPLE362906de5d343d49dcdbae5dEXAMPLE",
10 | username,
11 | accessToken,
12 | });
13 |
14 | const documents = await loader.load();
15 | console.log(documents);
16 | } else {
17 | console.log(
18 | "You must provide a username and access token to run this example."
19 | );
20 | }
21 |
--------------------------------------------------------------------------------
/examples/src/indexes/vector_stores/faiss_fromdocs.ts:
--------------------------------------------------------------------------------
1 | import { FaissStore } from "langchain/vectorstores/faiss";
2 | import { OpenAIEmbeddings } from "langchain/embeddings/openai";
3 | import { TextLoader } from "langchain/document_loaders/fs/text";
4 |
5 | // Create docs with a loader
6 | const loader = new TextLoader("src/document_loaders/example_data/example.txt");
7 | const docs = await loader.load();
8 |
9 | // Load the docs into the vector store
10 | const vectorStore = await FaissStore.fromDocuments(
11 | docs,
12 | new OpenAIEmbeddings()
13 | );
14 |
15 | // Search for the most similar document
16 | const resultOne = await vectorStore.similaritySearch("hello world", 1);
17 | console.log(resultOne);
18 |
--------------------------------------------------------------------------------
/examples/src/indexes/vector_stores/qdrant/fromExisting.ts:
--------------------------------------------------------------------------------
1 | import { QdrantVectorStore } from "langchain/vectorstores/qdrant";
2 | import { OpenAIEmbeddings } from "langchain/embeddings/openai";
3 |
4 | const vectorStore = await QdrantVectorStore.fromExistingCollection(
5 | new OpenAIEmbeddings(),
6 | {
7 | url: process.env.QDRANT_URL,
8 | collectionName: "goldel_escher_bach",
9 | }
10 | );
11 |
12 | const response = await vectorStore.similaritySearch("scared", 2);
13 |
14 | console.log(response);
15 |
16 | /*
17 | [
18 | Document { pageContent: 'Achilles: Oh, no!', metadata: {} },
19 | Document {
20 | pageContent: 'Achilles: Yiikes! What is that?',
21 | metadata: { id: 1 }
22 | }
23 | ]
24 | */
25 |
--------------------------------------------------------------------------------
/examples/src/cache/momento.ts:
--------------------------------------------------------------------------------
1 | import { OpenAI } from "langchain/llms/openai";
2 | import { MomentoCache } from "langchain/cache/momento";
3 | import {
4 | CacheClient,
5 | Configurations,
6 | CredentialProvider,
7 | } from "@gomomento/sdk";
8 |
9 | // See https://github.com/momentohq/client-sdk-javascript for connection options
10 | const client = new CacheClient({
11 | configuration: Configurations.Laptop.v1(),
12 | credentialProvider: CredentialProvider.fromEnvironmentVariable({
13 | environmentVariableName: "MOMENTO_AUTH_TOKEN",
14 | }),
15 | defaultTtlSeconds: 60 * 60 * 24,
16 | });
17 | const cache = await MomentoCache.fromProps({
18 | client,
19 | cacheName: "langchain",
20 | });
21 |
22 | const model = new OpenAI({ cache });
23 |
--------------------------------------------------------------------------------
/examples/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "@tsconfig/recommended",
3 | "compilerOptions": {
4 | "outDir": "dist",
5 | "lib": [
6 | "ES2021",
7 | "ES2022.Object",
8 | "DOM"
9 | ],
10 | "target": "ES2021",
11 | "module": "nodenext",
12 | "sourceMap": true,
13 | "allowSyntheticDefaultImports": true,
14 | "baseUrl": "./src",
15 | "declaration": true,
16 | "noImplicitReturns": true,
17 | "noFallthroughCasesInSwitch": true,
18 | "noUnusedParameters": true,
19 | "useDefineForClassFields": true,
20 | "strictPropertyInitialization": false
21 | },
22 | "exclude": [
23 | "node_modules/",
24 | "dist/",
25 | "tests/"
26 | ],
27 | "include": [
28 | "./src"
29 | ]
30 | }
31 |
--------------------------------------------------------------------------------
/docs/modules/indexes/document_loaders/examples/web_loaders/confluence.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | sidebar_class_name: nodeonly
3 | ---
4 |
5 | # Confluence
6 |
7 | :::info Compatibility
8 | Only available on Node.js.
9 | :::
10 |
11 | This covers how to load document objects from pages in a Confluence space.
12 |
13 | ## Credentials
14 |
15 | - You'll need to set up an access token and provide it alongside your Confluence username in order to authenticate requests
16 | - You'll also need the space key for the space containing the pages to load as documents. This can be found in the URL when navigating to your space, e.g. `https://example.atlassian.net/wiki/spaces/{SPACE_KEY}`
17 | - You'll need to install `html-to-text` to parse the pages into plain text
18 |
19 | ```bash npm2yarn
20 | npm install html-to-text
21 |
22 | ```
23 |
24 |
25 | ## Usage
26 |
27 | import CodeBlock from "@theme/CodeBlock";
28 |
29 | import Example from "!!raw-loader!@examples/document_loaders/confluence.ts";
30 |
31 |
32 |
33 | {Example}
34 |
35 |
--------------------------------------------------------------------------------
/docs/modules/agents/agents/action/structured_chat.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | sidebar_position: 4
4 | ---
5 |
6 | import CodeBlock from "@theme/CodeBlock";
7 |
8 | import Example from "!!raw-loader!@examples/agents/structured_chat.ts";
9 |
10 |
11 | # Structured tool chat agent
12 |
13 | The structured tool chat agent is designed to be used with tools whose inputs conform to an arbitrary object schema, which gives it more flexibility than agents that only support tools that accept a single string as input.
14 |
15 | This makes it easier to create and use tools that require multiple input values: rather than prompting for a stringified object or a comma-separated list, you can specify an object with multiple keys.
16 | Here is an example using a `DynamicStructuredTool`:
17 |
18 | {Example}
19 |
20 |
21 | ## 添加记忆
22 |
23 | You can add memory to this agent like this:
24 |
25 | import MemoryExample from "!!raw-loader!@examples/agents/structured_chat_with_memory.ts";
26 |
27 |
28 |
29 | {MemoryExample}
30 |
31 |
--------------------------------------------------------------------------------
/docs/modules/memory/examples/dynamodb.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | ---
4 |
5 | import CodeBlock from "@theme/CodeBlock";
6 |
7 |
8 | # DynamoDB-backed chat memory
9 |
10 | For longer-term persistence across chat sessions, you can swap out the default in-memory `chatHistory` that backs chat memory classes like `BufferMemory` for a DynamoDB instance.
11 |
12 | ## Setup
13 |
14 | First, install the AWS DynamoDB client in your project:
15 |
16 | ```bash npm2yarn
17 | npm install @aws-sdk/client-dynamodb
18 |
19 | ```
20 |
21 |
22 | Next, sign in to your AWS account and create a DynamoDB table. Name the table `langchain`, and name your partition key `id`. The partition key must be a string type; you can keep the default settings for everything else.
23 |
24 | You'll also need to retrieve an AWS access key and secret key for a role or user that has access to the table, and add them to your environment variables.
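For reference, the wiring might look roughly like this sketch (the region and session ID are illustrative assumptions; the full example follows under Usage):

```typescript
import { BufferMemory } from "langchain/memory";
import { DynamoDBChatMessageHistory } from "langchain/stores/message/dynamodb";

// Table and key names must match the DynamoDB table created above.
const memory = new BufferMemory({
  chatHistory: new DynamoDBChatMessageHistory({
    tableName: "langchain",
    partitionKey: "id",
    sessionId: new Date().toISOString(), // one item per conversation
    config: { region: "us-east-1" }, // assumed region; use your own
  }),
});
```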
25 |
26 | ## Usage
27 |
28 | import Example from "!!raw-loader!@examples/memory/dynamodb-store.ts";
29 |
30 |
31 |
32 | {Example}
33 |
34 |
--------------------------------------------------------------------------------
/examples/src/agents/plan_and_execute.ts:
--------------------------------------------------------------------------------
1 | import { Calculator } from "langchain/tools/calculator";
2 | import { SerpAPI } from "langchain/tools";
3 | import { ChatOpenAI } from "langchain/chat_models/openai";
4 | import { PlanAndExecuteAgentExecutor } from "langchain/experimental/plan_and_execute";
5 |
6 | const tools = [new Calculator(), new SerpAPI()];
7 | const model = new ChatOpenAI({
8 | temperature: 0,
9 | modelName: "gpt-3.5-turbo",
10 | verbose: true,
11 | });
12 | const executor = PlanAndExecuteAgentExecutor.fromLLMAndTools({
13 | llm: model,
14 | tools,
15 | });
16 |
17 | const result = await executor.call({
18 | input: `Who is the current president of the United States? What is their current age raised to the second power?`,
19 | });
20 |
21 | console.log({ result });
22 |
--------------------------------------------------------------------------------
/docs/modules/memory/examples/zep_memory.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | ---
4 |
5 | # Zep Memory
6 |
7 | [Zep](https://github.com/getzep/zep) is a memory server that stores, summarizes, embeds, indexes, and enriches conversational AI chat histories, autonomous agent histories, and document Q&A histories, and exposes them via simple, low-latency APIs.
8 |
9 | Key features:
10 |
11 | - Long-term memory persistence, with access to historical messages irrespective of your summarization strategy.
12 | - Auto-summarization of memory messages based on a configurable message window. A series of summaries is stored, providing flexibility for future summarization strategies.
13 | - Vector search over memories, with messages automatically embedded on creation.
14 | - Auto-token counting of memories and summaries, allowing finer-grained control over prompt assembly.
15 | - [Python](https://github.com/getzep/zep-python) and [JavaScript](https://github.com/getzep/zep-js) SDKs.
16 |
17 | ## Setup
18 |
19 | See the [Zep](https://github.com/getzep/zep) documentation for instructions on running the server locally or through an automated hosting provider.
20 |
21 | ## Usage
22 |
23 | import CodeBlock from "@theme/CodeBlock";
24 |
25 | import Example from "!!raw-loader!@examples/memory/zep.ts";
26 |
27 |
28 |
29 | {Example}
30 |
31 |
--------------------------------------------------------------------------------
/docs/modules/memory/examples/conversation_summary.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | sidebar_label: Conversation Summary
4 | ---
5 |
6 |
7 | import CodeBlock from "@theme/CodeBlock";
8 |
9 |
10 |
11 | # Conversation Summary Memory
12 |
13 |
14 | The conversation summary memory summarizes the conversation as it happens and stores the running summary in memory. This memory can then be used to inject the summary of the conversation so far into a prompt/chain. It is most useful for longer conversations, where keeping the full message history in the prompt verbatim would take up too many tokens.
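
As a minimal sketch of the idea (the prompt wording here is illustrative; the imported examples below are the full versions):

```typescript
import { OpenAI } from "langchain/llms/openai";
import { ConversationSummaryMemory } from "langchain/memory";
import { LLMChain } from "langchain/chains";
import { PromptTemplate } from "langchain/prompts";

// The memory uses its own LLM to keep a running summary of the conversation.
const memory = new ConversationSummaryMemory({
  memoryKey: "chat_history",
  llm: new OpenAI({ temperature: 0 }),
});

const prompt = PromptTemplate.fromTemplate(`The following is a friendly conversation between a human and an AI.

Current conversation summary:
{chat_history}
Human: {input}
AI:`);

const chain = new LLMChain({ llm: new OpenAI({ temperature: 0.9 }), prompt, memory });
const res = await chain.call({ input: "Hi, I'm Jim." });
console.log({ res });
```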
15 |
16 |
17 | ## Usage, with an LLM
18 |
19 |
20 | import TextExample from "!!raw-loader!@examples/memory/summary_llm.ts";
21 |
22 |
23 |
24 | {TextExample}
25 |
26 |
27 |
28 | ## Usage, with a chat model
29 |
30 |
31 | import ChatExample from "!!raw-loader!@examples/memory/summary_chat.ts";
32 |
33 |
34 |
35 | {ChatExample}
36 |
37 |
--------------------------------------------------------------------------------
/examples/src/indexes/vector_stores/memory_fromdocs.ts:
--------------------------------------------------------------------------------
1 | import { MemoryVectorStore } from "langchain/vectorstores/memory";
2 | import { OpenAIEmbeddings } from "langchain/embeddings/openai";
3 | import { TextLoader } from "langchain/document_loaders/fs/text";
4 |
5 | export const run = async () => {
6 | // Create docs with a loader
7 | const loader = new TextLoader(
8 | "src/document_loaders/example_data/example.txt"
9 | );
10 | const docs = await loader.load();
11 |
12 | // Load the docs into the vector store
13 | const vectorStore = await MemoryVectorStore.fromDocuments(
14 | docs,
15 | new OpenAIEmbeddings()
16 | );
17 |
18 | // Search for the most similar document
19 | const resultOne = await vectorStore.similaritySearch("hello world", 1);
20 |
21 | console.log(resultOne);
22 | };
23 |
--------------------------------------------------------------------------------
/examples/src/indexes/vector_stores/opensearch/opensearch.ts:
--------------------------------------------------------------------------------
1 | import { Client } from "@opensearch-project/opensearch";
2 | import { OpenAIEmbeddings } from "langchain/embeddings/openai";
3 | import { OpenSearchVectorStore } from "langchain/vectorstores/opensearch";
4 |
5 | export async function run() {
6 | const client = new Client({
7 | nodes: [process.env.OPENSEARCH_URL ?? "http://127.0.0.1:9200"],
8 | });
9 |
10 | const vectorStore = await OpenSearchVectorStore.fromTexts(
11 | ["Hello world", "Bye bye", "What's this?"],
12 | [{ id: 2 }, { id: 1 }, { id: 3 }],
13 | new OpenAIEmbeddings(),
14 | {
15 | client,
16 | indexName: "documents",
17 | }
18 | );
19 |
20 | const resultOne = await vectorStore.similaritySearch("Hello world", 1);
21 | console.log(resultOne);
22 | }
23 |
--------------------------------------------------------------------------------
/docs/modules/chains/other_chains/summarization.mdx:
--------------------------------------------------------------------------------
1 | import CodeBlock from "@theme/CodeBlock";
2 |
3 | import SummarizeExample from "!!raw-loader!@examples/chains/summarization_map_reduce.ts";
4 |
5 | import SummarizeExampleIntermediateSteps from "!!raw-loader!@examples/chains/summarization_map_reduce_intermediate_steps.ts";
6 |
7 |
8 | # Summarization
9 |
10 | A summarization chain can be used to summarize multiple documents. One way is to input multiple smaller documents, after they have been divided into chunks, and operate over them with a `MapReduceDocumentsChain`. You can also choose instead for the chain that does summarization to be a `StuffDocumentsChain` or a `RefineDocumentsChain`. Learn more about the differences between them [here](../index_related_chains/document_qa).
11 |
12 | {SummarizeExample}
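
The core call in the example above boils down to something like this compressed sketch (the document contents are placeholders):

```typescript
import { OpenAI } from "langchain/llms/openai";
import { loadSummarizationChain } from "langchain/chains";
import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";

const model = new OpenAI({ temperature: 0 });
const splitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000 });
const docs = await splitter.createDocuments(["...a long text to summarize..."]);

// type can be "map_reduce", "stuff", or "refine"
const chain = loadSummarizationChain(model, { type: "map_reduce" });
const res = await chain.call({ input_documents: docs });
console.log(res.text);
```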
13 |
14 |
15 | ## Intermediate steps
16 |
17 | We can also return the intermediate steps of the `map_reduce` chain, should we want to inspect them. This is done with the `returnIntermediateSteps` parameter.
18 |
19 |
20 | {SummarizeExampleIntermediateSteps}
21 |
22 |
--------------------------------------------------------------------------------
/docs/modules/memory/examples/momento.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | ---
4 |
5 | import CodeBlock from "@theme/CodeBlock";
6 |
7 |
8 | # Momento-Backed Chat Memory
9 |
10 | For distributed, serverless persistence across chat sessions, you can use Momento-backed chat message history. Since the cache is available instantly and requires no infrastructure maintenance, it's a great starting point whether you're building locally or in production.
11 |
12 |
13 | ## Setup
14 |
15 | Install the [Momento client library](https://github.com/momentohq/client-sdk-javascript) in your project:
16 |
17 | ```bash npm2yarn
18 | npm install @gomomento/sdk
19 |
20 | ```
21 |
22 |
23 | You will also need an API key from [Momento](https://gomomento.com/). You can sign up for a free account [here](https://console.gomomento.com/).
24 |
25 | ## Usage
26 |
27 | To distinguish one chat history session from another, we need a unique `sessionId`. You can also provide an optional `sessionTtl` to make sessions expire after a given number of seconds.
28 |
29 | import MomentoExample from "!!raw-loader!@examples/memory/momento.ts";
30 |
31 |
32 |
33 | {MomentoExample}
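
As a rough sketch of the wiring (assuming the `MomentoChatMessageHistory` export; the cache name and session id are placeholders):

```typescript
import { CacheClient, Configurations, CredentialProvider } from "@gomomento/sdk";
import { BufferMemory } from "langchain/memory";
import { MomentoChatMessageHistory } from "langchain/stores/message/momento";

const client = new CacheClient({
  configuration: Configurations.Laptop.v1(),
  credentialProvider: CredentialProvider.fromEnvironmentVariable({
    environmentVariableName: "MOMENTO_AUTH_TOKEN",
  }),
  defaultTtlSeconds: 60 * 60 * 24,
});

// Back BufferMemory with a Momento cache instead of the default in-memory history.
const memory = new BufferMemory({
  chatHistory: await MomentoChatMessageHistory.fromProps({
    client,
    cacheName: "langchain",
    sessionId: "session-1234",
    sessionTtl: 300, // optional, in seconds
  }),
});
```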
34 |
35 |
--------------------------------------------------------------------------------
/examples/src/agents/mrkl.ts:
--------------------------------------------------------------------------------
1 | import { initializeAgentExecutorWithOptions } from "langchain/agents";
2 | import { OpenAI } from "langchain/llms/openai";
3 | import { SerpAPI } from "langchain/tools";
4 | import { Calculator } from "langchain/tools/calculator";
5 |
6 | const model = new OpenAI({ temperature: 0 });
7 | const tools = [
8 | new SerpAPI(process.env.SERPAPI_API_KEY, {
9 | location: "Austin,Texas,United States",
10 | hl: "en",
11 | gl: "us",
12 | }),
13 | new Calculator(),
14 | ];
15 |
16 | const executor = await initializeAgentExecutorWithOptions(tools, model, {
17 | agentType: "zero-shot-react-description",
18 | verbose: true,
19 | });
20 |
21 | const input = `Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?`;
22 |
23 | const result = await executor.call({ input });
24 |
--------------------------------------------------------------------------------
/examples/src/indexes/vector_stores/mongo_search.ts:
--------------------------------------------------------------------------------
1 | import { MongoVectorStore } from "langchain/vectorstores/mongo";
2 | import { CohereEmbeddings } from "langchain/embeddings/cohere";
3 | import { MongoClient } from "mongodb";
4 |
5 | export const run = async () => {
6 | const client = new MongoClient(process.env.MONGO_URI || "");
7 |
8 | const collection = client.db("langchain").collection("test");
9 |
10 | const vectorStore = new MongoVectorStore(new CohereEmbeddings(), {
11 | client,
12 | collection,
13 | // indexName: "default", // make sure that this matches the index name in atlas if not using "default"
14 | });
15 |
16 | const resultOne = await vectorStore.similaritySearch("Hello world", 1);
17 |
18 | console.log(resultOne);
19 |
20 | // remember to close the client
21 | await client.close();
22 | };
23 |
--------------------------------------------------------------------------------
/examples/src/indexes/vector_stores/mongo_fromTexts.ts:
--------------------------------------------------------------------------------
1 | import { MongoVectorStore } from "langchain/vectorstores/mongo";
2 | import { CohereEmbeddings } from "langchain/embeddings/cohere";
3 | import { MongoClient } from "mongodb";
4 |
5 | export const run = async () => {
6 | const client = new MongoClient(process.env.MONGO_URI || "");
7 |
8 | const collection = client.db("langchain").collection("test");
9 |
10 | await MongoVectorStore.fromTexts(
11 | ["Hello world", "Bye bye", "What's this?"],
12 | [{ id: 2 }, { id: 1 }, { id: 3 }],
13 | new CohereEmbeddings(),
14 | {
15 | client,
16 | collection,
17 | // indexName: "default", // make sure that this matches the index name in atlas if not using "default"
18 | }
19 | );
20 |
21 | // remember to close the client
22 | await client.close();
23 | };
24 |
--------------------------------------------------------------------------------
/docs/modules/indexes/document_loaders/examples/web_loaders/s3.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | sidebar_class_name: node-only
4 | ---
5 |
6 | # S3 File
7 |
8 | :::info Compatibility
9 | Only available on Node.js.
10 | :::
11 |
12 | This covers how to load document objects from an S3 file object.
13 |
14 | ## Setup
15 |
16 | To run this loader, you'll need to have Unstructured already set up and ready to use at an available URL endpoint. It can also be configured to run locally.
17 |
18 | See the docs here (https://js.langchain.com/docs/modules/indexes/document_loaders/examples/file_loaders/unstructured) for information on how to do that.
19 |
20 | ## Usage
21 |
22 | Once Unstructured is configured, you can use the S3 loader to load files and then convert them into documents.
23 |
24 | You can optionally provide an s3Config parameter to specify your bucket region, access key, and secret access key. If these are not provided, you will need to have them in your environment (e.g., by running `aws configure`).
25 |
26 | import CodeBlock from "@theme/CodeBlock";
27 |
28 | import Example from "!!raw-loader!@examples/document_loaders/s3.ts";
29 |
30 |
31 |
32 | {Example}
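
As a rough sketch (the bucket, key, and credentials are placeholders; Unstructured is assumed to be reachable at the given URL):

```typescript
import { S3Loader } from "langchain/document_loaders/web/s3";

const loader = new S3Loader({
  bucket: "my-document-bucket-123",
  key: "AccountingOverview.pdf",
  s3Config: {
    region: "us-east-1",
    accessKeyId: "AKIAIOSFODNN7EXAMPLE",
    secretAccessKey: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
  },
  unstructuredAPIURL: "http://localhost:8000/general/v0/general",
});

const docs = await loader.load();
console.log(docs);
```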
33 |
34 |
--------------------------------------------------------------------------------
/examples/src/README.md:
--------------------------------------------------------------------------------
1 | # langchain-examples
2 |
3 | This folder contains examples of how to use LangChain.
4 |
5 | ## Run an example
6 |
7 | What you'll usually want to do.
8 |
9 | First, build langchain. From the repository root, run:
10 |
11 | ```sh
12 | yarn
13 | yarn build
14 | ```
15 |
16 | Most examples require API keys. Run `cp .env.example .env`, then edit `.env` with your API keys.
17 |
18 | Then from the `examples/` directory, run:
19 |
20 | `yarn run start <path to example>`
21 |
22 | e.g.
23 |
24 | `yarn run start ./src/prompts/few_shot.ts`
25 |
26 | ## Run an example with the transpiled JS
27 |
28 | You shouldn't need to do this, but if you want to run an example with the transpiled JS, you can do so with:
29 |
30 | `yarn run start:dist <path to example>`
31 |
32 | e.g.
33 |
34 | `yarn run start:dist ./dist/prompts/few_shot.js`
35 |
--------------------------------------------------------------------------------
/examples/src/chains/llm_chain_stream.ts:
--------------------------------------------------------------------------------
1 | import { OpenAI } from "langchain/llms/openai";
2 | import { PromptTemplate } from "langchain/prompts";
3 | import { LLMChain } from "langchain/chains";
4 |
5 | // Create a new LLMChain from a PromptTemplate and an LLM in streaming mode.
6 | const model = new OpenAI({ temperature: 0.9, streaming: true });
7 | const prompt = PromptTemplate.fromTemplate(
8 | "What is a good name for a company that makes {product}?"
9 | );
10 | const chain = new LLMChain({ llm: model, prompt });
11 |
12 | // Call the chain with the inputs and a callback for the streamed tokens
13 | const res = await chain.call({ product: "colorful socks" }, [
14 | {
15 | handleLLMNewToken(token: string) {
16 | process.stdout.write(token);
17 | },
18 | },
19 | ]);
20 | console.log({ res });
21 | // { res: { text: '\n\nKaleidoscope Socks' } }
22 |
--------------------------------------------------------------------------------
/docs/use_cases/autonomous_agents/auto_gpt.mdx:
--------------------------------------------------------------------------------
1 | import CodeBlock from "@theme/CodeBlock";
2 |
3 |
4 | # AutoGPT
5 |
6 | :::info
7 | AutoGPT is a custom agent that uses long-term memory, along with a prompt designed for independent work (that is, without asking the user for input), to perform tasks.
8 | :::
9 |
10 | ## Isomorphic Example
11 |
12 | In this example, we use AutoGPT to predict the weather for a given location. This example is designed to run in all JS environments, including the browser.
13 |
14 | import IsomorphicExample from "!!raw-loader!@examples/experimental/autogpt/weather_browser.ts";
15 |
16 |
17 | {IsomorphicExample}
18 |
19 | ## Node.js Example
20 |
21 |
22 | In this example, we use AutoGPT to predict the weather for a given location. This example is designed to run in Node.js, so it uses the local filesystem and a Node-only vector store.
23 |
24 | import NodeExample from "!!raw-loader!@examples/experimental/autogpt/weather.ts";
25 |
26 |
27 |
28 |
29 |
30 | {NodeExample}
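
For orientation, the core wiring in both examples looks roughly like this (a sketch; the tool selection, file store, and goals are illustrative):

```typescript
import { AutoGPT } from "langchain/experimental/autogpt";
import { ReadFileTool, WriteFileTool, SerpAPI } from "langchain/tools";
import { InMemoryFileStore } from "langchain/stores/file/in_memory";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { OpenAIEmbeddings } from "langchain/embeddings/openai";
import { ChatOpenAI } from "langchain/chat_models/openai";

const store = new InMemoryFileStore();
const tools = [
  new ReadFileTool({ store }),
  new WriteFileTool({ store }),
  new SerpAPI(process.env.SERPAPI_API_KEY),
];
const vectorStore = new MemoryVectorStore(new OpenAIEmbeddings());

const autogpt = AutoGPT.fromLLMAndTools(new ChatOpenAI({ temperature: 0 }), tools, {
  memory: vectorStore.asRetriever(),
  aiName: "Tom",
  aiRole: "Assistant",
});

await autogpt.run(["write a weather report for SF today"]);
```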
31 |
32 |
--------------------------------------------------------------------------------
/examples/src/models/llm/llm_with_tracing.ts:
--------------------------------------------------------------------------------
1 | import { OpenAI } from "langchain/llms/openai";
2 | import { ChatOpenAI } from "langchain/chat_models/openai";
3 | import { SystemChatMessage, HumanChatMessage } from "langchain/schema";
4 | import * as process from "process";
5 |
6 | export const run = async () => {
7 | process.env.LANGCHAIN_HANDLER = "langchain";
8 | const model = new OpenAI({ temperature: 0.9 });
9 | const resA = await model.call(
10 | "What would be a good company name a company that makes colorful socks?"
11 | );
12 | console.log({ resA });
13 |
14 | const chat = new ChatOpenAI({ temperature: 0 });
15 | const system_message = new SystemChatMessage("You are to chat with a user.");
16 | const message = new HumanChatMessage("Hello!");
17 | const resB = await chat.call([system_message, message]);
18 | console.log({ resB });
19 | };
20 |
--------------------------------------------------------------------------------
/examples/src/indexes/token_text_splitter.ts:
--------------------------------------------------------------------------------
1 | import { Document } from "langchain/document";
2 | import { TokenTextSplitter } from "langchain/text_splitter";
3 | import fs from "fs";
4 | import path from "path";
5 |
6 | export const run = async () => {
7 | /* Split text */
8 | const text = fs.readFileSync(
9 | path.resolve(__dirname, "../../state_of_the_union.txt"),
10 | "utf8"
11 | );
12 |
13 | const splitter = new TokenTextSplitter({
14 | encodingName: "r50k_base",
15 | chunkSize: 10,
16 | chunkOverlap: 0,
17 | allowedSpecial: ["<|endoftext|>"],
18 | disallowedSpecial: [],
19 | });
20 |
21 |   const output = await splitter.createDocuments([text]);
22 | console.log({ output });
23 |
24 |   const docOutput = await splitter.splitDocuments([
25 | new Document({ pageContent: text }),
26 | ]);
27 |
28 | console.log({ docOutput });
29 | };
30 |
--------------------------------------------------------------------------------
/docs/modules/agents/executor/index.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | sidebar_position: 2
4 | ---
5 |
6 | # Agent Executors
7 |
8 | :::info
9 | [概念指南](https://docs.langchain.com/docs/components/agents/agent-executor)
10 | :::
11 |
12 | To make agents more powerful we need to make them iterative, that is, call the model multiple times until they arrive at a final answer. That's the job of the AgentExecutor.
13 |
14 | ```typescript
15 | class AgentExecutor {
16 |
17 |   // a simplified implementation
18 |
19 |   async run(inputs: object) {
20 |
21 |     const steps = [];
22 |
23 |     while (true) {
24 |
25 |       const step = await this.agent.plan(steps, inputs);
26 |
27 |       if (step instanceof AgentFinish) {
28 |
29 |         return step.returnValues;
30 |
31 |       }
32 |
33 |       steps.push(step);
34 |
35 |     }
36 |
37 |   }
38 |
39 | }
40 |
41 | ```
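
In practice you rarely write this loop yourself; you construct an executor from an agent type and tools. A minimal sketch:

```typescript
import { initializeAgentExecutorWithOptions } from "langchain/agents";
import { OpenAI } from "langchain/llms/openai";
import { Calculator } from "langchain/tools/calculator";

// The executor runs the plan/act loop above until the agent returns AgentFinish.
const executor = await initializeAgentExecutorWithOptions(
  [new Calculator()],
  new OpenAI({ temperature: 0 }),
  { agentType: "zero-shot-react-description" }
);

const result = await executor.call({
  input: "What is 23 raised to the 0.23 power?",
});
console.log(result.output);
```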
42 |
43 |
44 | import DocCardList from "@theme/DocCardList";
45 |
46 |
47 |
48 |
49 |
50 |
--------------------------------------------------------------------------------
/examples/src/callbacks/custom_handler.ts:
--------------------------------------------------------------------------------
1 | import { BaseCallbackHandler } from "langchain/callbacks";
2 | import { AgentAction, AgentFinish, ChainValues } from "langchain/schema";
3 |
4 | export class MyCallbackHandler extends BaseCallbackHandler {
5 | name = "MyCallbackHandler";
6 |
7 | async handleChainStart(chain: { name: string }) {
8 | console.log(`Entering new ${chain.name} chain...`);
9 | }
10 |
11 | async handleChainEnd(_output: ChainValues) {
12 | console.log("Finished chain.");
13 | }
14 |
15 | async handleAgentAction(action: AgentAction) {
16 | console.log(action.log);
17 | }
18 |
19 | async handleToolEnd(output: string) {
20 | console.log(output);
21 | }
22 |
23 | async handleText(text: string) {
24 | console.log(text);
25 | }
26 |
27 | async handleAgentEnd(action: AgentFinish) {
28 | console.log(action.log);
29 | }
30 | }
31 |
--------------------------------------------------------------------------------
/examples/src/indexes/vector_stores/hnswlib_saveload.ts:
--------------------------------------------------------------------------------
1 | import { HNSWLib } from "langchain/vectorstores/hnswlib";
2 | import { OpenAIEmbeddings } from "langchain/embeddings/openai";
3 |
4 | // Create a vector store through any method, here from texts as an example
5 | const vectorStore = await HNSWLib.fromTexts(
6 | ["Hello world", "Bye bye", "hello nice world"],
7 | [{ id: 2 }, { id: 1 }, { id: 3 }],
8 | new OpenAIEmbeddings()
9 | );
10 |
11 | // Save the vector store to a directory
12 | const directory = "your/directory/here";
13 | await vectorStore.save(directory);
14 |
15 | // Load the vector store from the same directory
16 | const loadedVectorStore = await HNSWLib.load(directory, new OpenAIEmbeddings());
17 |
18 | // vectorStore and loadedVectorStore are identical
19 |
20 | const result = await loadedVectorStore.similaritySearch("hello world", 1);
21 | console.log(result);
22 |
--------------------------------------------------------------------------------
/docs/modules/agents/agents/index.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | sidebar_position: 1
4 | ---
5 |
6 | import DocCardList from "@theme/DocCardList";
7 |
8 |
9 | # 代理
10 |
11 | :::info
12 | [概念指南](https://docs.langchain.com/docs/components/agents/agent)
13 | :::
14 |
15 | The agent is a stateless wrapper around an agent prompting chain (such as MRKL) that takes care of formatting tools into the prompt, as well as parsing the responses obtained from the chat model. It takes in user input and returns a response corresponding to an "action" to take and a corresponding "action input".
16 |
17 | ## Which agent to choose?
18 |
19 | The agent you choose depends on the type of task you want to perform. Here's a quick guide to help you pick the right agent for your use case:
20 |
21 | - If you're using a text LLM, first try `zero-shot-react-description`, a.k.a. the [MRKL agent for LLMs](./action/llm_mrkl).
22 | - If you're using a chat model, try `chat-zero-shot-react-description`, a.k.a. the [MRKL agent for chat models](./action/chat_mrkl).
23 | - If you're using a chat model and want to use memory, try `chat-conversational-react-description`, the [conversational agent](./action/conversational_agent).
24 | - If you have a complex task that requires many steps and you're interested in experimenting with a new type of agent, try the [Plan-and-Execute agent](./plan_execute/).
25 |
26 | ## All agents
27 |
28 |
29 |
30 |
31 |
--------------------------------------------------------------------------------
/docs/modules/schema/document.md:
--------------------------------------------------------------------------------
1 | # Documents
2 |
3 | Language models only know about the information they were trained on. To get them to answer questions about, or summarize, other information, you have to pass it to the language model. Therefore, the concept of a document is very important.
4 |
5 | A document at its core is quite simple. It consists of a piece of text and optional metadata. The piece of text is what we interact with the language model on, while the optional metadata is useful for keeping track of metadata about the document (such as its source).
6 |
7 | ```typescript
8 | interface Document {
9 |
10 |   pageContent: string;
11 |
12 |   metadata: Record<string, any>;
13 |
14 | }
15 |
16 | ```
17 |
18 |
19 | ## Creating a document
20 |
21 | You can create a document object rather easily in LangChain with:
22 |
23 | ```typescript
24 | import { Document } from "langchain/document";
25 |
26 |
27 |
28 | const doc = new Document({ pageContent: "foo" });
29 |
30 | ```
31 |
32 |
33 | You can create one with metadata with:
34 |
35 | ```typescript
36 | import { Document } from "langchain/document";
37 |
38 |
39 |
40 | const doc = new Document({ pageContent: "foo", metadata: { source: "1" } });
41 |
42 | ```
43 |
44 |
45 | Also check out [Document Loaders](../indexes/document_loaders/) for ways to load documents from a variety of sources.
46 |
47 |
--------------------------------------------------------------------------------
/examples/src/agents/aiplugin-tool.ts:
--------------------------------------------------------------------------------
1 | import { ChatOpenAI } from "langchain/chat_models/openai";
2 | import { initializeAgentExecutorWithOptions } from "langchain/agents";
3 | import {
4 | RequestsGetTool,
5 | RequestsPostTool,
6 | AIPluginTool,
7 | } from "langchain/tools";
8 |
9 | export const run = async () => {
10 | const tools = [
11 | new RequestsGetTool(),
12 | new RequestsPostTool(),
13 | await AIPluginTool.fromPluginUrl(
14 | "https://www.klarna.com/.well-known/ai-plugin.json"
15 | ),
16 | ];
17 | const agent = await initializeAgentExecutorWithOptions(
18 | tools,
19 | new ChatOpenAI({ temperature: 0 }),
20 | { agentType: "chat-zero-shot-react-description", verbose: true }
21 | );
22 |
23 | const result = await agent.call({
24 | input: "what t shirts are available in klarna?",
25 | });
26 |
27 | console.log({ result });
28 | };
29 |
--------------------------------------------------------------------------------
/docs/modules/chains/sequential_chain.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | sidebar_position: 3
4 | ---
5 |
6 | import CodeBlock from "@theme/CodeBlock";
7 |
8 | import SimpleSequentialChainExample from "!!raw-loader!@examples/chains/simple_sequential_chain.ts";
9 |
10 | import SequentialChainExample from "!!raw-loader!@examples/chains/sequential_chain.ts";
11 |
12 |
13 | # Sequential Chains
14 |
15 | Sequential chains let you connect multiple chains and compose them into pipelines that execute a specific scenario.
16 |
17 | ## `SimpleSequentialChain`
18 |
19 | Let's start with the simplest case, the `SimpleSequentialChain`.
20 |
21 | A `SimpleSequentialChain` is a chain that lets you join multiple single-input/single-output chains into one chain.
22 |
23 | The example below shows a sample use case. In the first step, given a title, a synopsis for a play is generated. In the second step, a review of the play is generated based on that synopsis.
24 |
25 | {SimpleSequentialChainExample}
26 |
27 |
28 | ## `SequentialChain`
29 |
30 | The more advanced `SequentialChain` is useful when you have multiple chains with multiple input or output keys.
31 |
32 |
33 | {SequentialChainExample}
34 |
35 |
--------------------------------------------------------------------------------
/docs/modules/chains/index_related_chains/retrieval_qa.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | sidebar_position: 2
4 | ---
5 |
6 | import RetrievalQAExample from "!!raw-loader!@examples/chains/retrieval_qa.ts";
7 |
8 | import RetrievalQAExampleCustom from "!!raw-loader!@examples/chains/retrieval_qa_custom.ts";
9 |
10 | import CodeBlock from "@theme/CodeBlock";
11 |
12 |
13 | # Retrieval QA
14 |
15 | The `RetrievalQAChain` is a chain that combines a `Retriever` and a QA chain (described above). It is used to retrieve documents from a `Retriever` and then use a `QA` chain to answer a question based on the retrieved documents.
16 |
17 | ## Usage
18 |
19 | In the example below, we use a `VectorStore` as the `Retriever`. By default, the `StuffDocumentsChain` is used as the `QA` chain.
20 |
21 | {RetrievalQAExample}
22 |
23 |
24 | ## Usage with a custom `QA` chain
25 |
26 | In the example below, we use a `VectorStore` as the `Retriever` and a `RefineDocumentsChain` as the `QA` chain.
27 |
28 |
29 | {RetrievalQAExampleCustom}
30 |
31 |
--------------------------------------------------------------------------------
/docs/modules/indexes/retrievers/time-weighted-retriever.mdx:
--------------------------------------------------------------------------------
1 | # Time-Weighted Retriever
2 |
3 |
4 | A time-weighted retriever is a retriever that takes both similarity and recency into account. The scoring algorithm is:
5 |
6 |
7 | ```typescript
8 | let score = (1.0 - this.decayRate) ** hoursPassed + vectorRelevance;
9 |
10 | ```
11 |
12 |
13 |
14 | Notably, `hoursPassed` refers to the time since the object in the retriever was last accessed, not since it was created. This means that frequently accessed objects remain "fresh" and score higher.
15 |
16 |
17 | `this.decayRate` is a configurable decimal between 0 and 1. A lower number means that documents will be "remembered" for longer, while a higher number puts more emphasis on recently accessed documents.
18 |
19 |
20 | Note that setting the decay rate to exactly 0 or 1 makes `hoursPassed` irrelevant, and makes this retriever equivalent to a standard vector lookup.
21 |
22 |
23 | ## Usage
24 |
25 |
26 | Below is an example of initializing a `TimeWeightedVectorStoreRetriever` with a vector store.
27 | Important: due to the required metadata, all documents must be added to the backing vector store using the `addDocuments` method on the **retriever**, not on the vector store itself.
28 |
29 |
30 | import CodeBlock from "@theme/CodeBlock";
31 |
32 | import Example from "!!raw-loader!@examples/retrievers/time-weighted-retriever.ts";
33 |
34 |
35 |
36 | {Example}
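
If you just want the shape of the API, a compressed sketch looks like this (the document contents are placeholders):

```typescript
import { TimeWeightedVectorStoreRetriever } from "langchain/retrievers/time_weighted";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { OpenAIEmbeddings } from "langchain/embeddings/openai";
import { Document } from "langchain/document";

const vectorStore = new MemoryVectorStore(new OpenAIEmbeddings());

const retriever = new TimeWeightedVectorStoreRetriever({
  vectorStore,
  memoryStream: [],
  searchKwargs: 2,
});

// Add documents via the retriever so it can attach the timestamp metadata it needs.
await retriever.addDocuments([
  new Document({ pageContent: "My favorite food is pizza", metadata: {} }),
]);

const results = await retriever.getRelevantDocuments("What is my favorite food?");
console.log(results);
```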
37 |
38 |
--------------------------------------------------------------------------------
/blog/2021-08-26-welcome/index.md:
--------------------------------------------------------------------------------
1 | ---
2 | slug: welcome
3 | title: Welcome
4 | authors: [slorber, yangshun]
5 | tags: [facebook, hello, docusaurus]
6 | ---
7 |
8 | [Docusaurus blogging features](https://docusaurus.io/docs/blog) are powered by the [blog plugin](https://docusaurus.io/docs/api/plugins/@docusaurus/plugin-content-blog).
9 |
10 | Simply add Markdown files (or folders) to the `blog` directory.
11 |
12 | Regular blog authors can be added to `authors.yml`.
13 |
14 | The blog post date can be extracted from filenames, such as:
15 |
16 | - `2019-05-30-welcome.md`
17 | - `2019-05-30-welcome/index.md`
18 |
19 | A blog post folder can be convenient to co-locate blog post images:
20 |
21 | 
22 |
23 | The blog supports tags as well!
24 |
25 | **And if you don't want a blog**: just delete this directory, and use `blog: false` in your Docusaurus config.
26 |
--------------------------------------------------------------------------------
/examples/src/indexes/vector_stores/chroma/fromDocs.ts:
--------------------------------------------------------------------------------
1 | import { Chroma } from "langchain/vectorstores/chroma";
2 | import { OpenAIEmbeddings } from "langchain/embeddings/openai";
3 | import { TextLoader } from "langchain/document_loaders/fs/text";
4 |
5 | // Create docs with a loader
6 | const loader = new TextLoader("src/document_loaders/example_data/example.txt");
7 | const docs = await loader.load();
8 |
9 | // Create vector store and index the docs
10 | const vectorStore = await Chroma.fromDocuments(docs, new OpenAIEmbeddings(), {
11 | collectionName: "a-test-collection",
12 | });
13 |
14 | // Search for the most similar document
15 | const response = await vectorStore.similaritySearch("hello", 1);
16 |
17 | console.log(response);
18 | /*
19 | [
20 | Document {
21 | pageContent: 'Foo\nBar\nBaz\n\n',
22 | metadata: { source: 'src/document_loaders/example_data/example.txt' }
23 | }
24 | ]
25 | */
26 |
--------------------------------------------------------------------------------
/examples/src/indexes/vector_stores/faiss_saveload.ts:
--------------------------------------------------------------------------------
1 | import { FaissStore } from "langchain/vectorstores/faiss";
2 | import { OpenAIEmbeddings } from "langchain/embeddings/openai";
3 |
4 | // Create a vector store through any method, here from texts as an example
5 | const vectorStore = await FaissStore.fromTexts(
6 | ["Hello world", "Bye bye", "hello nice world"],
7 | [{ id: 2 }, { id: 1 }, { id: 3 }],
8 | new OpenAIEmbeddings()
9 | );
10 |
11 | // Save the vector store to a directory
12 | const directory = "your/directory/here";
13 |
14 | await vectorStore.save(directory);
15 |
16 | // Load the vector store from the same directory
17 | const loadedVectorStore = await FaissStore.load(
18 | directory,
19 | new OpenAIEmbeddings()
20 | );
21 |
22 | // vectorStore and loadedVectorStore are identical
23 | const result = await loadedVectorStore.similaritySearch("hello world", 1);
24 | console.log(result);
25 |
--------------------------------------------------------------------------------
/examples/src/indexes/vector_stores/myscale_search.ts:
--------------------------------------------------------------------------------
1 | import { MyScaleStore } from "langchain/vectorstores/myscale";
2 | import { OpenAIEmbeddings } from "langchain/embeddings/openai";
3 |
4 | const vectorStore = await MyScaleStore.fromExistingIndex(
5 | new OpenAIEmbeddings(),
6 | {
7 | host: process.env.MYSCALE_HOST || "localhost",
8 | port: process.env.MYSCALE_PORT || "8443",
9 | username: process.env.MYSCALE_USERNAME || "username",
10 | password: process.env.MYSCALE_PASSWORD || "password",
11 | database: "your_database", // defaults to "default"
12 | table: "your_table", // defaults to "vector_table"
13 | }
14 | );
15 |
16 | const results = await vectorStore.similaritySearch("hello world", 1);
17 | console.log(results);
18 |
19 | const filteredResults = await vectorStore.similaritySearch("hello world", 1, {
20 | whereStr: "metadata.name = '1'",
21 | });
22 | console.log(filteredResults);
23 |
--------------------------------------------------------------------------------
/docs/modules/schema/chat-messages.md:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | sidebar_position: 3
4 | ---
5 |
6 | # Chat Messages
7 |
8 | The primary interface through which end users interact with LLMs is a chat interface. For this reason, some model providers have started providing access to the underlying API in a way that expects chat messages. These messages have a content field (which is usually text) and are associated with a user (or role). Right now the supported users are System, Human, and AI.
9 |
10 | ## SystemChatMessage
11 |
12 | A chat message representing instructions that the AI system should follow.
13 |
14 | ```typescript
15 | import { SystemChatMessage } from "langchain/schema";
16 |
17 |
18 |
19 | new SystemChatMessage("You are a nice assistant");
20 |
21 | ```
22 |
23 |
24 | ## HumanChatMessage
25 |
26 | A chat message representing information coming from the human interacting with the AI system.
27 |
28 | ```typescript
29 | import { HumanChatMessage } from "langchain/schema";
30 |
31 |
32 |
33 | new HumanChatMessage("Hello, how are you?");
34 |
35 | ```
36 |
37 |
38 | ## AIChatMessage
39 |
40 | A chat message representing a message coming from the AI system.
41 |
42 | ```typescript
43 |
44 | import { AIChatMessage } from "langchain/schema";
45 |
46 |
47 |
48 | new AIChatMessage("I am doing well, thank you!");
49 |
50 | ```
51 |
52 |
--------------------------------------------------------------------------------
/docs/use_cases/autonomous_agents/baby_agi.mdx:
--------------------------------------------------------------------------------
1 | import CodeBlock from "@theme/CodeBlock";
2 |
3 |
4 | # BabyAGI
5 |
6 | :::info
7 | Original GitHub repo: https://github.com/yoheinakajima/babyagi
8 | :::
9 |
10 | BabyAGI is made up of three components:
11 |
12 | - A chain responsible for creating tasks
13 | - A chain responsible for prioritizing tasks
14 | - A chain responsible for executing tasks
15 |
16 | These chains are executed in sequence until the task list is empty or the maximum number of iterations is reached.
17 |
18 | ## Simple Example
19 |
20 | import SimpleExample from "!!raw-loader!@examples/experimental/babyagi/weather.ts";
21 |
22 |
23 | In this example, we use BabyAGI directly without any tools. You'll see that it successfully creates a list of tasks, but when it comes to executing the tasks we do not get concrete results. This is because we have not provided BabyAGI with any tools. We'll see how to do that in the next example.
24 |
25 | {SimpleExample}
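
The core call in the example above boils down to something like this sketch (assuming the experimental `BabyAGI` export; the objective is illustrative):

```typescript
import { BabyAGI } from "langchain/experimental/babyagi";
import { OpenAI } from "langchain/llms/openai";
import { OpenAIEmbeddings } from "langchain/embeddings/openai";
import { MemoryVectorStore } from "langchain/vectorstores/memory";

const babyAGI = BabyAGI.fromLLM({
  llm: new OpenAI({ temperature: 0 }),
  vectorstore: new MemoryVectorStore(new OpenAIEmbeddings()),
  maxIterations: 3, // stop after three create/prioritize/execute rounds
});

await babyAGI.call({ objective: "Write a short weather report for SF today" });
```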
26 |
27 |
28 | ## Example with Tools
29 |
30 | import ToolsExample from "!!raw-loader!@examples/experimental/babyagi/weather_with_tools.ts";
31 |
32 |
33 | In this example, we replace the execution chain with a custom agent with a Search tool. This gives BabyAGI the ability to use real-world data when executing tasks, which makes it much more powerful. You can add additional tools to give BabyAGI even more capabilities.
34 |
35 |
36 | {ToolsExample}
37 |
38 |
--------------------------------------------------------------------------------
/docs/modules/indexes/retrievers/vespa-retriever.mdx:
--------------------------------------------------------------------------------
1 | # Vespa Retriever
2 |
3 | This shows how to use Vespa.ai as a LangChain retriever.
4 | Vespa.ai is a platform for highly efficient structured text and vector search.
5 | Please refer to [Vespa.ai](https://vespa.ai) for more information.
6 |
7 | The following sets up a retriever that fetches results from Vespa's documentation search:
8 |
9 | import CodeBlock from "@theme/CodeBlock";
10 |
11 | import Example from "!!raw-loader!@examples/retrievers/vespa.ts";
12 |
13 |
14 | {Example}
15 |
16 |
17 | Here, up to 5 results are retrieved from the "content" field in the "paragraph" document type,
18 | using "documentation" as the ranking method. The "userQuery()" is replaced with the actual query.
19 |
20 | Please refer to the [pyvespa documentation](https://pyvespa.readthedocs.io/en/latest/getting-started-pyvespa.html#Query)
21 | for more information.
22 |
23 | The URL is the endpoint of the Vespa application.
24 | You can connect to any Vespa endpoint, either a remote service or a local instance using Docker.
25 |
26 | However, most Vespa Cloud instances are protected with mTLS.
27 | If this is your case, you can, for instance, set up a [CloudFlare Worker](https://cloud.vespa.ai/en/security/cloudflare-workers)
28 | that contains the necessary credentials to connect to the instance.
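
Putting that together, a compressed sketch of the retriever set up above (the URL and query body mirror the documentation-search case; treat them as placeholders for your own application):

```typescript
import { VespaRetriever } from "langchain/retrievers/vespa";

const url = "https://doc-search.vespa.oath.cloud";
const query_body = {
  yql: "select content from paragraph where userQuery()",
  hits: 5,
  ranking: "documentation",
  locale: "en-us",
};

const retriever = new VespaRetriever({
  url,
  auth: false, // or pass credentials for mTLS-protected instances
  query_body,
  content_field: "content",
});

const result = await retriever.getRelevantDocuments("what is vespa?");
```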
29 |
30 |
31 |
32 | Now you can return the results and continue using them in LangChain.
33 |
34 |
--------------------------------------------------------------------------------
/docs/modules/models/index.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | sidebar_position: 2
3 | hide_table_of_contents: true
4 | sidebar_label: Models
5 | ---
6 |
7 | import DocCardList from "@theme/DocCardList";
8 |
9 |
10 | # Models
11 |
12 | :::info
13 | [Conceptual Guide](https://docs.langchain.com/docs/components/models/)
14 | :::
15 |
16 | Models are a core component of LangChain. LangChain is not a provider of models, but rather provides a standard interface through which you can interact with a variety of language models.
17 | LangChain provides support for text models (LLMs), chat models, and text embedding models.
18 |
19 | LLMs use a text-based input and output, while chat models use a message-based input and output.
20 |
21 | > **_Note:_** Chat model APIs are fairly new, so we are still figuring out the correct abstractions. If you have any feedback, please let us know!
22 |
23 | ## All Models
24 |
25 |
26 |
27 |
28 | ## Advanced
29 |
30 | _This section is for users who want a deeper technical understanding of how LangChain works. If you are just getting started, you can skip this section._
31 |
32 | Both LLMs and chat models are built on top of the `BaseLanguageModel` class. This class provides a common interface for all models, and allows us to easily swap out models in chains without changing the rest of the code.
33 |
34 | The `BaseLanguageModel` class has two abstract methods, `generatePrompt` and `getNumTokens`, which are implemented by `BaseChatModel` and `BaseLLM` respectively.
35 |
36 | `BaseLLM` is a subclass of `BaseLanguageModel` that provides a common interface for LLMs (Large Language Models), while `BaseChatModel` is a subclass of `BaseLanguageModel` that provides a common interface for chat models.
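
As a rough conceptual sketch of that hierarchy (simplified for illustration; the real classes carry more options and callback plumbing):

```typescript
import { BasePromptValue, LLMResult } from "langchain/schema";

// Simplified for illustration only; not the library's exact signatures.
abstract class BaseLanguageModel {
  abstract generatePrompt(
    promptValues: BasePromptValue[],
    stop?: string[]
  ): Promise<LLMResult>;

  abstract getNumTokens(text: string): Promise<number>;
}

abstract class BaseLLM extends BaseLanguageModel {
  // implements generatePrompt in terms of plain-text completions
}

abstract class BaseChatModel extends BaseLanguageModel {
  // implements generatePrompt in terms of message-based chat calls
}
```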
--------------------------------------------------------------------------------
/examples/src/models/chat/integration_googlevertexai-examples.ts:
--------------------------------------------------------------------------------
1 | import { ChatGoogleVertexAI } from "langchain/chat_models/googlevertexai";
2 | import {
3 | AIChatMessage,
4 | HumanChatMessage,
5 | SystemChatMessage,
6 | } from "langchain/schema";
7 |
8 | export const run = async () => {
9 | const examples = [
10 | {
11 | input: new HumanChatMessage("What is your favorite sock color?"),
12 | output: new AIChatMessage("My favorite sock color be arrrr-ange!"),
13 | },
14 | ];
15 | const model = new ChatGoogleVertexAI({
16 | temperature: 0.7,
17 | examples,
18 | });
19 | const questions = [
20 | new SystemChatMessage(
21 | "You are a funny assistant that answers in pirate language."
22 | ),
23 | new HumanChatMessage("What is your favorite food?"),
24 | ];
25 | // You can also use the model as part of a chain
26 | const res = await model.call(questions);
27 | console.log({ res });
28 | };
29 |
--------------------------------------------------------------------------------
/examples/src/chains/llm_chain.ts:
--------------------------------------------------------------------------------
1 | import { OpenAI } from "langchain/llms/openai";
2 | import { PromptTemplate } from "langchain/prompts";
3 | import { LLMChain } from "langchain/chains";
4 |
5 | // We can construct an LLMChain from a PromptTemplate and an LLM.
6 | const model = new OpenAI({ temperature: 0 });
7 | const prompt = PromptTemplate.fromTemplate(
8 | "What is a good name for a company that makes {product}?"
9 | );
10 | const chainA = new LLMChain({ llm: model, prompt });
11 |
12 | // The result is an object with a `text` property.
13 | const resA = await chainA.call({ product: "colorful socks" });
14 | console.log({ resA });
15 | // { resA: { text: '\n\nSocktastic!' } }
16 |
17 | // Since the LLMChain is a single-input, single-output chain, we can also `run` it.
18 | // This takes in a string and returns the `text` property.
19 | const resA2 = await chainA.run("colorful socks");
20 | console.log({ resA2 });
21 | // { resA2: '\n\nSocktastic!' }
22 |
--------------------------------------------------------------------------------
/examples/src/chains/sql_db.ts:
--------------------------------------------------------------------------------
1 | import { DataSource } from "typeorm";
2 | import { OpenAI } from "langchain/llms/openai";
3 | import { SqlDatabase } from "langchain/sql_db";
4 | import { SqlDatabaseChain } from "langchain/chains";
5 |
6 | /**
7 | * This example uses Chinook database, which is a sample database available for SQL Server, Oracle, MySQL, etc.
8 | * To set it up follow the instructions on https://database.guide/2-sample-databases-sqlite/, placing the .db file
9 | * in the examples folder.
10 | */
11 | const datasource = new DataSource({
12 | type: "sqlite",
13 | database: "Chinook.db",
14 | });
15 |
16 | const db = await SqlDatabase.fromDataSourceParams({
17 | appDataSource: datasource,
18 | });
19 |
20 | const chain = new SqlDatabaseChain({
21 | llm: new OpenAI({ temperature: 0 }),
22 | database: db,
23 | });
24 |
25 | const res = await chain.run("How many tracks are there?");
26 | console.log(res);
27 | // There are 3503 tracks.
28 |
--------------------------------------------------------------------------------
/examples/src/indexes/vector_stores/qdrant/fromDocs.ts:
--------------------------------------------------------------------------------
1 | import { QdrantVectorStore } from "langchain/vectorstores/qdrant";
2 | import { OpenAIEmbeddings } from "langchain/embeddings/openai";
3 | import { TextLoader } from "langchain/document_loaders/fs/text";
4 |
5 | // Create docs with a loader
6 | const loader = new TextLoader("src/document_loaders/example_data/example.txt");
7 | const docs = await loader.load();
8 |
9 | const vectorStore = await QdrantVectorStore.fromDocuments(
10 | docs,
11 | new OpenAIEmbeddings(),
12 | {
13 | url: process.env.QDRANT_URL,
14 | collectionName: "a_test_collection",
15 | }
16 | );
17 |
18 | // Search for the most similar document
19 | const response = await vectorStore.similaritySearch("hello", 1);
20 |
21 | console.log(response);
22 | /*
23 | [
24 | Document {
25 | pageContent: 'Foo\nBar\nBaz\n\n',
26 | metadata: { source: 'src/document_loaders/example_data/example.txt' }
27 | }
28 | ]
29 | */
30 |
--------------------------------------------------------------------------------
/examples/src/agents/zapier_mrkl.ts:
--------------------------------------------------------------------------------
1 | import { OpenAI } from "langchain/llms/openai";
2 | import { ZapierNLAWrapper } from "langchain/tools";
3 | import {
4 | initializeAgentExecutorWithOptions,
5 | ZapierToolKit,
6 | } from "langchain/agents";
7 |
8 | const model = new OpenAI({ temperature: 0 });
9 | const zapier = new ZapierNLAWrapper();
10 | const toolkit = await ZapierToolKit.fromZapierNLAWrapper(zapier);
11 |
12 | const executor = await initializeAgentExecutorWithOptions(
13 | toolkit.tools,
14 | model,
15 | {
16 | agentType: "zero-shot-react-description",
17 | verbose: true,
18 | }
19 | );
20 | console.log("Loaded agent.");
21 |
22 | const input = `Summarize the last email I received regarding Silicon Valley Bank. Send the summary to the #test-zapier Slack channel.`;
23 |
24 | console.log(`Executing with input "${input}"...`);
25 |
26 | const result = await executor.call({ input });
27 |
28 | console.log(`Got output ${result.output}`);
29 |
--------------------------------------------------------------------------------
/examples/src/indexes/vector_stores/myscale_fromTexts.ts:
--------------------------------------------------------------------------------
1 | import { MyScaleStore } from "langchain/vectorstores/myscale";
2 | import { OpenAIEmbeddings } from "langchain/embeddings/openai";
3 |
4 | const vectorStore = await MyScaleStore.fromTexts(
5 | ["Hello world", "Bye bye", "hello nice world"],
6 | [
7 | { id: 2, name: "2" },
8 | { id: 1, name: "1" },
9 | { id: 3, name: "3" },
10 | ],
11 | new OpenAIEmbeddings(),
12 | {
13 | host: process.env.MYSCALE_HOST || "localhost",
14 | port: process.env.MYSCALE_PORT || "8443",
15 | username: process.env.MYSCALE_USERNAME || "username",
16 | password: process.env.MYSCALE_PASSWORD || "password",
17 | }
18 | );
19 |
20 | const results = await vectorStore.similaritySearch("hello world", 1);
21 | console.log(results);
22 |
23 | const filteredResults = await vectorStore.similaritySearch("hello world", 1, {
24 | whereStr: "metadata.name = '1'",
25 | });
26 | console.log(filteredResults);
27 |
--------------------------------------------------------------------------------
/examples/src/callbacks/console_handler.ts:
--------------------------------------------------------------------------------
1 | import { ConsoleCallbackHandler } from "langchain/callbacks";
2 | import { LLMChain } from "langchain/chains";
3 | import { OpenAI } from "langchain/llms/openai";
4 | import { PromptTemplate } from "langchain/prompts";
5 |
6 | export const run = async () => {
7 | const handler = new ConsoleCallbackHandler();
8 | const llm = new OpenAI({ temperature: 0, callbacks: [handler] });
9 | const prompt = PromptTemplate.fromTemplate("1 + {number} =");
10 | const chain = new LLMChain({ prompt, llm, callbacks: [handler] });
11 |
12 | const output = await chain.call({ number: 2 });
13 | /*
14 | Entering new llm_chain chain...
15 | Finished chain.
16 | */
17 |
18 | console.log(output);
19 | /*
20 | { text: ' 3\n\n3 - 1 = 2' }
21 | */
22 |
23 | // The non-enumerable key `__run` contains the runId.
24 | console.log(output.__run);
25 | /*
26 | { runId: '90e1f42c-7cb4-484c-bf7a-70b73ef8e64b' }
27 | */
28 | };
29 |
--------------------------------------------------------------------------------
/examples/src/chat/llm_chain.ts:
--------------------------------------------------------------------------------
1 | import { LLMChain } from "langchain/chains";
2 | import { ChatOpenAI } from "langchain/chat_models/openai";
3 | import {
4 | ChatPromptTemplate,
5 | HumanMessagePromptTemplate,
6 | SystemMessagePromptTemplate,
7 | } from "langchain/prompts";
8 |
9 | export const run = async () => {
10 | const chat = new ChatOpenAI({ temperature: 0 });
11 |
12 | const chatPrompt = ChatPromptTemplate.fromPromptMessages([
13 | SystemMessagePromptTemplate.fromTemplate(
14 | "You are a helpful assistant that translates {input_language} to {output_language}."
15 | ),
16 | HumanMessagePromptTemplate.fromTemplate("{text}"),
17 | ]);
18 |
19 | const chain = new LLMChain({
20 | prompt: chatPrompt,
21 | llm: chat,
22 | });
23 |
24 | const response = await chain.call({
25 | input_language: "English",
26 | output_language: "French",
27 | text: "I love programming.",
28 | });
29 |
30 | console.log(response);
31 | };
32 |
--------------------------------------------------------------------------------
/examples/src/memory/buffer.ts:
--------------------------------------------------------------------------------
1 | import { OpenAI } from "langchain/llms/openai";
2 | import { BufferMemory } from "langchain/memory";
3 | import { LLMChain } from "langchain/chains";
4 | import { PromptTemplate } from "langchain/prompts";
5 |
6 | const memory = new BufferMemory({ memoryKey: "chat_history" });
7 | const model = new OpenAI({ temperature: 0.9 });
8 | const prompt =
9 | PromptTemplate.fromTemplate(`The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
10 |
11 | Current conversation:
12 | {chat_history}
13 | Human: {input}
14 | AI:`);
15 | const chain = new LLMChain({ llm: model, prompt, memory });
16 |
17 | const res1 = await chain.call({ input: "Hi! I'm Jim." });
18 | console.log({ res1 });
19 |
20 | const res2 = await chain.call({ input: "What's my name?" });
21 | console.log({ res2 });
22 |
--------------------------------------------------------------------------------
/docs/modules/agents/tools/zapier_agent.mdx:
--------------------------------------------------------------------------------
1 | import CodeBlock from "@theme/CodeBlock";
2 |
3 |
4 | # Agent with Zapier NLA Integration
5 |
6 | Full docs here: https://nla.zapier.com/api/v1/dynamic/docs
7 |
8 | **Zapier Natural Language Actions** gives you access to the 5k+ apps and 20k+ actions on Zapier's platform through a natural-language API interface.
9 |
10 | NLA supports apps like Gmail, Salesforce, Trello, Slack, Asana, HubSpot, Google Sheets, Microsoft Teams, and thousands more apps: https://zapier.com/apps
11 |
12 | Zapier NLA handles all the underlying API auth and translation from natural language --> underlying API call --> return simplified output for LLMs. The key idea is that you, or your users, expose a set of actions via an OAuth-like setup window, which you can then query and execute via a REST API.
13 |
14 | NLA offers both API Key and OAuth for signing NLA API requests.
15 |
16 | Server-side (API Key): for quickly getting started, testing, and production scenarios where LangChain will only use actions exposed in the developer's Zapier account (and will use the developer's connected accounts on Zapier.com).
17 |
18 | User-facing (OAuth): for production scenarios where you are deploying an end-user-facing application and LangChain needs access to the end user's exposed actions and connected accounts on Zapier.com.
19 |
20 | This quick start will focus on the server-side use case for brevity. Review the full docs or reach out to nla@zapier.com for user-facing OAuth developer support.
21 |
22 | The example below demonstrates how to use the Zapier integration as part of an agent:
23 |
24 | import Example from "!!raw-loader!@examples/agents/zapier_mrkl.ts";
25 |
26 |
27 |
28 | {Example}
29 |
30 |
--------------------------------------------------------------------------------
/examples/src/indexes/python_text_splitter.ts:
--------------------------------------------------------------------------------
1 | import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";
2 |
3 | const pythonCode = `def hello_world():
4 | print("Hello, World!")
5 | # Call the function
6 | hello_world()`;
7 |
8 | const splitter = RecursiveCharacterTextSplitter.fromLanguage("python", {
9 | chunkSize: 32,
10 | chunkOverlap: 0,
11 | });
12 |
13 | const pythonOutput = await splitter.createDocuments([pythonCode]);
14 |
15 | console.log(pythonOutput);
16 |
17 | /*
18 | [
19 | Document {
20 | pageContent: 'def hello_world():',
21 | metadata: { loc: [Object] }
22 | },
23 | Document {
24 | pageContent: 'print("Hello, World!")',
25 | metadata: { loc: [Object] }
26 | },
27 | Document {
28 | pageContent: '# Call the function',
29 | metadata: { loc: [Object] }
30 | },
31 | Document {
32 | pageContent: 'hello_world()',
33 | metadata: { loc: [Object] }
34 | }
35 | ]
36 | */
37 |
--------------------------------------------------------------------------------
/examples/src/indexes/vector_stores/singlestore.ts:
--------------------------------------------------------------------------------
1 | import { SingleStoreVectorStore } from "langchain/vectorstores/singlestore";
2 | import { OpenAIEmbeddings } from "langchain/embeddings/openai";
3 | import { createPool } from "mysql2/promise";
4 |
5 | export const run = async () => {
6 | const pool = createPool({
7 | host: process.env.SINGLESTORE_HOST,
8 | port: Number(process.env.SINGLESTORE_PORT),
9 | user: process.env.SINGLESTORE_USERNAME,
10 | password: process.env.SINGLESTORE_PASSWORD,
11 | database: process.env.SINGLESTORE_DATABASE,
12 | });
13 |
14 | const vectorStore = await SingleStoreVectorStore.fromTexts(
15 | ["Hello world", "Bye bye", "hello nice world"],
16 | [{ id: 2 }, { id: 1 }, { id: 3 }],
17 | new OpenAIEmbeddings(),
18 | {
19 | connectionPool: pool,
20 | }
21 | );
22 |
23 | const resultOne = await vectorStore.similaritySearch("hello world", 1);
24 | console.log(resultOne);
25 | await pool.end();
26 | };
27 |
--------------------------------------------------------------------------------
/examples/src/chains/advanced_subclass.ts:
--------------------------------------------------------------------------------
1 | import { CallbackManagerForChainRun } from "langchain/callbacks";
2 | import { BaseChain as _ } from "langchain/chains";
3 | import { BaseMemory } from "langchain/memory";
4 | import { ChainValues } from "langchain/schema";
5 |
6 | abstract class BaseChain {
7 | memory?: BaseMemory;
8 |
9 | /**
10 | * Run the core logic of this chain and return the output
11 | */
12 | abstract _call(
13 | values: ChainValues,
14 | runManager?: CallbackManagerForChainRun
15 |   ): Promise<ChainValues>;
16 |
17 | /**
18 | * Return the string type key uniquely identifying this class of chain.
19 | */
20 | abstract _chainType(): string;
21 |
22 | /**
23 | * Return the list of input keys this chain expects to receive when called.
24 | */
25 | abstract get inputKeys(): string[];
26 |
27 | /**
28 | * Return the list of output keys this chain will produce when called.
29 | */
30 | abstract get outputKeys(): string[];
31 | }
32 |
--------------------------------------------------------------------------------
/examples/src/llms/googlevertexai.ts:
--------------------------------------------------------------------------------
1 | import { GoogleVertexAI } from "langchain/llms/googlevertexai";
2 |
3 | /*
4 | * Before running this, you should make sure you have created a
5 |  * Google Cloud Project that has the Vertex AI API enabled.
6 | *
7 | * You will also need permission to access this project / API.
8 | * Typically, this is done in one of three ways:
9 | * - You are logged into an account permitted to that project.
10 | * - You are running this on a machine using a service account permitted to
11 | * the project.
12 | * - The `GOOGLE_APPLICATION_CREDENTIALS` environment variable is set to the
13 | * path of a credentials file for a service account permitted to the project.
14 | */
15 | export const run = async () => {
16 | const model = new GoogleVertexAI({
17 | temperature: 0.7,
18 | });
19 | const res = await model.call(
20 | "What would be a good company name a company that makes colorful socks?"
21 | );
22 | console.log({ res });
23 | };
24 |
--------------------------------------------------------------------------------
/docs/modules/indexes/document_loaders/examples/file_loaders/epub.md:
--------------------------------------------------------------------------------
1 | ---
2 | sidebar_label: EPUB files
3 | ---
4 |
5 | # EPUB files
6 |
7 | This example goes over how to load data from EPUB files. By default, one document will be created for each chapter; you can change this behavior by setting the `splitChapters` option to `false`.
8 |
9 | ## Setup
10 |
11 | ```bash npm2yarn
12 | npm install epub2 html-to-text
13 |
14 | ```
15 |
16 |
17 | ## Usage, one document per chapter
18 |
19 | ```typescript
20 | import { EPubLoader } from "langchain/document_loaders/fs/epub";
21 |
22 |
23 |
24 | const loader = new EPubLoader("src/document_loaders/example_data/example.epub");
25 |
26 |
27 |
28 | const docs = await loader.load();
29 |
30 | ```
31 |
32 |
33 | ## Usage, one document per file
34 |
35 | ```typescript
36 |
37 | import { EPubLoader } from "langchain/document_loaders/fs/epub";
38 |
39 |
40 |
41 | const loader = new EPubLoader(
42 |
43 | "src/document_loaders/example_data/example.epub",
44 |
45 | {
46 |
47 | splitChapters: false,
48 |
49 | }
50 |
51 | );
52 |
53 |
54 |
55 | const docs = await loader.load();
56 |
57 | ```
58 |
59 |
--------------------------------------------------------------------------------
/examples/src/retrievers/supabase_hybrid.ts:
--------------------------------------------------------------------------------
1 | import { OpenAIEmbeddings } from "langchain/embeddings/openai";
2 | import { createClient } from "@supabase/supabase-js";
3 | import { SupabaseHybridSearch } from "langchain/retrievers/supabase";
4 |
5 | export const run = async () => {
6 | const client = createClient(
7 | process.env.SUPABASE_URL || "",
8 | process.env.SUPABASE_PRIVATE_KEY || ""
9 | );
10 |
11 | const embeddings = new OpenAIEmbeddings();
12 |
13 | const retriever = new SupabaseHybridSearch(embeddings, {
14 | client,
15 | // Below are the defaults, expecting that you set up your supabase table and functions according to the guide above. Please change if necessary.
16 | similarityK: 2,
17 | keywordK: 2,
18 | tableName: "documents",
19 | similarityQueryName: "match_documents",
20 | keywordQueryName: "kw_match_documents",
21 | });
22 |
23 | const results = await retriever.getRelevantDocuments("hello bye");
24 |
25 | console.log(results);
26 | };
27 |
--------------------------------------------------------------------------------
/examples/src/memory/zep.ts:
--------------------------------------------------------------------------------
1 | import { ChatOpenAI } from "langchain/chat_models/openai";
2 | import { ConversationChain } from "langchain/chains";
3 | import { ZepMemory } from "langchain/memory/zep";
4 |
5 | const sessionId = "TestSession1234";
6 | const zepURL = "http://localhost:8000";
7 |
8 | const memory = new ZepMemory({
9 | sessionId,
10 | baseURL: zepURL,
11 | });
12 |
13 | const model = new ChatOpenAI({
14 | modelName: "gpt-3.5-turbo",
15 | temperature: 0,
16 | });
17 |
18 | const chain = new ConversationChain({ llm: model, memory });
19 |
20 | const res1 = await chain.call({ input: "Hi! I'm Jim." });
21 | console.log({ res1 });
22 | /*
23 | {
24 | res1: {
25 | text: "Hello Jim! It's nice to meet you. My name is AI. How may I assist you today?"
26 | }
27 | }
28 | */
29 |
30 | const res2 = await chain.call({ input: "What did I just say my name was?" });
31 | console.log({ res2 });
32 |
33 | /*
34 | {
35 |     res2: {
36 | text: "You said your name was Jim."
37 | }
38 | }
39 | */
40 |
--------------------------------------------------------------------------------
/src/theme/SearchBar.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (c) Meta Platforms, Inc. and affiliates.
3 | *
4 | * This source code is licensed under the MIT license found in the
5 | * LICENSE file in the root directory of this source tree.
6 | *
7 | * @format
8 | */
9 | import React from "react";
10 | import { MendableSearchBar } from "@mendable/search";
11 | import useDocusaurusContext from "@docusaurus/useDocusaurusContext";
12 |
13 | export default function SearchBarWrapper() {
14 | const {
15 | siteConfig: { customFields },
16 | } = useDocusaurusContext();
17 | return (
18 |
19 |
27 |
28 | );
29 | }
30 |
--------------------------------------------------------------------------------
/docs/modules/indexes/vector_stores/integrations/weaviate.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | ---
4 |
5 | import CodeBlock from "@theme/CodeBlock";
6 |
7 |
8 | # Weaviate
9 |
10 | Weaviate is an open-source vector database that stores both objects and vectors, allowing you to combine vector search with structured filtering. LangChain connects to Weaviate via the `weaviate-ts-client` package, the official Typescript client for Weaviate.
11 |
12 | LangChain inserts vectors directly into Weaviate, and queries Weaviate for the nearest neighbors of a given vector, so that you can use all the LangChain Embeddings integrations with Weaviate.
13 |
14 | ## Setup
15 |
16 | ```bash npm2yarn
17 | npm install weaviate-ts-client graphql
18 |
19 | ```
20 |
21 |
22 | You'll need to run Weaviate either locally or on a server. See the [Weaviate documentation](https://weaviate.io/developers/weaviate/installation) for more information.
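
As a rough sketch of connecting and inserting (the host and scheme are placeholders; the `as any` cast reflects typing quirks in the client package):

```typescript
import weaviate from "weaviate-ts-client";
import { WeaviateStore } from "langchain/vectorstores/weaviate";
import { OpenAIEmbeddings } from "langchain/embeddings/openai";

// Hypothetical connection values; point these at your own instance.
const client = (weaviate as any).client({
  scheme: process.env.WEAVIATE_SCHEME || "http",
  host: process.env.WEAVIATE_HOST || "localhost:8080",
});

const store = await WeaviateStore.fromTexts(
  ["hello world", "hi there"],
  [{ foo: "bar" }, { foo: "baz" }],
  new OpenAIEmbeddings(),
  { client, indexName: "Test", textKey: "text" }
);

const results = await store.similaritySearch("hello", 1);
console.log(results);
```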
23 |
24 | ## Usage, insert documents
25 |
26 | import InsertExample from "!!raw-loader!@examples/indexes/vector_stores/weaviate_fromTexts.ts";
27 |
28 |
29 | {InsertExample}
30 |
31 |
32 | ## Usage, query documents
33 |
34 | import QueryExample from "!!raw-loader!@examples/indexes/vector_stores/weaviate_search.ts";
35 |
36 |
37 |
38 | {QueryExample}
39 |
40 |
--------------------------------------------------------------------------------
/examples/src/document_loaders/pdf_directory.ts:
--------------------------------------------------------------------------------
1 | import { DirectoryLoader } from "langchain/document_loaders/fs/directory";
2 | import { PDFLoader } from "langchain/document_loaders/fs/pdf";
3 | import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";
4 |
5 | export const run = async () => {
6 | /* Load all PDFs within the specified directory */
7 | const directoryLoader = new DirectoryLoader(
8 | "src/document_loaders/example_data/",
9 | {
10 | ".pdf": (path: string) => new PDFLoader(path),
11 | }
12 | );
13 |
14 | const docs = await directoryLoader.load();
15 |
16 | console.log({ docs });
17 |
18 | /* Additional steps : Split text into chunks with any TextSplitter. You can then use it as context or save it to memory afterwards. */
19 | const textSplitter = new RecursiveCharacterTextSplitter({
20 | chunkSize: 1000,
21 | chunkOverlap: 200,
22 | });
23 |
24 | const splitDocs = await textSplitter.splitDocuments(docs);
25 | console.log({ splitDocs });
26 | };
27 |
--------------------------------------------------------------------------------
/examples/src/chains/llm_chain_chat.ts:
--------------------------------------------------------------------------------
1 | import {
2 | ChatPromptTemplate,
3 | HumanMessagePromptTemplate,
4 | SystemMessagePromptTemplate,
5 | } from "langchain/prompts";
6 | import { LLMChain } from "langchain/chains";
7 | import { ChatOpenAI } from "langchain/chat_models/openai";
8 |
9 | // We can also construct an LLMChain from a ChatPromptTemplate and a chat model.
10 | const chat = new ChatOpenAI({ temperature: 0 });
11 | const chatPrompt = ChatPromptTemplate.fromPromptMessages([
12 | SystemMessagePromptTemplate.fromTemplate(
13 | "You are a helpful assistant that translates {input_language} to {output_language}."
14 | ),
15 | HumanMessagePromptTemplate.fromTemplate("{text}"),
16 | ]);
17 | const chainB = new LLMChain({
18 | prompt: chatPrompt,
19 | llm: chat,
20 | });
21 |
22 | const resB = await chainB.call({
23 | input_language: "English",
24 | output_language: "French",
25 | text: "I love programming.",
26 | });
27 | console.log({ resB });
28 | // { resB: { text: "J'adore la programmation." } }
29 |
--------------------------------------------------------------------------------
/docs/modules/indexes/document_loaders/examples/web_loaders/github.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | sidebar_class_name: node-only
3 | hide_table_of_contents: true
4 | ---
5 |
6 |
7 | # GitHub
8 |
9 |
10 | This example goes over how to load data from a GitHub repository.
11 | You can set the `GITHUB_ACCESS_TOKEN` environment variable to a GitHub access token to increase the rate limit and access private repositories.
12 |
13 |
14 | ## Setup
15 |
16 |
17 | The GitHub loader requires the [ignore npm package](https://www.npmjs.com/package/ignore) as a peer dependency. Install it like this:
18 |
19 |
20 | ```bash npm2yarn
21 | npm install ignore
22 |
23 | ```
24 |
25 |
26 |
27 | ## Usage
28 |
29 |
30 | import CodeBlock from "@theme/CodeBlock";
31 |
32 | import Example from "!!raw-loader!@examples/document_loaders/github.ts";
33 |
34 |
35 |
36 | {Example}
37 |
38 |
39 |
40 | The loader will ignore binary files like images.
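
If you just want the shape of the loader, a minimal sketch (the repository URL and options are illustrative):

```typescript
import { GithubRepoLoader } from "langchain/document_loaders/web/github";

const loader = new GithubRepoLoader(
  "https://github.com/hwchase17/langchainjs",
  { branch: "main", recursive: false, unknown: "warn" }
);

const docs = await loader.load();
console.log(docs.length);
```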
41 |
42 |
43 | ### Using .gitignore syntax
44 |
45 |
46 | To ignore specific files, you can pass an `ignorePaths` array into the constructor:
47 |
48 |
49 | import IgnoreExample from "!!raw-loader!@examples/document_loaders/github_ignore_paths.ts";
50 |
51 |
52 |
53 | {IgnoreExample}
54 |
55 |
--------------------------------------------------------------------------------
/docs/modules/indexes/vector_stores/integrations/chroma.mdx:
--------------------------------------------------------------------------------
1 | import CodeBlock from "@theme/CodeBlock";
2 |
3 |
4 | # Chroma
5 |
6 |
7 | Chroma is an open-source, Apache 2.0-licensed embedding database.
8 |
9 |
10 | ## Setup
11 |
12 |
13 | 1. Run Chroma with Docker on your computer: [docs](https://docs.trychroma.com/api-reference)
14 | 2. Install the Chroma JS SDK:
15 |
16 |
17 | ```bash npm2yarn
18 | npm install -S chromadb
19 |
20 | ```
21 |
22 |
23 |
24 | ## Usage: index and query documents
25 |
26 |
27 | import FromDocs from "!!raw-loader!@examples/indexes/vector_stores/chroma/fromDocs.ts";
28 |
29 |
30 |
31 | {FromDocs}
32 |
33 |
34 |
35 | ## Usage: index and query texts
36 |
37 |
38 | import FromTexts from "!!raw-loader!@examples/indexes/vector_stores/chroma/fromTexts.ts";
39 |
40 |
41 |
42 | {FromTexts}
43 |
44 |
45 |
46 | ## Usage: query documents from an existing collection
47 |
48 |
49 | import Search from "!!raw-loader!@examples/indexes/vector_stores/chroma/search.ts";
50 |
51 |
52 |
53 | {Search}
54 |
55 |
--------------------------------------------------------------------------------
/docs/modules/indexes/vector_stores/integrations/myscale.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | sidebar_class_name: node-only
3 | ---
4 |
5 | import CodeBlock from "@theme/CodeBlock";
6 |
7 |
8 | # MyScale
9 |
10 | :::info 兼容性
11 | 仅在Node.js上可用。
12 | :::
13 |
14 | [MyScale](https://myscale.com/) is an emerging AI database that unifies vector search and SQL analytics, offering a managed, efficient, and responsive experience.
15 |
16 | ## Setup
17 |
18 | 1. Launch a cluster through the [MyScale Web Console](https://console.myscale.com/). See the [official MyScale docs](https://docs.myscale.com/zh-CN/quickstart/) for more information.
19 | 2. After the cluster is up, view your Connection Details from the cluster's Actions menu. You will need the host, port, username, and password.
20 | 3. Install the required Node.js dependency in your workspace:
21 |
22 | ```bash npm2yarn
23 | npm install -S @clickhouse/client
24 |
25 | ```
26 |
27 |
28 | ## Index and query docs
29 |
30 | import InsertExample from "!!raw-loader!@examples/indexes/vector_stores/myscale_fromTexts.ts";
31 |
32 |
33 | {InsertExample}
34 |
35 |
36 | ## Query docs from an existing collection
37 |
38 | import SearchExample from "!!raw-loader!@examples/indexes/vector_stores/myscale_search.ts";
39 |
40 |
41 |
42 | {SearchExample}
43 |
44 |
--------------------------------------------------------------------------------
/docs/modules/indexes/vector_stores/integrations/memory.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | sidebar_label: Memory
4 | sidebar_position: 1
5 | ---
6 |
7 | import CodeBlock from "@theme/CodeBlock";
8 |
9 |
10 | # `MemoryVectorStore`
11 |
12 | MemoryVectorStore is an in-memory, ephemeral vector store that keeps embeddings in memory and does an exact, linear search for the most similar embeddings. The default similarity metric is cosine similarity, but this can be changed to any of the similarity metrics supported by [ml-distance](https://mljs.github.io/distance/modules/similarity.html).
13 |
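For example, here is a minimal sketch of swapping in a different metric from ml-distance (the texts and metadata are placeholders):

```typescript
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { OpenAIEmbeddings } from "langchain/embeddings/openai";
import { similarity } from "ml-distance";

// Use Pearson similarity from ml-distance instead of the default cosine similarity.
const vectorStore = await MemoryVectorStore.fromTexts(
  ["Hello world", "Bye bye"],
  [{ id: 1 }, { id: 2 }],
  new OpenAIEmbeddings(),
  { similarity: similarity.pearson }
);
```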
14 | ## Usage
15 |
16 | ### Create a new index from texts
17 |
18 | import ExampleTexts from "!!raw-loader!@examples/indexes/vector_stores/memory.ts";
19 |
20 |
21 | {ExampleTexts}
22 |
23 |
24 | ### Create a new index from a loader
25 |
26 | import ExampleLoader from "!!raw-loader!@examples/indexes/vector_stores/memory_fromdocs.ts";
27 |
28 |
29 | {ExampleLoader}
30 |
31 |
32 | ### Use a custom similarity metric
33 |
34 | import ExampleCustom from "!!raw-loader!@examples/indexes/vector_stores/memory_custom_similarity.ts";
35 |
36 |
37 |
38 | {ExampleCustom}
39 |
40 |
--------------------------------------------------------------------------------
/examples/src/models/chat/chat_streaming_stdout.ts:
--------------------------------------------------------------------------------
1 | import { ChatOpenAI } from "langchain/chat_models/openai";
2 | import { HumanChatMessage } from "langchain/schema";
3 |
4 | const chat = new ChatOpenAI({
5 | streaming: true,
6 | callbacks: [
7 | {
8 | handleLLMNewToken(token: string) {
9 | process.stdout.write(token);
10 | },
11 | },
12 | ],
13 | });
14 |
15 | await chat.call([
16 | new HumanChatMessage("Write me a song about sparkling water."),
17 | ]);
18 | /*
19 | Verse 1:
20 | Bubbles rise, crisp and clear
21 | Refreshing taste that brings us cheer
22 | Sparkling water, so light and pure
23 | Quenches our thirst, it's always secure
24 |
25 | Chorus:
26 | Sparkling water, oh how we love
27 | Its fizzy bubbles and grace above
28 | It's the perfect drink, anytime, anyplace
29 | Refreshing as it gives us a taste
30 |
31 | Verse 2:
32 | From morning brunch to evening feast
33 | It's the perfect drink for a treat
34 | A sip of it brings a smile so bright
35 | Our thirst is quenched in just one sip so light
36 | ...
37 | */
38 |
--------------------------------------------------------------------------------
/examples/src/retrievers/hyde.ts:
--------------------------------------------------------------------------------
1 | import { OpenAI } from "langchain/llms/openai";
2 | import { OpenAIEmbeddings } from "langchain/embeddings/openai";
3 | import { MemoryVectorStore } from "langchain/vectorstores/memory";
4 | import { HydeRetriever } from "langchain/retrievers/hyde";
5 | import { Document } from "langchain/document";
6 |
7 | const embeddings = new OpenAIEmbeddings();
8 | const vectorStore = new MemoryVectorStore(embeddings);
9 | const llm = new OpenAI();
10 | const retriever = new HydeRetriever({
11 | vectorStore,
12 | llm,
13 | k: 1,
14 | });
15 |
16 | await vectorStore.addDocuments(
17 | [
18 | "My name is John.",
19 | "My name is Bob.",
20 | "My favourite food is pizza.",
21 | "My favourite food is pasta.",
22 | ].map((pageContent) => new Document({ pageContent }))
23 | );
24 |
25 | const results = await retriever.getRelevantDocuments(
26 | "What is my favourite food?"
27 | );
28 |
29 | console.log(results);
30 | /*
31 | [
32 | Document { pageContent: 'My favourite food is pasta.', metadata: {} }
33 | ]
34 | */
35 |
--------------------------------------------------------------------------------
/docs/use_cases/question_answering.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | sidebar_position: 2
4 | ---
5 |
6 | # Question Answering
7 |
8 | :::info
9 | [Conceptual Guide](https://docs.langchain.com/docs/use-cases/qa-docs)
10 | :::
11 |
12 | Question answering in this context refers to question answering over your document data.
13 | There are several different types of question answering:
14 |
15 | - [Retrieval QA](../modules/chains/index_related_chains/retrieval_qa): With this approach, you ingest documents, index them into a vector store, and can then ask questions over them.
16 | - [Conversational Retrieval](../modules/chains/index_related_chains/conversational_retrieval): Similar to the above, but you can then have a conversation (ask follow-up questions, etc.) rather than just asking a single question.
17 |
18 | ## Indexing
19 |
20 | For question answering over many documents, you almost always want to create an index over the data.
21 | The index can be used to smartly access the most relevant documents for a given question, allowing you to avoid passing all the documents to the LLM (saving you time and money).
22 |
23 | It is therefore important to understand how to create an index, so you should get familiar with all of the documentation related to indexing.
24 |
25 | - [Indexes](../modules/indexes/)
26 |
27 | ## Chains
28 |
29 | Once you have created an index, you can use it in a chain.
30 | You can ask questions over it normally, or use it conversationally; a rough sketch follows the link below.
31 | For an overview of these chains (and more), see the documentation below.
32 |
33 | - [与索引相关的链](../modules/chains/index_related_chains/)
34 |
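Here is a rough sketch of the retrieval QA flow described above (the texts are placeholders, and the in-memory store stands in for whichever vector store you indexed into):

```typescript
import { OpenAI } from "langchain/llms/openai";
import { OpenAIEmbeddings } from "langchain/embeddings/openai";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { RetrievalQAChain } from "langchain/chains";

// Index a few documents, then ask a question over them.
const vectorStore = await MemoryVectorStore.fromTexts(
  ["Harrison went to Harvard.", "Ankush went to Princeton."],
  [{ id: 1 }, { id: 2 }],
  new OpenAIEmbeddings()
);

const chain = RetrievalQAChain.fromLLM(new OpenAI(), vectorStore.asRetriever());
const res = await chain.call({ query: "Where did Harrison go to college?" });
console.log(res.text);
```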
35 | ## Agents
36 |
37 |
38 | If you want to answer more complex, multi-hop questions, you should consider combining your indexes with an agent.
39 | See the following for an example of how to do that.
40 |
41 |
42 | - [Vectorstore Agent](../modules/agents/toolkits/vectorstore)
43 |
44 |
--------------------------------------------------------------------------------
/examples/src/models/chat/chat_streaming.ts:
--------------------------------------------------------------------------------
1 | import { ChatOpenAI } from "langchain/chat_models/openai";
2 | import { HumanChatMessage } from "langchain/schema";
3 |
4 | const chat = new ChatOpenAI({
5 | maxTokens: 25,
6 | streaming: true,
7 | });
8 |
9 | const response = await chat.call(
10 | [new HumanChatMessage("Tell me a joke.")],
11 | undefined,
12 | [
13 | {
14 | handleLLMNewToken(token: string) {
15 | console.log({ token });
16 | },
17 | },
18 | ]
19 | );
20 |
21 | console.log(response);
22 | // { token: '' }
23 | // { token: '\n\n' }
24 | // { token: 'Why' }
25 | // { token: ' don' }
26 | // { token: "'t" }
27 | // { token: ' scientists' }
28 | // { token: ' trust' }
29 | // { token: ' atoms' }
30 | // { token: '?\n\n' }
31 | // { token: 'Because' }
32 | // { token: ' they' }
33 | // { token: ' make' }
34 | // { token: ' up' }
35 | // { token: ' everything' }
36 | // { token: '.' }
37 | // { token: '' }
38 | // AIChatMessage {
39 | // text: "\n\nWhy don't scientists trust atoms?\n\nBecause they make up everything."
40 | // }
41 |
--------------------------------------------------------------------------------
/docs/modules/indexes/vector_stores/integrations/tigris.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | sidebar_class_name: node-only
3 | ---
4 |
5 | import CodeBlock from "@theme/CodeBlock";
6 |
7 |
8 | # Tigris
9 |
10 | Tigris makes it easy to build AI applications with vector embeddings.
11 | It is a fully managed, cloud-native database that lets you store and index documents and vector embeddings for fast and scalable vector search.
12 |
13 | :::info Compatibility
14 | Only available on Node.js.
15 | :::
16 |
17 | ## Setup
18 |
19 | ### 1. Install the Tigris SDK
20 |
21 | Install the SDK as follows:
22 |
23 | ```bash npm2yarn
24 |
25 | npm install -S @tigrisdata/vector
26 |
27 | ```
28 |
29 |
30 | ### 2. Fetch Tigris API credentials
31 |
32 | You can sign up for a free Tigris account [here](https://console.preview.tigrisdata.cloud/signup).
33 |
34 | Once you have signed up for a Tigris account, create a new project called `vectordemo`.
35 | Next, make a note of the `clientId` and `clientSecret`, which you can get from the Application Keys section of the project.
36 |
37 | ## Index docs
38 |
39 |
40 | import FromDocs from "!!raw-loader!@examples/indexes/vector_stores/tigris/fromDocs.ts";
41 |
42 |
43 | {FromDocs}
44 |
45 |
46 | ## Query docs
47 |
48 | import Search from "!!raw-loader!@examples/indexes/vector_stores/tigris/search.ts";
49 |
50 |
51 |
52 | {Search}
53 |
54 |
--------------------------------------------------------------------------------
/docs/modules/agents/tools/webbrowser.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | ---
4 |
5 | import CodeBlock from "@theme/CodeBlock";
6 |
7 |
8 | # Web Browser Tool
9 |
10 | The Web Browser Tool gives your agent the ability to visit a website and extract information. It is described to the agent as:
11 |
12 | ```
13 | useful for when you need to find something on or summarize a webpage. input should be a comma separated list of "valid URL including protocol","what you want to find on the page or empty string for a summary".
14 |
15 | ```
16 |
17 |
18 | It exposes two modes of operation, sketched below after the setup section:
19 |
20 | - When called by the agent with only a URL, it produces a summary of the website's contents
21 | - When called by the agent with a URL and a description of what to find, it instead uses an in-memory vector store to find the most relevant snippets and summarizes those
22 |
23 | ## Setup
24 |
25 | To use the Web Browser Tool you need to install all the dependencies:
26 |
27 | ```bash npm2yarn
28 | npm install cheerio axios
29 |
30 | ```
31 |
32 |
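Once those are installed, here is a minimal sketch of the two calling modes described above (the URLs and the question are placeholders):

```typescript
import { WebBrowser } from "langchain/tools/webbrowser";
import { ChatOpenAI } from "langchain/chat_models/openai";
import { OpenAIEmbeddings } from "langchain/embeddings/openai";

const model = new ChatOpenAI({ temperature: 0 });
const embeddings = new OpenAIEmbeddings();
const browser = new WebBrowser({ model, embeddings });

// URL with an empty find string: returns a summary of the page.
const summary = await browser.call(`"https://js.langchain.com",""`);

// URL plus a description of what to find: returns the most relevant snippets, summarized.
const answer = await browser.call(`"https://js.langchain.com","what is LangChain?"`);
```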
33 | ## Usage, standalone
34 |
35 | import ToolExample from "!!raw-loader!@examples/tools/webbrowser.ts";
36 |
37 |
38 | {ToolExample}
39 |
40 |
41 | ## Usage, in an agent
42 |
43 | import AgentExample from "!!raw-loader!@examples/agents/mrkl_browser.ts";
44 |
45 |
46 |
47 | {AgentExample}
48 |
49 |
--------------------------------------------------------------------------------
/docs/modules/indexes/document_loaders/examples/web_loaders/gitbook.md:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | ---
4 |
5 | # GitBook
6 |
7 | This example goes over how to load data from any GitBook, using Cheerio. One document will be created for each page.
8 |
9 | ## Setup
10 |
11 | ```bash npm2yarn
12 | npm install cheerio
13 |
14 | ```
15 |
16 |
17 | ## Load from a single GitBook page
18 |
19 | ```typescript
20 | import { GitbookLoader } from "langchain/document_loaders/web/gitbook";
21 |
22 | const loader = new GitbookLoader(
23 |   "https://docs.gitbook.com/product-tour/navigation"
24 | );
25 |
26 | const docs = await loader.load();
27 | ```
35 |
36 |
37 | ## Load from all paths in a given GitBook
38 |
39 | For this to work, the GitbookLoader needs to be initialized with the root path (e.g. https://docs.gitbook.com) and have `shouldLoadAllPaths` set to `true`.
40 |
41 | ```typescript
42 | import { GitbookLoader } from "langchain/document_loaders/web/gitbook";
43 |
44 | const loader = new GitbookLoader("https://docs.gitbook.com", {
45 |   shouldLoadAllPaths: true,
46 | });
47 |
48 | const docs = await loader.load();
49 | ```
58 |
59 |
--------------------------------------------------------------------------------
/docs/modules/indexes/vector_stores/integrations/singlestore.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | sidebar_class_name: node-only
3 | ---
4 |
5 | import CodeBlock from "@theme/CodeBlock";
6 |
7 |
8 | # SingleStore
9 |
10 | [SingleStoreDB](https://singlestore.com/) is a high-performance, distributed database system. It has long supported vector functions such as [dot_product](https://docs.singlestore.com/managed-service/en/reference/sql-reference/vector-functions/dot_product.html), making it an excellent solution for AI applications that require text similarity matching.
11 |
12 | :::info Compatibility
13 | Only available on Node.js.
14 | :::
15 |
16 | LangChain.js accepts a `mysql2/promise` Pool as the connection pool for the SingleStore vector store (see the sketch below).
17 |
18 | ## Setup
19 |
20 | 1. Set up a SingleStoreDB environment. You can choose the [cloud version](https://docs.singlestore.com/managed-service/en/getting-started-with-singlestoredb-cloud.html) or the [self-hosted version](https://docs.singlestore.com/db/v8.1/en/developer-resources/get-started-using-singlestoredb-for-free.html).
21 | 2. Install the mysql2 JS client:
22 |
23 | ```bash npm2yarn
24 | npm install -S mysql2
25 |
26 | ```
27 |
28 |
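Here is a minimal sketch of creating the `mysql2/promise` pool mentioned above (the environment variable names are placeholders; take the real values from your SingleStoreDB deployment):

```typescript
import { createPool } from "mysql2/promise";

// Hypothetical environment variables holding the SingleStoreDB connection details.
const pool = createPool({
  host: process.env.SINGLESTORE_HOST,
  port: 3306,
  user: process.env.SINGLESTORE_USERNAME,
  password: process.env.SINGLESTORE_PASSWORD,
  database: process.env.SINGLESTORE_DATABASE,
});
```

The pool is then handed to the vector store, as the full example below shows.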
29 | ## Usage
30 |
31 | import UsageExample from "!!raw-loader!@examples/indexes/vector_stores/singlestore.ts";
32 |
33 |
34 |
35 | {UsageExample}
36 |
37 |
--------------------------------------------------------------------------------
/examples/src/memory/buffer_window.ts:
--------------------------------------------------------------------------------
1 | import { OpenAI } from "langchain/llms/openai";
2 | import { BufferWindowMemory } from "langchain/memory";
3 | import { LLMChain } from "langchain/chains";
4 | import { PromptTemplate } from "langchain/prompts";
5 |
6 | export const run = async () => {
7 | const memory = new BufferWindowMemory({ memoryKey: "chat_history", k: 1 });
8 | const model = new OpenAI({ temperature: 0.9 });
9 | const template = `The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
10 |
11 | Current conversation:
12 | {chat_history}
13 | Human: {input}
14 | AI:`;
15 |
16 | const prompt = PromptTemplate.fromTemplate(template);
17 | const chain = new LLMChain({ llm: model, prompt, memory });
18 | const res1 = await chain.call({ input: "Hi! I'm Jim." });
19 | console.log({ res1 });
20 | const res2 = await chain.call({ input: "What's my name?" });
21 | console.log({ res2 });
22 | };
23 |
--------------------------------------------------------------------------------
/docs/modules/models/chat/index.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | sidebar_label: Chat Models
4 | ---
5 |
6 | import CodeBlock from "@theme/CodeBlock";
7 |
8 | import Example from "!!raw-loader!@examples/models/chat/chat_quick_start.ts";
9 |
10 | import DocCardList from "@theme/DocCardList";
11 |
12 |
13 | # Getting Started: Chat Models
14 |
15 | :::info
16 | [Conceptual Guide](https://docs.langchain.com/docs/components/models/chat-model)
17 | :::
18 |
19 | LangChain provides a standard interface for using chat models. Chat models are a variation on language models.
20 | While chat models use language models under the hood, the interface they expose is a bit different.
21 | Rather than exposing a "text in, text out" API, they expose an interface where "chat messages" are the inputs and outputs.
22 |
23 | ## Chat Messages
24 |
25 | A chat message is the modular unit of information for a chat model.
26 | Currently, this consists of a "text" field, which refers to the content of the chat message.
27 |
28 | There are currently four different kinds of chat messages supported by LangChain:
29 |
30 | - `HumanChatMessage`: A chat message that is sent as if from a human's point of view.
31 | - `AIChatMessage`: A chat message that is sent from the point of view of the AI system that the human is communicating with.
32 | - `SystemChatMessage`: A chat message that gives the AI system some information about the conversation; usually sent at the beginning of a conversation.
33 | - `ChatMessage`: A generic chat message, with not only a "text" field but also an arbitrary "role" field.
34 |
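Here is a minimal sketch of constructing each kind of message (the contents are placeholders):

```typescript
import {
  AIChatMessage,
  ChatMessage,
  HumanChatMessage,
  SystemChatMessage,
} from "langchain/schema";

const messages = [
  new SystemChatMessage("You are a helpful assistant."),
  new HumanChatMessage("What is the capital of France?"),
  new AIChatMessage("The capital of France is Paris."),
  // A generic message with an arbitrary "role" field.
  new ChatMessage("Keep answers concise.", "system"),
];
```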
35 | To get started, simply call the `call` method of a chat model implementation, passing in an array of chat messages. In this example, we use the `ChatOpenAI` implementation:
36 |
37 | {Example}
38 |
39 |
40 | ## Dig Deeper
41 |
42 |
43 |
44 |
45 |
--------------------------------------------------------------------------------
/docs/modules/agents/tools/index.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | sidebar_position: 3
4 | ---
5 |
6 | import DocCardList from "@theme/DocCardList";
7 |
8 |
9 | # Tools
10 |
11 | :::info
12 | [Conceptual Guide](https://docs.langchain.com/docs/components/agents/tool)
13 | :::
14 |
15 | A tool is an abstraction around a function that makes it easy for a language model to interact with it. Specifically, the interface of a tool has a single text input and a single text output. It includes a name and a description that communicate to the [model](../../models/) what the tool does and when to use it.
16 |
17 | ```typescript
18 | interface Tool {
19 |   call(arg: string): Promise<string>;
20 |
21 |   name: string;
22 |
23 |   description: string;
24 | }
25 | ```
33 |
34 |
35 | ## All Tools
36 |
37 |
38 |
39 |
40 | ## Advanced
41 |
42 | To implement your own tool, you can subclass the `Tool` class and implement the `_call` method. The `_call` method is invoked with the input text and should return the output text. The `Tool` superclass implements the `call` method, which takes care of calling the right CallbackManager methods before and after calling your `_call` method. When an error occurs, the `_call` method should, when possible, return a string representing the error rather than throwing it. This allows the error to be passed to the LLM, which can then decide how to handle it. If the error is thrown instead, the agent's execution is stopped. A concrete sketch follows the interface below.
43 |
44 | ```typescript
45 | abstract class Tool {
46 |   abstract _call(arg: string): Promise<string>;
47 |
48 |   abstract name: string;
49 |
50 |   abstract description: string;
51 | }
52 | ```
61 |
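Here is a minimal sketch of a custom tool following that contract (the tool itself is hypothetical):

```typescript
import { Tool } from "langchain/tools";

class LengthTool extends Tool {
  name = "length-calculator";

  description =
    "useful for when you need to count the number of characters in a piece of text.";

  async _call(arg: string): Promise<string> {
    try {
      return String(arg.length);
    } catch (e) {
      // Return the error as a string so the LLM can decide how to react.
      return `Error: ${e}`;
    }
  }
}
```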
62 |
--------------------------------------------------------------------------------
/examples/src/indexes/vector_stores/weaviate_fromTexts.ts:
--------------------------------------------------------------------------------
1 | /* eslint-disable @typescript-eslint/no-explicit-any */
2 | import weaviate from "weaviate-ts-client";
3 | import { WeaviateStore } from "langchain/vectorstores/weaviate";
4 | import { OpenAIEmbeddings } from "langchain/embeddings/openai";
5 |
6 | export async function run() {
7 | // Something is wrong with the weaviate-ts-client types, so we need to disable type checking here
8 | const client = (weaviate as any).client({
9 | scheme: process.env.WEAVIATE_SCHEME || "https",
10 | host: process.env.WEAVIATE_HOST || "localhost",
11 | apiKey: new (weaviate as any).ApiKey(
12 | process.env.WEAVIATE_API_KEY || "default"
13 | ),
14 | });
15 |
16 | // Create a store and fill it with some texts + metadata
17 | await WeaviateStore.fromTexts(
18 | ["hello world", "hi there", "how are you", "bye now"],
19 | [{ foo: "bar" }, { foo: "baz" }, { foo: "qux" }, { foo: "bar" }],
20 | new OpenAIEmbeddings(),
21 | {
22 | client,
23 | indexName: "Test",
24 | textKey: "text",
25 | metadataKeys: ["foo"],
26 | }
27 | );
28 | }
29 |
--------------------------------------------------------------------------------
/examples/src/chains/sql_db_sql_output.ts:
--------------------------------------------------------------------------------
1 | import { DataSource } from "typeorm";
2 | import { OpenAI } from "langchain/llms/openai";
3 | import { SqlDatabase } from "langchain/sql_db";
4 | import { SqlDatabaseChain } from "langchain/chains";
5 |
6 | /**
7 | * This example uses Chinook database, which is a sample database available for SQL Server, Oracle, MySQL, etc.
8 | * To set it up follow the instructions on https://database.guide/2-sample-databases-sqlite/, placing the .db file
9 | * in the examples folder.
10 | */
11 | const datasource = new DataSource({
12 | type: "sqlite",
13 | database: "Chinook.db",
14 | });
15 |
16 | const db = await SqlDatabase.fromDataSourceParams({
17 | appDataSource: datasource,
18 | });
19 |
20 | const chain = new SqlDatabaseChain({
21 | llm: new OpenAI({ temperature: 0 }),
22 | database: db,
23 | sqlOutputKey: "sql",
24 | });
25 |
26 | const res = await chain.call({ query: "How many tracks are there?" });
27 | /* Expected result:
28 | * {
29 | * result: ' There are 3503 tracks.',
30 | * sql: ' SELECT COUNT(*) FROM "Track";'
31 | * }
32 | */
33 | console.log(res);
34 |
--------------------------------------------------------------------------------
/examples/src/chains/llm_chain_cancellation.ts:
--------------------------------------------------------------------------------
1 | import { OpenAI } from "langchain/llms/openai";
2 | import { PromptTemplate } from "langchain/prompts";
3 | import { LLMChain } from "langchain/chains";
4 |
5 | // Create a new LLMChain from a PromptTemplate and an LLM in streaming mode.
6 | const model = new OpenAI({ temperature: 0.9, streaming: true });
7 | const prompt = PromptTemplate.fromTemplate(
8 | "Give me a long paragraph about {product}?"
9 | );
10 | const chain = new LLMChain({ llm: model, prompt });
11 | const controller = new AbortController();
12 |
13 | // Call `controller.abort()` somewhere to cancel the request.
14 | setTimeout(() => {
15 | controller.abort();
16 | }, 3000);
17 |
18 | try {
19 | // Call the chain with the inputs and a callback for the streamed tokens
20 | const res = await chain.call(
21 | { product: "colorful socks", signal: controller.signal },
22 | [
23 | {
24 | handleLLMNewToken(token: string) {
25 | process.stdout.write(token);
26 | },
27 | },
28 | ]
29 | );
30 | } catch (e) {
31 | console.log(e);
32 | // Error: Cancel: canceled
33 | }
34 |
--------------------------------------------------------------------------------
/docs/modules/indexes/document_loaders/examples/file_loaders/unstructured.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | ---
4 |
5 | # Unstructured
6 |
7 | This example goes over how to use [Unstructured](../../../../../ecosystem/unstructured) to load files of many types. Unstructured currently supports loading text files, PowerPoints, HTML, PDFs, images, and more.
8 |
9 | ## Setup
10 |
11 | You can run Unstructured on your computer using Docker. To do so, you need to have Docker installed; you can find the installation instructions [here](https://docs.docker.com/get-docker/).
12 |
13 | ```bash
14 | docker run -p 8000:8000 -d --rm --name unstructured-api quay.io/unstructured-io/unstructured-api:latest --port 8000 --host 0.0.0.0
15 |
16 | ```
17 |
18 |
19 | ## Usage
20 |
21 | Once Unstructured is running, you can use it to load files from your computer, using the following code:
22 |
23 | import CodeBlock from "@theme/CodeBlock";
24 |
25 | import Example from "!!raw-loader!@examples/document_loaders/unstructured.ts";
26 |
27 |
28 | {Example}
29 |
30 |
31 | ## Directories
32 |
33 | You can also load all of the files in a directory using `UnstructuredDirectoryLoader`, which inherits from [`DirectoryLoader`](./directory.md):
34 |
35 | import DirectoryExample from "!!raw-loader!@examples/document_loaders/unstructured_directory.ts";
36 |
37 |
38 |
39 | {DirectoryExample}
40 |
41 |
--------------------------------------------------------------------------------
/docs/modules/memory/examples/buffer_window_memory.md:
--------------------------------------------------------------------------------
1 | # Buffer Window Memory
2 |
3 | BufferWindowMemory keeps track of the back-and-forth messages in a conversation, and then uses a window of size `k` to surface the most recent `k` back-and-forths as memory.
4 |
5 | ### Usage
6 |
7 | ```typescript
8 | import { OpenAI } from "langchain/llms/openai";
9 | import { BufferWindowMemory } from "langchain/memory";
10 | import { ConversationChain } from "langchain/chains";
11 |
12 | const model = new OpenAI({});
13 | const memory = new BufferWindowMemory({ k: 1 });
14 | const chain = new ConversationChain({ llm: model, memory: memory });
15 | const res1 = await chain.call({ input: "Hi! I'm Jim." });
16 | console.log({ res1 });
17 | ```
27 |
28 |
29 | ```shell
30 | {response: " Hi Jim! It's nice to meet you. My name is AI. What would you like to talk about?"}
31 | ```
33 |
34 |
35 | ```typescript
36 | const res2 = await chain.call({ input: "What's my name?" });
37 | console.log({ res2 });
38 | ```
39 |
40 | ```shell
41 | {response: ' You said your name is Jim. Is there anything else you would like to talk about?'}
42 | ```
48 |
49 |
--------------------------------------------------------------------------------
/docs/modules/memory/examples/redis.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | ---
4 |
5 | import CodeBlock from "@theme/CodeBlock";
6 |
7 |
8 | # Redis-Backed Chat Memory
9 |
10 | For longer-term persistence across chat sessions, you can swap out the default in-memory `chatHistory` that backs chat memory classes like `BufferMemory` for a [Redis](https://redis.io/) instance.
11 |
12 | ## Setup
13 |
14 | You will need to install [node-redis](https://github.com/redis/node-redis) in your project:
15 |
16 | ```bash npm2yarn
17 | npm install redis
18 |
19 | ```
20 |
21 |
22 | You will also need a Redis instance to connect to. See the instructions on the [official Redis website](https://redis.io/docs/getting-started/) for running a local server.
23 |
24 | ## Usage
25 |
26 | Each chat history session stored in Redis must have a unique ID. You can provide an optional `sessionTTL` parameter to make sessions expire after a given number of seconds.
27 | The `config` parameter is passed directly to the `createClient` method of [node-redis](https://github.com/redis/node-redis) and takes all the same arguments.
28 |
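Here is a minimal sketch of wiring this up (the session ID, TTL, and URL are placeholders):

```typescript
import { BufferMemory } from "langchain/memory";
import { RedisChatMessageHistory } from "langchain/stores/message/redis";

const memory = new BufferMemory({
  chatHistory: new RedisChatMessageHistory({
    sessionId: "user-123", // a unique ID for this conversation
    sessionTTL: 300, // optional: expire the session after 300 seconds
    config: { url: "redis://localhost:6379" }, // passed through to node-redis `createClient`
  }),
});
```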
29 | import Example from "!!raw-loader!@examples/memory/redis.ts";
30 |
31 |
32 | {Example}
33 |
34 |
35 | ## Advanced Usage
36 |
37 | You can also directly pass in a previously created [node-redis](https://github.com/redis/node-redis) client instance:
38 |
39 | import AdvancedExample from "!!raw-loader!@examples/memory/redis-advanced.ts";
40 |
41 |
42 |
43 | {AdvancedExample}
44 |
45 |
--------------------------------------------------------------------------------
/examples/src/indexes/vector_stores/tigris/search.ts:
--------------------------------------------------------------------------------
1 | import { VectorDocumentStore } from "@tigrisdata/vector";
2 | import { OpenAIEmbeddings } from "langchain/embeddings/openai";
3 | import { TigrisVectorStore } from "langchain/vectorstores/tigris";
4 |
5 | const index = new VectorDocumentStore({
6 | connection: {
7 | serverUrl: "api.preview.tigrisdata.cloud",
8 | projectName: process.env.TIGRIS_PROJECT,
9 | clientId: process.env.TIGRIS_CLIENT_ID,
10 | clientSecret: process.env.TIGRIS_CLIENT_SECRET,
11 | },
12 | indexName: "examples_index",
13 | numDimensions: 1536, // match the OpenAI embedding size
14 | });
15 |
16 | const vectorStore = await TigrisVectorStore.fromExistingIndex(
17 | new OpenAIEmbeddings(),
18 | { index }
19 | );
20 |
21 | /* Search the vector DB independently with metadata filters */
22 | const results = await vectorStore.similaritySearch("tigris", 1, {
23 | "metadata.foo": "bar",
24 | });
25 | console.log(JSON.stringify(results, null, 2));
26 | /*
27 | [
28 | Document {
29 | pageContent: 'tigris is a cloud-native vector db',
30 | metadata: { foo: 'bar' }
31 | }
32 | ]
33 | */
34 |
--------------------------------------------------------------------------------
/docs/modules/chains/index_related_chains/document_qa.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | sidebar_position: 1
4 | ---
5 |
6 |
7 | import QAExample from "!!raw-loader!@examples/chains/question_answering.ts";
8 |
9 | import RefineExample from "!!raw-loader!@examples/chains/qa_refine.ts";
10 |
11 | import CodeBlock from "@theme/CodeBlock";
12 |
13 |
14 | # Document QA
15 |
16 | LangChain provides a collection of chains designed specifically for working with unstructured text data: `StuffDocumentsChain`, `MapReduceDocumentsChain`, and `RefineDocumentsChain`. These chains are the basic building blocks for developing more complex chains that interact with such data. They are designed to take both documents and a question as input, then utilize the language model to formulate an answer based on the provided documents.
17 |
18 | - `StuffDocumentsChain`: The simplest of the three. It simply injects all the input documents into the prompt as context and returns an answer to the question. It is suitable for QA tasks over a small number of documents.
19 | - `MapReduceDocumentsChain`: Incorporates a preprocessing step that selects the relevant portions of each document until the total number of tokens is below the maximum allowed by the model. It then answers the question using the transformed documents as context. It is suitable for QA tasks over larger documents, and the preprocessing steps can run in parallel, reducing run time.
20 | - `RefineDocumentsChain`: Iterates over the input documents one by one, updating an intermediate answer on each iteration using the previous version of the answer and the next document as context. It is suitable for QA tasks over a large number of documents.
21 |
22 | ## Usage, `StuffDocumentsChain` and `MapReduceDocumentsChain`
23 |
24 | {QAExample}
25 |
26 |
27 | ## Usage, `RefineDocumentsChain`
28 |
29 |
30 | {RefineExample}
31 |
32 |
--------------------------------------------------------------------------------
/docs/modules/indexes/vector_stores/integrations/redis.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | sidebar_class_name: node-only
3 | ---
4 |
5 | import CodeBlock from "@theme/CodeBlock";
6 |
7 |
8 | # Redis
9 |
10 | [Redis](https://redis.io/) is a fast open-source, in-memory data store. As part of [Redis Stack](https://redis.io/docs/stack/get-started/), [RediSearch](https://redis.io/docs/stack/search/) is the module that enables vector similarity semantic search, as well as many other types of searching.
11 |
12 | :::tip Compatibility
13 | Only available on Node.js.
14 | :::
15 |
16 | LangChain.js accepts [node-redis](https://github.com/redis/node-redis) as the client for the Redis vector store.
17 |
18 | ## Setup
19 |
20 | 1. Run Redis with Docker on your machine following [the docs](https://redis.io/docs/stack/get-started/install/docker/#redisredis-stack).
21 | 2. Install the node-redis JS client:
22 |
23 | ```bash npm2yarn
24 |
25 | npm install -S redis
26 |
27 | ```
28 |
29 |
30 | ## Index docs
31 |
32 | import IndexExample from "!!raw-loader!@examples/indexes/vector_stores/redis/redis.ts";
33 |
34 |
35 | {IndexExample}
36 |
37 |
38 | ## Query docs
39 |
40 | import QueryExample from "!!raw-loader!@examples/indexes/vector_stores/redis/redis_query.ts";
41 |
42 |
43 |
44 | {QueryExample}
45 |
46 |
--------------------------------------------------------------------------------
/examples/src/models/llm/llm_streaming.ts:
--------------------------------------------------------------------------------
1 | import { OpenAI } from "langchain/llms/openai";
2 |
3 | // To enable streaming, we pass in `streaming: true` to the LLM constructor.
4 | // Additionally, we pass in a handler for the `handleLLMNewToken` event.
5 | const chat = new OpenAI({
6 | maxTokens: 25,
7 | streaming: true,
8 | });
9 |
10 | const response = await chat.call("Tell me a joke.", undefined, [
11 | {
12 | handleLLMNewToken(token: string) {
13 | console.log({ token });
14 | },
15 | },
16 | ]);
17 | console.log(response);
18 | /*
19 | { token: '\n' }
20 | { token: '\n' }
21 | { token: 'Q' }
22 | { token: ':' }
23 | { token: ' Why' }
24 | { token: ' did' }
25 | { token: ' the' }
26 | { token: ' chicken' }
27 | { token: ' cross' }
28 | { token: ' the' }
29 | { token: ' playground' }
30 | { token: '?' }
31 | { token: '\n' }
32 | { token: 'A' }
33 | { token: ':' }
34 | { token: ' To' }
35 | { token: ' get' }
36 | { token: ' to' }
37 | { token: ' the' }
38 | { token: ' other' }
39 | { token: ' slide' }
40 | { token: '.' }
41 |
42 |
43 | Q: Why did the chicken cross the playground?
44 | A: To get to the other slide.
45 | */
46 |
--------------------------------------------------------------------------------
/examples/src/agents/mrkl_with_tracing.ts:
--------------------------------------------------------------------------------
1 | import { OpenAI } from "langchain/llms/openai";
2 | import { initializeAgentExecutorWithOptions } from "langchain/agents";
3 | import { SerpAPI } from "langchain/tools";
4 | import { Calculator } from "langchain/tools/calculator";
5 | import process from "process";
6 |
7 | export const run = async () => {
8 | process.env.LANGCHAIN_TRACING_V2 = "true";
9 | const model = new OpenAI({ temperature: 0 });
10 | const tools = [
11 | new SerpAPI(process.env.SERPAPI_API_KEY, {
12 | location: "Austin,Texas,United States",
13 | hl: "en",
14 | gl: "us",
15 | }),
16 | new Calculator(),
17 | ];
18 |
19 | const executor = await initializeAgentExecutorWithOptions(tools, model, {
20 | agentType: "zero-shot-react-description",
21 | verbose: true,
22 | });
23 | console.log("Loaded agent.");
24 |
25 | const input = `Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?`;
26 |
27 | console.log(`Executing with input "${input}"...`);
28 |
29 | const result = await executor.call({ input });
30 |
31 | console.log(`Got output ${result.output}`);
32 | };
33 |
--------------------------------------------------------------------------------
/examples/src/indexes/vector_stores/redis/redis.ts:
--------------------------------------------------------------------------------
1 | import { createClient, createCluster } from "redis";
2 | import { Document } from "langchain/document";
3 | import { OpenAIEmbeddings } from "langchain/embeddings/openai";
4 | import { RedisVectorStore } from "langchain/vectorstores/redis";
5 |
6 | const client = createClient({
7 | url: process.env.REDIS_URL ?? "redis://localhost:6379",
8 | });
9 | await client.connect();
10 |
11 | const docs = [
12 | new Document({
13 | metadata: { foo: "bar" },
14 | pageContent: "redis is fast",
15 | }),
16 | new Document({
17 | metadata: { foo: "bar" },
18 | pageContent: "the quick brown fox jumped over the lazy dog",
19 | }),
20 | new Document({
21 | metadata: { baz: "qux" },
22 | pageContent: "lorem ipsum dolor sit amet",
23 | }),
24 | new Document({
25 | metadata: { baz: "qux" },
26 | pageContent: "consectetur adipiscing elit",
27 | }),
28 | ];
29 |
30 | const vectorStore = await RedisVectorStore.fromDocuments(
31 | docs,
32 | new OpenAIEmbeddings(),
33 | {
34 | redisClient: client,
35 | indexName: "docs",
36 | }
37 | );
38 |
39 | await client.disconnect();
40 |
--------------------------------------------------------------------------------
/examples/src/indexes/vector_stores/supabase.ts:
--------------------------------------------------------------------------------
1 | import { SupabaseVectorStore } from "langchain/vectorstores/supabase";
2 | import { OpenAIEmbeddings } from "langchain/embeddings/openai";
3 | import { createClient } from "@supabase/supabase-js";
4 |
5 | // First, follow set-up instructions at
6 | // https://js.langchain.com/docs/modules/indexes/vector_stores/integrations/supabase
7 |
8 | const privateKey = process.env.SUPABASE_PRIVATE_KEY;
9 | if (!privateKey) throw new Error(`Expected env var SUPABASE_PRIVATE_KEY`);
10 |
11 | const url = process.env.SUPABASE_URL;
12 | if (!url) throw new Error(`Expected env var SUPABASE_URL`);
13 |
14 | export const run = async () => {
15 | const client = createClient(url, privateKey);
16 |
17 | const vectorStore = await SupabaseVectorStore.fromTexts(
18 | ["Hello world", "Bye bye", "What's this?"],
19 | [{ id: 2 }, { id: 1 }, { id: 3 }],
20 | new OpenAIEmbeddings(),
21 | {
22 | client,
23 | tableName: "documents",
24 | queryName: "match_documents",
25 | }
26 | );
27 |
28 | const resultOne = await vectorStore.similaritySearch("Hello world", 1);
29 |
30 | console.log(resultOne);
31 | };
32 |
--------------------------------------------------------------------------------
/examples/src/models/llm/llm_streaming_stdout.ts:
--------------------------------------------------------------------------------
1 | import { OpenAI } from "langchain/llms/openai";
2 |
3 | // To enable streaming, we pass in `streaming: true` to the LLM constructor.
4 | // Additionally, we pass in a handler for the `handleLLMNewToken` event.
5 | const chat = new OpenAI({
6 | streaming: true,
7 | callbacks: [
8 | {
9 | handleLLMNewToken(token: string) {
10 | process.stdout.write(token);
11 | },
12 | },
13 | ],
14 | });
15 |
16 | await chat.call("Write me a song about sparkling water.");
17 | /*
18 | Verse 1
19 | Crystal clear and made with care
20 | Sparkling water on my lips, so refreshing in the air
21 | Fizzy bubbles, light and sweet
22 | My favorite beverage I can’t help but repeat
23 |
24 | Chorus
25 | A toast to sparkling water, I’m feeling so alive
26 | Let’s take a sip, and let’s take a drive
27 | A toast to sparkling water, it’s the best I’ve had in my life
28 | It’s the best way to start off the night
29 |
30 | Verse 2
31 | It’s the perfect drink to quench my thirst
32 | It’s the best way to stay hydrated, it’s the first
33 | A few ice cubes, a splash of lime
34 | It will make any day feel sublime
35 | ...
36 | */
37 |
--------------------------------------------------------------------------------
/examples/src/memory/entity.ts:
--------------------------------------------------------------------------------
1 | import { OpenAI } from "langchain/llms/openai";
2 | import {
3 | EntityMemory,
4 | ENTITY_MEMORY_CONVERSATION_TEMPLATE,
5 | } from "langchain/memory";
6 | import { LLMChain } from "langchain/chains";
7 |
8 | export const run = async () => {
9 | const memory = new EntityMemory({
10 | llm: new OpenAI({ temperature: 0 }),
11 | chatHistoryKey: "history", // Default value
12 | entitiesKey: "entities", // Default value
13 | });
14 | const model = new OpenAI({ temperature: 0.9 });
15 | const chain = new LLMChain({
16 | llm: model,
17 | prompt: ENTITY_MEMORY_CONVERSATION_TEMPLATE, // Default prompt - must include the set chatHistoryKey and entitiesKey as input variables.
18 | memory,
19 | });
20 |
21 | const res1 = await chain.call({ input: "Hi! I'm Jim." });
22 | console.log({
23 | res1,
24 | memory: await memory.loadMemoryVariables({ input: "Who is Jim?" }),
25 | });
26 |
27 | const res2 = await chain.call({
28 | input: "I work in construction. What about you?",
29 | });
30 | console.log({
31 | res2,
32 | memory: await memory.loadMemoryVariables({ input: "Who is Jim?" }),
33 | });
34 | };
35 |
--------------------------------------------------------------------------------
/docs/modules/indexes/text_splitters/examples/code.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | ---
4 |
5 | # Code and Markup Text Splitters
6 |
7 | LangChain supports a variety of different markup and programming language-specific text splitters, which split text based on language-specific syntax.
8 | This results in more semantically self-contained chunks that are more useful for a vector store or other retriever.
9 | Popular languages like JavaScript, Python, and Rust are supported, as well as Latex, HTML, and Markdown.
10 |
11 | ## Usage
12 |
13 | Initialize a standard `RecursiveCharacterTextSplitter` with the `fromLanguage` factory method. Below are examples for various languages.
14 |
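For instance, a minimal sketch for JavaScript (the chunk sizes are deliberately tiny placeholders):

```typescript
import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";

const splitter = RecursiveCharacterTextSplitter.fromLanguage("js", {
  chunkSize: 32,
  chunkOverlap: 0,
});
const docs = await splitter.createDocuments([
  `function add(a, b) { return a + b; }`,
]);
```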
15 | ## JavaScript
16 |
17 | import CodeBlock from "@theme/CodeBlock";
18 |
19 | import JSExample from "!!raw-loader!@examples/indexes/javascript_text_splitter.ts";
20 |
21 |
22 | {JSExample}
23 |
24 |
25 | ## Python
26 |
27 | import PythonExample from "!!raw-loader!@examples/indexes/python_text_splitter.ts";
28 |
29 |
30 | {PythonExample}
31 |
32 |
33 | ## HTML
34 |
35 | import HTMLExample from "!!raw-loader!@examples/indexes/html_text_splitter.ts";
36 |
37 |
38 | {HTMLExample}
39 |
40 |
41 | ## Latex
42 |
43 | import LatexExample from "!!raw-loader!@examples/indexes/latex_text_splitter.ts";
44 |
45 |
46 |
47 | {LatexExample}
48 |
49 |
--------------------------------------------------------------------------------
/examples/src/indexes/vector_stores/typeorm_vectorstore/typeorm.ts:
--------------------------------------------------------------------------------
1 | import { DataSourceOptions } from "typeorm";
2 | import { OpenAIEmbeddings } from "langchain/embeddings/openai";
3 | import { TypeORMVectorStore } from "langchain/vectorstores/typeorm";
4 |
5 | // First, follow set-up instructions at
6 | // https://js.langchain.com/docs/modules/indexes/vector_stores/integrations/typeorm
7 |
8 | export const run = async () => {
9 | const args = {
10 | postgresConnectionOptions: {
11 | type: "postgres",
12 | host: "localhost",
13 | port: 5432,
14 | username: "myuser",
15 | password: "ChangeMe",
16 | database: "api",
17 | } as DataSourceOptions,
18 | };
19 |
20 | const typeormVectorStore = await TypeORMVectorStore.fromDataSource(
21 | new OpenAIEmbeddings(),
22 | args
23 | );
24 |
25 | await typeormVectorStore.ensureTableInDatabase();
26 |
27 | await typeormVectorStore.addDocuments([
28 | { pageContent: "what's this", metadata: { a: 2 } },
29 | { pageContent: "Cat drinks milk", metadata: { a: 1 } },
30 | ]);
31 |
32 | const results = await typeormVectorStore.similaritySearch("hello", 2);
33 |
34 | console.log(results);
35 | };
36 |
--------------------------------------------------------------------------------
/examples/src/document_loaders/puppeteer_web.ts:
--------------------------------------------------------------------------------
1 | import { PuppeteerWebBaseLoader } from "langchain/document_loaders/web/puppeteer";
2 |
3 | export const run = async () => {
4 | const loader = new PuppeteerWebBaseLoader("https://www.tabnews.com.br/");
5 |
6 | /** By default, the loader runs `await page.evaluate(() => document.body.innerHTML)` to extract the page content. */
7 | const docs = await loader.load();
8 | console.log({ docs });
9 |
10 | const loaderWithOptions = new PuppeteerWebBaseLoader(
11 | "https://www.tabnews.com.br/",
12 | {
13 | launchOptions: {
14 | headless: true,
15 | },
16 | gotoOptions: {
17 | waitUntil: "domcontentloaded",
18 | },
19 | /** Pass a custom evaluate function; it receives the page and browser instances. */
20 | async evaluate(page, browser) {
21 | await page.waitForResponse("https://www.tabnews.com.br/va/view");
22 |
23 | const result = await page.evaluate(() => document.body.innerHTML);
24 | await browser.close();
25 | return result;
26 | },
27 | }
28 | );
29 | const docsFromLoaderWithOptions = await loaderWithOptions.load();
30 | console.log({ docsFromLoaderWithOptions });
31 | };
32 |
--------------------------------------------------------------------------------
/docs/modules/indexes/vector_stores/integrations/typeorm.mdx:
--------------------------------------------------------------------------------
1 | # TypeORM
2 |
3 | To enable vector search in a generic PostgreSQL database, LangChain.js supports using [TypeORM](https://typeorm.io/) with the [`pgvector`](https://github.com/pgvector/pgvector) Postgres extension.
4 |
5 | ## Setup
6 |
7 | To work with TypeORM, you need to install the `typeorm` and `pg` packages:
8 |
9 | ```bash npm2yarn
10 | npm install typeorm
11 |
12 | ```
13 |
14 |
15 | ```bash npm2yarn
16 | npm install pg
17 |
18 | ```
19 |
20 |
21 | ### Setting up a self-hosted instance of `pgvector` with `docker-compose`
22 |
23 | `pgvector` provides a prebuilt Docker image that can be used to quickly set up a self-hosted Postgres instance.
24 | Create a file named `docker-compose.yml`:
25 |
26 | import CodeBlock from "@theme/CodeBlock";
27 |
28 | import DockerExample from "!!raw-loader!@examples/indexes/vector_stores/typeorm_vectorstore/docker-compose.example.yml";
29 |
30 |
31 | {DockerExample}
32 |
33 |
34 | Then run `docker compose up` in the same directory to start the container.
35 |
36 | You can find more information on how to set up `pgvector` in the [official repository](https://github.com/pgvector/pgvector).
37 |
38 | ## Usage
39 |
40 | import Example from "!!raw-loader!@examples/indexes/vector_stores/typeorm_vectorstore/typeorm.ts";
41 |
42 |
43 | A complete example of using `TypeORMVectorStore` looks like this:
44 |
45 |
46 | {Example}
47 |
48 |
--------------------------------------------------------------------------------
/docs/modules/agents/executor/getting-started.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | sidebar_label: Getting Started
3 | hide_table_of_contents: true
4 | ---
5 |
6 |
7 | import CodeBlock from "@theme/CodeBlock";
8 |
9 | import Example from "!!raw-loader!@examples/agents/mrkl.ts";
10 |
11 |
12 | # Agent Executors
13 |
14 | Agents use an LLM to determine which actions to take and in what order. An action can be either using a tool and observing its output, or returning a response to the user.
15 |
16 | Used correctly, agents can be extremely powerful. In this tutorial, we show you how to easily use agents through the simplest, highest-level API.
17 |
18 | In order to load agents, you should understand the following concepts:
19 |
20 | - Tool: A function that performs a specific duty, such as Google Search, a database lookup, a code REPL, or another chain. The interface for a tool is currently a function that takes a string as input and returns a string as output.
21 | - LLM: The language model powering the agent.
22 |
23 | - Agent: The agent to use. This should be a string referencing a supported agent class. Because this tutorial focuses on the simplest, highest-level API, it only covers using the standard supported agents.
24 |
25 | For this example, you'll need to set the SerpAPI environment variable in the `.env` file:
26 | ```bash
27 | SERPAPI_API_KEY="..."
28 |
29 | ```
30 |
31 |
32 | Now we can get started!
33 |
34 | {Example}
35 |
36 |
37 | ```shell
38 | langchain-examples:start: Executing with input "Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?"...
39 | langchain-examples:start: Got output Harry Styles is Olivia Wilde's boyfriend and his current age raised to the 0.23 power is 2.169459462491557.
40 | ```
44 |
45 |
--------------------------------------------------------------------------------
/examples/src/agents/load_from_hub.ts:
--------------------------------------------------------------------------------
1 | import { OpenAI } from "langchain/llms/openai";
2 | import { AgentExecutor } from "langchain/agents";
3 | import { loadAgent } from "langchain/agents/load";
4 | import { SerpAPI } from "langchain/tools";
5 | import { Calculator } from "langchain/tools/calculator";
6 |
7 | export const run = async () => {
8 | const model = new OpenAI({ temperature: 0 });
9 | const tools = [
10 | new SerpAPI(process.env.SERPAPI_API_KEY, {
11 | location: "Austin,Texas,United States",
12 | hl: "en",
13 | gl: "us",
14 | }),
15 | new Calculator(),
16 | ];
17 |
18 | const agent = await loadAgent(
19 | "lc://agents/zero-shot-react-description/agent.json",
20 | { llm: model, tools }
21 | );
22 | console.log("Loaded agent from Langchain hub");
23 |
24 | const executor = AgentExecutor.fromAgentAndTools({
25 | agent,
26 | tools,
27 | returnIntermediateSteps: true,
28 | });
29 |
30 | const input = `Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?`;
31 | console.log(`Executing with input "${input}"...`);
32 |
33 | const result = await executor.call({ input });
34 |
35 | console.log(`Got output ${result.output}`);
36 | };
37 |
--------------------------------------------------------------------------------
/examples/src/indexes/vector_stores/supabase_with_metadata_filter.ts:
--------------------------------------------------------------------------------
1 | import { SupabaseVectorStore } from "langchain/vectorstores/supabase";
2 | import { OpenAIEmbeddings } from "langchain/embeddings/openai";
3 | import { createClient } from "@supabase/supabase-js";
4 |
5 | // First, follow set-up instructions at
6 | // https://js.langchain.com/docs/modules/indexes/vector_stores/integrations/supabase
7 |
8 | const privateKey = process.env.SUPABASE_PRIVATE_KEY;
9 | if (!privateKey) throw new Error(`Expected env var SUPABASE_PRIVATE_KEY`);
10 |
11 | const url = process.env.SUPABASE_URL;
12 | if (!url) throw new Error(`Expected env var SUPABASE_URL`);
13 |
14 | export const run = async () => {
15 | const client = createClient(url, privateKey);
16 |
17 | const vectorStore = await SupabaseVectorStore.fromTexts(
18 | ["Hello world", "Hello world", "Hello world"],
19 | [{ user_id: 2 }, { user_id: 1 }, { user_id: 3 }],
20 | new OpenAIEmbeddings(),
21 | {
22 | client,
23 | tableName: "documents",
24 | queryName: "match_documents",
25 | }
26 | );
27 |
28 | const result = await vectorStore.similaritySearch("Hello world", 1, {
29 | user_id: 3,
30 | });
31 |
32 | console.log(result);
33 | };
34 |
--------------------------------------------------------------------------------
/examples/src/memory/redis-advanced.ts:
--------------------------------------------------------------------------------
1 | import { createClient } from "redis";
2 | import { BufferMemory } from "langchain/memory";
3 | import { RedisChatMessageHistory } from "langchain/stores/message/redis";
4 | import { ChatOpenAI } from "langchain/chat_models/openai";
5 | import { ConversationChain } from "langchain/chains";
6 |
7 | const client = createClient({
8 | url: "redis://localhost:6379",
9 | });
10 |
11 | const memory = new BufferMemory({
12 | chatHistory: new RedisChatMessageHistory({
13 | sessionId: new Date().toISOString(),
14 | sessionTTL: 300,
15 | client,
16 | }),
17 | });
18 |
19 | const model = new ChatOpenAI({
20 | modelName: "gpt-3.5-turbo",
21 | temperature: 0,
22 | });
23 |
24 | const chain = new ConversationChain({ llm: model, memory });
25 |
26 | const res1 = await chain.call({ input: "Hi! I'm Jim." });
27 | console.log({ res1 });
28 | /*
29 | {
30 | res1: {
31 | text: "Hello Jim! It's nice to meet you. My name is AI. How may I assist you today?"
32 | }
33 | }
34 | */
35 |
36 | const res2 = await chain.call({ input: "What did I just say my name was?" });
37 | console.log({ res2 });
38 |
39 | /*
40 | {
41 | res2: {
42 | text: "You said your name was Jim."
43 | }
44 | }
45 | */
46 |
--------------------------------------------------------------------------------
/examples/src/prompts/regex_parser.ts:
--------------------------------------------------------------------------------
1 | import { OpenAI } from "langchain/llms/openai";
2 | import { RegexParser } from "langchain/output_parsers";
3 | import { PromptTemplate } from "langchain/prompts";
4 |
5 | export const run = async () => {
6 | const parser = new RegexParser(
7 | /Humor: ([0-9]+), Sophistication: (A|B|C|D|E)/,
8 | ["mark", "grade"],
9 | "noConfidence"
10 | );
11 | const formatInstructions = parser.getFormatInstructions();
12 |
13 | const prompt = new PromptTemplate({
14 | template: "Grade the joke.\n\n{format_instructions}\n\nJoke: {joke}",
15 | inputVariables: ["joke"],
16 | partialVariables: { format_instructions: formatInstructions },
17 | });
18 |
19 | const model = new OpenAI({ temperature: 0 });
20 |
21 | const input = await prompt.format({
22 | joke: "What time is the appointment? Tooth hurt-y.",
23 | });
24 | console.log(input);
25 | /*
26 | Grade the joke.
27 |
28 | Your response should match the following regex: /Humor: ([0-9]+), Sophistication: (A|B|C|D|E)/
29 |
30 | Joke: What time is the appointment? Tooth hurt-y.
31 | */
32 |
33 | const response = await model.call(input);
34 | console.log(response);
35 | /*
36 | Humor: 8, Sophistication: D
37 | */
38 | };
39 |
--------------------------------------------------------------------------------
/examples/src/agents/chat_mrkl.ts:
--------------------------------------------------------------------------------
1 | import { initializeAgentExecutorWithOptions } from "langchain/agents";
2 | import { ChatOpenAI } from "langchain/chat_models/openai";
3 | import { SerpAPI } from "langchain/tools";
4 | import { Calculator } from "langchain/tools/calculator";
5 |
6 | export const run = async () => {
7 | const model = new ChatOpenAI({ temperature: 0 });
8 | const tools = [
9 | new SerpAPI(process.env.SERPAPI_API_KEY, {
10 | location: "Austin,Texas,United States",
11 | hl: "en",
12 | gl: "us",
13 | }),
14 | new Calculator(),
15 | ];
16 |
17 | const executor = await initializeAgentExecutorWithOptions(tools, model, {
18 | agentType: "chat-zero-shot-react-description",
19 | returnIntermediateSteps: true,
20 | });
21 | console.log("Loaded agent.");
22 |
23 | const input = `Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?`;
24 |
25 | console.log(`Executing with input "${input}"...`);
26 |
27 | const result = await executor.call({ input });
28 |
29 | console.log(`Got output ${result.output}`);
30 |
31 | console.log(
32 | `Got intermediate steps ${JSON.stringify(
33 | result.intermediateSteps,
34 | null,
35 | 2
36 | )}`
37 | );
38 | };
39 |
--------------------------------------------------------------------------------
/examples/src/agents/json.ts:
--------------------------------------------------------------------------------
1 | import * as fs from "fs";
2 | import * as yaml from "js-yaml";
3 | import { OpenAI } from "langchain/llms/openai";
4 | import { JsonSpec, JsonObject } from "langchain/tools";
5 | import { JsonToolkit, createJsonAgent } from "langchain/agents";
6 |
7 | export const run = async () => {
8 | let data: JsonObject;
9 | try {
10 | const yamlFile = fs.readFileSync("openai_openapi.yaml", "utf8");
11 | data = yaml.load(yamlFile) as JsonObject;
12 | if (!data) {
13 | throw new Error("Failed to load OpenAPI spec");
14 | }
15 | } catch (e) {
16 | console.error(e);
17 | return;
18 | }
19 |
20 | const toolkit = new JsonToolkit(new JsonSpec(data));
21 | const model = new OpenAI({ temperature: 0 });
22 | const executor = createJsonAgent(model, toolkit);
23 |
24 | const input = `What are the required parameters in the request body to the /completions endpoint?`;
25 |
26 | console.log(`Executing with input "${input}"...`);
27 |
28 | const result = await executor.call({ input });
29 |
30 | console.log(`Got output ${result.output}`);
31 |
32 | console.log(
33 | `Got intermediate steps ${JSON.stringify(
34 | result.intermediateSteps,
35 | null,
36 | 2
37 | )}`
38 | );
39 | };
40 |
--------------------------------------------------------------------------------
/examples/src/chains/question_answering.ts:
--------------------------------------------------------------------------------
1 | import { OpenAI } from "langchain/llms/openai";
2 | import { loadQAStuffChain, loadQAMapReduceChain } from "langchain/chains";
3 | import { Document } from "langchain/document";
4 |
5 | export const run = async () => {
6 | // This first example uses the `StuffDocumentsChain`.
7 | const llmA = new OpenAI({});
8 | const chainA = loadQAStuffChain(llmA);
9 | const docs = [
10 | new Document({ pageContent: "Harrison went to Harvard." }),
11 | new Document({ pageContent: "Ankush went to Princeton." }),
12 | ];
13 | const resA = await chainA.call({
14 | input_documents: docs,
15 | question: "Where did Harrison go to college?",
16 | });
17 | console.log({ resA });
18 | // { resA: { text: ' Harrison went to Harvard.' } }
19 |
20 | // This second example uses the `MapReduceChain`.
21 | // Optionally limit the number of concurrent requests to the language model.
22 | const llmB = new OpenAI({ maxConcurrency: 10 });
23 | const chainB = loadQAMapReduceChain(llmB);
24 | const resB = await chainB.call({
25 | input_documents: docs,
26 | question: "Where did Harrison go to college?",
27 | });
28 | console.log({ resB });
29 | // { resB: { text: ' Harrison went to Harvard.' } }
30 | };
31 |
--------------------------------------------------------------------------------
/examples/src/chains/retrieval_qa_with_remote.ts:
--------------------------------------------------------------------------------
1 | import { OpenAI } from "langchain/llms/openai";
2 | import { RetrievalQAChain } from "langchain/chains";
3 | import { RemoteLangChainRetriever } from "langchain/retrievers/remote";
4 |
5 | export const run = async () => {
6 | // Initialize the LLM to use to answer the question.
7 | const model = new OpenAI({});
8 |
9 | // Initialize the remote retriever.
10 | const retriever = new RemoteLangChainRetriever({
11 | url: "http://0.0.0.0:8080/retrieve", // Replace with your own URL.
12 | auth: { bearer: "foo" }, // Replace with your own auth.
13 | inputKey: "message",
14 | responseKey: "response",
15 | });
16 |
17 | // Create a chain that uses the OpenAI LLM and remote retriever.
18 | const chain = RetrievalQAChain.fromLLM(model, retriever);
19 |
20 | // Call the chain with a query.
21 | const res = await chain.call({
22 | query: "What did the president say about Justice Breyer?",
23 | });
24 | console.log({ res });
25 | /*
26 | {
27 | res: {
28 | text: 'The president said that Justice Breyer was an Army veteran, Constitutional scholar,
29 | and retiring Justice of the United States Supreme Court and thanked him for his service.'
30 | }
31 | }
32 | */
33 | };
34 |
--------------------------------------------------------------------------------
/examples/src/chat/memory.ts:
--------------------------------------------------------------------------------
1 | import { ConversationChain } from "langchain/chains";
2 | import { ChatOpenAI } from "langchain/chat_models/openai";
3 | import {
4 | ChatPromptTemplate,
5 | HumanMessagePromptTemplate,
6 | SystemMessagePromptTemplate,
7 | MessagesPlaceholder,
8 | } from "langchain/prompts";
9 | import { BufferMemory } from "langchain/memory";
10 |
11 | export const run = async () => {
12 | const chat = new ChatOpenAI({ temperature: 0 });
13 |
14 | const chatPrompt = ChatPromptTemplate.fromPromptMessages([
15 | SystemMessagePromptTemplate.fromTemplate(
16 | "The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know."
17 | ),
18 | new MessagesPlaceholder("history"),
19 | HumanMessagePromptTemplate.fromTemplate("{input}"),
20 | ]);
21 |
22 | const chain = new ConversationChain({
23 | memory: new BufferMemory({ returnMessages: true, memoryKey: "history" }),
24 | prompt: chatPrompt,
25 | llm: chat,
26 | });
27 |
28 | const response = await chain.call({
29 | input: "hi! whats up?",
30 | });
31 |
32 | console.log(response);
33 | };
34 |
--------------------------------------------------------------------------------
/examples/src/agents/agent_timeout.ts:
--------------------------------------------------------------------------------
1 | import { initializeAgentExecutorWithOptions } from "langchain/agents";
2 | import { OpenAI } from "langchain/llms/openai";
3 | import { SerpAPI } from "langchain/tools";
4 | import { Calculator } from "langchain/tools/calculator";
5 |
6 | const model = new OpenAI({ temperature: 0 });
7 | const tools = [
8 | new SerpAPI(process.env.SERPAPI_API_KEY, {
9 | location: "Austin,Texas,United States",
10 | hl: "en",
11 | gl: "us",
12 | }),
13 | new Calculator(),
14 | ];
15 | const executor = await initializeAgentExecutorWithOptions(tools, model, {
16 | agentType: "zero-shot-react-description",
17 | });
18 |
19 | try {
20 | const input = `Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?`;
21 | const result = await executor.call({ input, timeout: 2000 }); // 2 seconds
22 | } catch (e) {
23 | console.log(e);
24 | /*
25 | Error: Cancel: canceled
26 | at file:///Users/nuno/dev/langchainjs/langchain/dist/util/async_caller.js:60:23
27 | at process.processTicksAndRejections (node:internal/process/task_queues:95:5)
28 | at RetryOperation._fn (/Users/nuno/dev/langchainjs/node_modules/p-retry/index.js:50:12) {
29 | attemptNumber: 1,
30 | retriesLeft: 6
31 | }
32 | */
33 | }
34 |
--------------------------------------------------------------------------------
/docs/modules/chains/index.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | hide_table_of_contents: true
3 | sidebar_label: Chains
4 | sidebar_position: 6
5 | ---
6 |
7 | import DocCardList from "@theme/DocCardList";
8 |
9 | import CodeBlock from "@theme/CodeBlock";
10 |
11 |
12 | # Getting Started: Chains
13 |
14 | :::info
15 | [Conceptual Guide](https://docs.langchain.com/docs/components/chains)
16 | :::
17 |
18 | Using a language model in isolation is fine for some applications, but it is often useful to combine language models with other sources of information, such as third-party APIs or other language models.
19 |
20 | This is where the concept of a chain comes in.
21 |
22 | LangChain provides a standard interface for chains, as well as a number of built-in chains that can be used out of the box. You can also create your own chains.
23 |
24 |
25 |
26 |
27 | ## Advanced
28 |
29 | To implement your own custom chain, you can subclass `BaseChain` and implement the following methods:
30 |
31 | import SubclassInterface from "!!raw-loader!@examples/chains/advanced_subclass.ts";
32 |
33 |
34 | {SubclassInterface}
35 |
36 |
37 | ### Subclassing `BaseChain`
38 |
39 | The `_call` method is the main method custom chains must implement. It takes a record of inputs and returns a record of outputs. The inputs received should conform to the `inputKeys` array, and the outputs returned should conform to the `outputKeys` array.
40 |
41 | When implementing this method in a custom chain, it is worth paying special attention to the `runManager` argument, which allows your custom chain to participate in the same [callbacks system](../../production/callbacks/) as the built-in chains.
42 |
43 | If you call another chain/model/agent inside your custom chain, you should pass it the result of calling `runManager?.getChild()`, which will produce a new callback manager scoped to that inner run. For example:
44 |
45 | import SubclassCall from "!!raw-loader!@examples/chains/advanced_subclass_call.ts";
46 |
47 |
48 |
49 | {SubclassCall}
50 |
51 |
--------------------------------------------------------------------------------
/docs/production/deployment.md:
--------------------------------------------------------------------------------
1 | # Deployment
2 |
3 | So you've built your LangChain application and now you want to deploy it to production? You've come to the right place. This guide walks through the options for deploying your application and the considerations to keep in mind as you do.
4 |
5 | ## Overview
6 |
7 | LangChain is a library for building applications that use language models. It is not a web framework, and it does not provide any built-in functionality for serving your application over the web. Instead, it provides a set of tools that you can integrate into your API or backend server.
8 |
9 | There are a few high-level options for deploying your application:
10 |
11 | - Deploying to a VM or container
12 |   - A persistent filesystem means you can save and load files from disk
13 |   - An always-running process means you can cache things in memory
14 |   - You can support long-running requests, such as WebSockets
15 | - Deploying to a serverless environment
16 |   - No persistent filesystem means you can load files from disk, but cannot save them for later
17 |   - Cold starts mean you cannot cache things in memory and expect the cache to survive between requests
18 |   - Function timeouts mean you cannot support long-running requests, such as WebSockets
24 |
25 | Some other considerations include:
26 |
27 | - Do you deploy your backend and frontend together, or separately?
28 | - Do you deploy your backend co-located with your database, or separately?
29 |
30 | We would love to offer more comprehensive support as you move your LangChain applications into production. Please fill out [this form](https://forms.gle/57d8AmXBYp8PP8tZA) and we'll set up a dedicated support Slack channel.
31 |
32 | ## Deployment Options
33 |
34 | 请参阅以下有关 LangChain 应用程序部署选项的列表。如果您没有看到您首选的选项,请联系我们,我们可以将其添加到此列表中。
35 |
36 | ### 部署到 Fly.io
37 |
38 | [Fly.io](https://fly.io) 是将应用程序部署到云端的平台。这是将您的应用程序部署到容器环境的不错选择。
39 |
40 | 请参阅 [我们的 Fly.io 模板](https://github.com/hwchase17/langchain-template-node-fly) ,其中包含了将应用程序部署到 Fly.io 的示例。
41 |
42 | ### 部署到 Kinsta
43 |
44 |
45 | [Kinsta](https://kinsta.com)是一个以开发人员为中心的云主机平台。
46 |
47 |
48 | 使用[我们的hello-world模板](https://github.com/kinsta/hello-world-langchainjs),了解如何在Kinsta上在几分钟内部署你的下一个LangChain应用程序的示例。
49 |
50 |
--------------------------------------------------------------------------------
/examples/src/indexes/vector_stores/tigris/fromDocs.ts:
--------------------------------------------------------------------------------
1 | import { VectorDocumentStore } from "@tigrisdata/vector";
2 | import { Document } from "langchain/document";
3 | import { OpenAIEmbeddings } from "langchain/embeddings/openai";
4 | import { TigrisVectorStore } from "langchain/vectorstores/tigris";
5 |
6 | const index = new VectorDocumentStore({
7 | connection: {
8 | serverUrl: "api.preview.tigrisdata.cloud",
9 | projectName: process.env.TIGRIS_PROJECT,
10 | clientId: process.env.TIGRIS_CLIENT_ID,
11 | clientSecret: process.env.TIGRIS_CLIENT_SECRET,
12 | },
13 | indexName: "examples_index",
14 | numDimensions: 1536, // match the OpenAI embedding size
15 | });
16 |
17 | const docs = [
18 | new Document({
19 | metadata: { foo: "bar" },
20 | pageContent: "tigris is a cloud-native vector db",
21 | }),
22 | new Document({
23 | metadata: { foo: "bar" },
24 | pageContent: "the quick brown fox jumped over the lazy dog",
25 | }),
26 | new Document({
27 | metadata: { baz: "qux" },
28 | pageContent: "lorem ipsum dolor sit amet",
29 | }),
30 | new Document({
31 | metadata: { baz: "qux" },
32 | pageContent: "tigris is a river",
33 | }),
34 | ];
35 |
36 | await TigrisVectorStore.fromDocuments(docs, new OpenAIEmbeddings(), { index });
37 |
--------------------------------------------------------------------------------
/docs/modules/chains/other_chains/sql.mdx:
--------------------------------------------------------------------------------
import CodeBlock from "@theme/CodeBlock";

import SqlDBExample from "!!raw-loader!@examples/chains/sql_db.ts";

import SqlDBSqlOutputExample from "!!raw-loader!@examples/chains/sql_db_sql_output.ts";

# `SqlDatabaseChain`

The `SqlDatabaseChain` allows you to answer questions over a SQL database.
This example uses the Chinook database, a sample database available for SQL Server, Oracle, MySQL, and more.

## Setup

First install `typeorm`:

```bash npm2yarn
npm install typeorm
```

Then install the dependencies needed for your database. For example, for SQLite:

```bash npm2yarn
npm install sqlite3
```

For other databases, see https://typeorm.io/#installation

Finally, follow the instructions at https://database.guide/2-sample-databases-sqlite/ to get the sample database for this example.

<CodeBlock language="typescript">{SqlDBExample}</CodeBlock>

You can include or exclude tables when creating the `SqlDatabase` object to help the chain focus on the tables you want.
It can also reduce the number of tokens used in the chain.

```typescript
const db = await SqlDatabase.fromDataSourceParams({
  appDataSource: datasource,
  includesTables: ["Track"],
});
```

If desired, you can return the SQL command used when calling the chain:

<CodeBlock language="typescript">{SqlDBSqlOutputExample}</CodeBlock>

--------------------------------------------------------------------------------
/docs/modules/indexes/document_loaders/examples/file_loaders/jsonlines.md:
--------------------------------------------------------------------------------
---
hide_table_of_contents: true
---

# JSONLines files

This example goes over how to load data from JSONLines or JSONL files. The second argument is a JSONPointer to the property to extract from each JSON object in the file. One document will be created for each JSON object.

Example JSONLines file:

```json
{"html": "This is a sentence."}
{"html": "This is another sentence."}
```

Example code:

```typescript
import { JSONLinesLoader } from "langchain/document_loaders/fs/json";

const loader = new JSONLinesLoader(
  "src/document_loaders/example_data/example.jsonl",
  "/html"
);

const docs = await loader.load();
/*
[
  Document {
    "metadata": {
      "blobType": "application/jsonl+json",
      "line": 1,
      "source": "blob",
    },
    "pageContent": "This is a sentence.",
  },
  Document {
    "metadata": {
      "blobType": "application/jsonl+json",
      "line": 2,
      "source": "blob",
    },
    "pageContent": "This is another sentence.",
  },
]
*/
```

--------------------------------------------------------------------------------
/examples/src/agents/custom_tool.ts:
--------------------------------------------------------------------------------
1 | import { OpenAI } from "langchain/llms/openai";
2 | import { initializeAgentExecutorWithOptions } from "langchain/agents";
3 | import { DynamicTool } from "langchain/tools";
4 |
5 | export const run = async () => {
6 | const model = new OpenAI({ temperature: 0 });
7 | const tools = [
8 | new DynamicTool({
9 | name: "FOO",
10 | description:
11 | "call this to get the value of foo. input should be an empty string.",
12 | func: () =>
13 | new Promise((resolve) => {
14 | resolve("foo");
15 | }),
16 | }),
17 | new DynamicTool({
18 | name: "BAR",
19 | description:
20 | "call this to get the value of bar. input should be an empty string.",
21 | func: () =>
22 | new Promise((resolve) => {
23 | resolve("baz1");
24 | }),
25 | }),
26 | ];
27 |
28 | const executor = await initializeAgentExecutorWithOptions(tools, model, {
29 | agentType: "zero-shot-react-description",
30 | });
31 |
32 | console.log("Loaded agent.");
33 |
34 | const input = `What is the value of foo?`;
35 |
36 | console.log(`Executing with input "${input}"...`);
37 |
38 | const result = await executor.call({ input });
39 |
40 | console.log(`Got output ${result.output}`);
41 | };
42 |
--------------------------------------------------------------------------------
/docs/ecosystem/databerry.md:
--------------------------------------------------------------------------------
# Databerry

This page covers how to use [Databerry](https://databerry.ai) within LangChain.

## What is Databerry?

Databerry is an [open source](https://github.com/gmpetrov/databerry) document retrieval platform that helps to connect your personal data with Large Language Models.

![Databerry](/img/DataberryDashboard.png)

## Quick start

Retrieving documents stored in Databerry from LangChain is very easy!

```typescript
import { OpenAI } from "langchain/llms/openai";
import { RetrievalQAChain } from "langchain/chains";
import { DataberryRetriever } from "langchain/retrievers/databerry";

const retriever = new DataberryRetriever({
  datastoreUrl: "https://api.databerry.ai/query/clg1xg2h80000l708dymr0fxc",
  apiKey: "DATABERRY_API_KEY", // optional: needed for private datastores
  topK: 8, // optional: default value is 3
});

// Create a chain that uses the OpenAI LLM and Databerry retriever.
const model = new OpenAI();
const chain = RetrievalQAChain.fromLLM(model, retriever);

// Call the chain with a query.
const res = await chain.call({
  query: "What's Databerry?",
});

console.log({ res });
/*
{
  res: {
    text: 'Databerry provides a user-friendly solution to quickly setup a semantic search system over your personal data without any technical knowledge.'
  }
}
*/
```

--------------------------------------------------------------------------------
/docs/modules/chains/prompt_selectors/index.mdx:
--------------------------------------------------------------------------------
---
hide_table_of_contents: true
sidebar_label: Prompt Selectors
---

# Prompt Selectors

:::info
[Conceptual Guide](https://docs.langchain.com/docs/components/chains/prompt-selector)
:::

Oftentimes you will want to programmatically select a prompt based on the type of model being used in a chain. This is especially important when swapping between chat models and LLMs.

The interface for prompt selectors is quite simple:

```typescript
abstract class BasePromptSelector {
  abstract getPrompt(llm: BaseLanguageModel): BasePromptTemplate;
}
```

The `getPrompt` method takes in a language model and returns an appropriate prompt template.

We currently offer a `ConditionalPromptSelector` that allows you to specify a set of conditions and prompt templates. The first condition that evaluates to true is used to select the prompt template.

```typescript
const QA_PROMPT_SELECTOR = new ConditionalPromptSelector(DEFAULT_QA_PROMPT, [
  [isChatModel, CHAT_PROMPT],
]);
```

This will return `DEFAULT_QA_PROMPT` if the model is not a chat model, and `CHAT_PROMPT` if it is.

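For example, a quick sketch of the selection behavior, assuming the `QA_PROMPT_SELECTOR` defined above is in scope (the variable names here are illustrative):

```typescript
import { ChatOpenAI } from "langchain/chat_models/openai";
import { OpenAI } from "langchain/llms/openai";

// A chat model satisfies the `isChatModel` condition, so CHAT_PROMPT is chosen...
const chatPrompt = QA_PROMPT_SELECTOR.getPrompt(new ChatOpenAI());
// ...while a plain LLM falls through to the default, DEFAULT_QA_PROMPT.
const llmPrompt = QA_PROMPT_SELECTOR.getPrompt(new OpenAI());
```
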
The example below shows how to use a prompt selector when loading a chain:

```typescript
const loadQAStuffChain = (
  llm: BaseLanguageModel,
  params: StuffQAChainParams = {}
) => {
  const { prompt = QA_PROMPT_SELECTOR.getPrompt(llm) } = params;
  const llmChain = new LLMChain({ prompt, llm });
  const chain = new StuffDocumentsChain({ llmChain });
  return chain;
};
```

--------------------------------------------------------------------------------
/examples/src/memory/redis.ts:
--------------------------------------------------------------------------------
1 | import { BufferMemory } from "langchain/memory";
2 | import { RedisChatMessageHistory } from "langchain/stores/message/redis";
3 | import { ChatOpenAI } from "langchain/chat_models/openai";
4 | import { ConversationChain } from "langchain/chains";
5 |
6 | const memory = new BufferMemory({
7 | chatHistory: new RedisChatMessageHistory({
8 | sessionId: new Date().toISOString(), // Or some other unique identifier for the conversation
9 | sessionTTL: 300, // 5 minutes, omit this parameter to make sessions never expire
10 | config: {
11 | url: "redis://localhost:6379", // Default value, override with your own instance's URL
12 | },
13 | }),
14 | });
15 |
16 | const model = new ChatOpenAI({
17 | modelName: "gpt-3.5-turbo",
18 | temperature: 0,
19 | });
20 |
21 | const chain = new ConversationChain({ llm: model, memory });
22 |
23 | const res1 = await chain.call({ input: "Hi! I'm Jim." });
24 | console.log({ res1 });
25 | /*
26 | {
27 | res1: {
28 | text: "Hello Jim! It's nice to meet you. My name is AI. How may I assist you today?"
29 | }
30 | }
31 | */
32 |
33 | const res2 = await chain.call({ input: "What did I just say my name was?" });
34 | console.log({ res2 });
35 |
36 | /*
37 | {
38 |     res2: {
39 | text: "You said your name was Jim."
40 | }
41 | }
42 | */
43 |
--------------------------------------------------------------------------------
/examples/src/memory/dynamodb-store.ts:
--------------------------------------------------------------------------------
1 | import { BufferMemory } from "langchain/memory";
2 | import { DynamoDBChatMessageHistory } from "langchain/stores/message/dynamodb";
3 | import { ChatOpenAI } from "langchain/chat_models/openai";
4 | import { ConversationChain } from "langchain/chains";
5 |
6 | const memory = new BufferMemory({
7 | chatHistory: new DynamoDBChatMessageHistory({
8 | tableName: "langchain",
9 | partitionKey: "id",
10 | sessionId: new Date().toISOString(), // Or some other unique identifier for the conversation
11 | config: {
12 | region: "us-east-2",
13 | credentials: {
14 |       accessKeyId: "", // replace with your AWS credentials
15 |       secretAccessKey: "",
16 | },
17 | },
18 | }),
19 | });
20 |
21 | const model = new ChatOpenAI();
22 | const chain = new ConversationChain({ llm: model, memory });
23 |
24 | const res1 = await chain.call({ input: "Hi! I'm Jim." });
25 | console.log({ res1 });
26 | /*
27 | {
28 | res1: {
29 | text: "Hello Jim! It's nice to meet you. My name is AI. How may I assist you today?"
30 | }
31 | }
32 | */
33 |
34 | const res2 = await chain.call({ input: "What did I just say my name was?" });
35 | console.log({ res2 });
36 |
37 | /*
38 | {
39 |     res2: {
40 | text: "You said your name was Jim."
41 | }
42 | }
43 | */
44 |
--------------------------------------------------------------------------------
/examples/.eslintrc.cjs:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | extends: [
3 | "airbnb-base",
4 | "eslint:recommended",
5 | "prettier",
6 | "plugin:@typescript-eslint/recommended",
7 | ],
8 | parserOptions: {
9 | ecmaVersion: 12,
10 | parser: "@typescript-eslint/parser",
11 | sourceType: "module",
12 | },
13 | plugins: ["@typescript-eslint"],
14 | rules: {
15 | "@typescript-eslint/explicit-module-boundary-types": 0,
16 | "@typescript-eslint/no-empty-function": 0,
17 | "@typescript-eslint/no-shadow": 0,
18 | "@typescript-eslint/no-use-before-define": ["error", "nofunc"],
19 | "@typescript-eslint/no-unused-vars": 0,
20 | camelcase: 0,
21 | "class-methods-use-this": 0,
22 | "import/extensions": 0,
23 | "import/no-extraneous-dependencies": [
24 | "error",
25 | { devDependencies: ["**/*.test.ts"] },
26 | ],
27 | "import/no-unresolved": 0,
28 | "import/prefer-default-export": 0,
29 | "keyword-spacing": "error",
30 | "max-classes-per-file": 0,
31 | "max-len": 0,
32 | "no-await-in-loop": 0,
33 | "no-bitwise": 0,
34 | "no-console": 0,
35 | "no-restricted-syntax": 0,
36 | "no-shadow": 0,
37 | "no-underscore-dangle": 0,
38 | "no-use-before-define": 0,
39 | "no-useless-constructor": 0,
40 | semi: ["error", "always"],
41 | },
42 | };
43 |
--------------------------------------------------------------------------------