├── .codespellignore ├── .dockerignore ├── .editorconfig ├── .env.full.example ├── .env.quickstart.example ├── .eslintrc.cjs ├── .github └── workflows │ ├── ci.yml │ ├── integration-tests.yml │ └── unit-tests.yml ├── .gitignore ├── .vscode └── settings.json ├── FEATURES.md ├── LICENSE ├── README.md ├── jest.config.js ├── jest.setup.cjs ├── langgraph.json ├── memory-v2 ├── Makefile ├── README.md ├── langgraph.json ├── memory_v2 │ ├── graph.py │ └── state.py ├── poetry.lock └── pyproject.toml ├── package.json ├── pyproject.toml ├── scripts ├── README.md ├── backfill.ts ├── checkLanggraphPaths.js ├── crons │ ├── create-cron.ts │ ├── delete-cron.ts │ └── list-crons.ts ├── delete-run-thread.ts ├── generate-post.ts ├── get-all-used-links.ts ├── get-scheduled-runs.ts ├── reinterrupt.ts └── repurposer │ ├── create-cron.ts │ └── ingest.ts ├── slack-messaging ├── .gitignore ├── README.md └── src │ └── langgraph_slack │ ├── __init__.py │ ├── __main__.py │ ├── auth.py │ ├── config.py │ └── server.py ├── src ├── agents │ ├── curate-data │ │ ├── constants.ts │ │ ├── index.ts │ │ ├── loaders │ │ │ ├── ai-news-blog.ts │ │ │ ├── github │ │ │ │ ├── langchain.ts │ │ │ │ └── trending.ts │ │ │ ├── latent-space.ts │ │ │ ├── reddit.ts │ │ │ ├── tests │ │ │ │ ├── ai-news-blog.int.test.ts │ │ │ │ ├── github.int.test.ts │ │ │ │ ├── latent-space.int.test.ts │ │ │ │ ├── reddit.int.test.ts │ │ │ │ └── twitter.int.test.ts │ │ │ └── twitter.ts │ │ ├── nodes │ │ │ ├── extract-ai-newsletter-content.ts │ │ │ ├── format-data.ts │ │ │ ├── generate-posts-subgraph.ts │ │ │ ├── ingest-data.ts │ │ │ ├── tweets │ │ │ │ ├── group-tweets-by-content.ts │ │ │ │ ├── prompts.ts │ │ │ │ ├── re-group-tweets.ts │ │ │ │ ├── reflect-tweet-groups.ts │ │ │ │ └── tests │ │ │ │ │ ├── group-by-content.int.test.ts │ │ │ │ │ ├── re-group-reflect.int.test.ts │ │ │ │ │ └── validate-bulk-tweets.int.test.ts │ │ │ ├── validate-bulk-tweets.ts │ │ │ ├── verify-github-wrapper.ts │ │ │ └── verify-reddit-wrapper.ts │ │ ├── state.ts 
│ │ ├── tests │ │ │ └── e2e.int.test.ts │ │ ├── types.ts │ │ └── utils │ │ │ ├── created-at-after.ts │ │ │ ├── get-unique-array.ts │ │ │ └── stores │ │ │ ├── github-repos.ts │ │ │ ├── latent-space-links.ts │ │ │ ├── reddit-post-ids.ts │ │ │ └── twitter.ts │ ├── curated-post-interrupt │ │ ├── index.ts │ │ └── types.ts │ ├── find-images │ │ ├── find-images-graph.ts │ │ ├── nodes │ │ │ ├── find-images.ts │ │ │ ├── re-rank-images.ts │ │ │ └── validate-images.ts │ │ └── screenshot.ts │ ├── generate-post │ │ ├── constants.ts │ │ ├── generate-post-graph.ts │ │ ├── generate-post-state.ts │ │ ├── nodes │ │ │ ├── auth-socials.ts │ │ │ ├── condense-post.ts │ │ │ ├── generate-post │ │ │ │ ├── index.ts │ │ │ │ ├── prompts.ts │ │ │ │ └── utils.ts │ │ │ ├── generate-report │ │ │ │ ├── index.ts │ │ │ │ └── prompts.ts │ │ │ └── rewrite-with-split-url.ts │ │ └── prompts │ │ │ ├── examples.ts │ │ │ ├── index.ts │ │ │ └── prompts.langchain.ts │ ├── generate-report │ │ ├── index.ts │ │ ├── nodes │ │ │ ├── extract-key-details.ts │ │ │ └── generate-report.ts │ │ ├── prompts.ts │ │ ├── state.ts │ │ └── utils.ts │ ├── generate-thread │ │ ├── index.ts │ │ ├── nodes │ │ │ ├── generate-thread-plan.ts │ │ │ ├── generate-thread-posts.ts │ │ │ ├── human-node │ │ │ │ └── index.ts │ │ │ ├── rewrite-thread.ts │ │ │ └── schedule-thread.ts │ │ ├── state.ts │ │ ├── types.ts │ │ └── utils.ts │ ├── ingest-data │ │ ├── ingest-data-graph.ts │ │ ├── ingest-data-state.ts │ │ └── nodes │ │ │ ├── ingest-slack.ts │ │ │ └── ingest-twitter.ts │ ├── ingest-repurposed-data │ │ ├── constants.ts │ │ ├── index.ts │ │ ├── nodes │ │ │ ├── extract.ts │ │ │ └── ingest-slack.ts │ │ └── types.ts │ ├── reflection │ │ ├── index.ts │ │ └── prompts.ts │ ├── repurposer-post-interrupt │ │ ├── index.ts │ │ ├── nodes │ │ │ ├── human-node │ │ │ │ ├── index.ts │ │ │ │ ├── router.ts │ │ │ │ └── utils.ts │ │ │ └── rewrite-posts.ts │ │ └── types.ts │ ├── repurposer │ │ ├── index.ts │ │ ├── nodes │ │ │ ├── extract-content │ │ │ │ ├── 
get-url-contents.ts │ │ │ │ └── index.ts │ │ │ ├── generate-campaign-plan.ts │ │ │ ├── generate-posts.ts │ │ │ ├── start-interrupt-graph.ts │ │ │ └── validate-images.ts │ │ ├── tests │ │ │ ├── graph.int.test.ts │ │ │ └── images.test.ts │ │ ├── types.ts │ │ └── utils.ts │ ├── shared │ │ ├── auth │ │ │ ├── linkedin.ts │ │ │ └── twitter.ts │ │ ├── nodes │ │ │ ├── generate-post │ │ │ │ ├── human-node.ts │ │ │ │ ├── rewrite-post.ts │ │ │ │ ├── schedule-post.ts │ │ │ │ └── types.ts │ │ │ ├── route-response.ts │ │ │ ├── update-scheduled-date.ts │ │ │ ├── verify-content.ts │ │ │ ├── verify-general.ts │ │ │ ├── verify-github.ts │ │ │ ├── verify-luma.ts │ │ │ ├── verify-youtube.ts │ │ │ └── youtube.utils.ts │ │ ├── shared-state.ts │ │ ├── stores │ │ │ └── post-subject-urls.ts │ │ └── youtube │ │ │ └── video-summary.ts │ ├── should-exclude.ts │ ├── supervisor │ │ ├── nodes │ │ │ ├── determine-post-type.ts │ │ │ ├── generate-posts.ts │ │ │ └── group-reports.ts │ │ ├── supervisor-graph.ts │ │ ├── supervisor-state.ts │ │ ├── tests │ │ │ └── e2e.int.test.ts │ │ └── types.ts │ ├── types.ts │ ├── upload-post │ │ └── index.ts │ ├── utils.ts │ ├── verify-links │ │ ├── verify-links-graph.ts │ │ └── verify-links-state.ts │ ├── verify-reddit-post │ │ ├── nodes │ │ │ ├── get-external-urls.ts │ │ │ ├── get-post.ts │ │ │ └── validate-reddit-post.ts │ │ ├── tests │ │ │ ├── data │ │ │ │ └── inputs-outputs.ts │ │ │ └── e2e.int.test.ts │ │ ├── types.ts │ │ ├── utils.ts │ │ ├── verify-reddit-post-graph.ts │ │ └── verify-reddit-post-state.ts │ └── verify-tweet │ │ ├── nodes │ │ ├── get-tweet-content.ts │ │ ├── tests │ │ │ └── get-tweet-content.int.test.ts │ │ └── validate-tweet.ts │ │ ├── verify-tweet-graph.ts │ │ └── verify-tweet-state.ts ├── clients │ ├── auth-server.ts │ ├── linkedin.ts │ ├── reddit │ │ ├── client.ts │ │ ├── get-user-less-token.ts │ │ ├── snoowrap.ts │ │ ├── tests │ │ │ └── reddit.int.test.ts │ │ └── types.ts │ ├── slack │ │ ├── client.ts │ │ ├── types.ts │ │ └── utils.ts │ 
├── twitter │ │ ├── SETUP.md │ │ ├── client.ts │ │ ├── tests │ │ │ ├── arcade.int.test.ts │ │ │ └── twitter.int.test.ts │ │ ├── types.ts │ │ └── utils.ts │ └── types.ts ├── evals │ ├── e2e │ │ ├── e2e.int.test.ts │ │ └── inputs.ts │ ├── general │ │ └── index.ts │ ├── github │ │ └── index.ts │ ├── twitter │ │ └── index.ts │ ├── validate-images │ │ ├── inputs.ts │ │ └── validate-images.int.test.ts │ └── youtube │ │ └── index.ts ├── tests │ ├── agent.test.ts │ ├── data │ │ ├── langchain_logo.png │ │ └── langchain_logo_2.png │ ├── expected.ts │ ├── github.int.test.ts │ ├── graph.int.test.ts │ ├── linkedin.int.test.ts │ ├── scrape-general-content.int.test.ts │ ├── slack.int.test.ts │ ├── states.ts │ ├── utils.int.test.ts │ └── youtube.int.test.ts └── utils │ ├── create-dir.ts │ ├── date.ts │ ├── delay-run.ts │ ├── firecrawl.ts │ ├── github-repo-contents.ts │ ├── image-message.ts │ ├── reflections.ts │ ├── schedule-date │ ├── constants.ts │ ├── helpers.ts │ ├── index.ts │ ├── tests │ │ └── schedule-date.test.ts │ └── types.ts │ ├── screenshot.ts │ └── supabase.ts ├── static ├── agent_flow.png └── graph_screenshot.png ├── tsconfig.json ├── uv.lock └── yarn.lock /.codespellignore: -------------------------------------------------------------------------------- 1 | IST 2 | afterAll -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | dist -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | 3 | [*] 4 | end_of_line = lf 5 | insert_final_newline = true 6 | 7 | [*.{js,json,yml}] 8 | charset = utf-8 9 | indent_style = space 10 | indent_size = 2 11 | -------------------------------------------------------------------------------- /.env.quickstart.example: 
-------------------------------------------------------------------------------- 1 | # For LangSmith tracing 2 | LANGCHAIN_API_KEY= 3 | LANGCHAIN_TRACING_V2=true 4 | 5 | # For LLM generations 6 | ANTHROPIC_API_KEY= 7 | 8 | # For web scraping 9 | FIRECRAWL_API_KEY= 10 | 11 | # Arcade API key - used for fetching Tweets, and scheduling LinkedIn/Twitter posts 12 | ARCADE_API_KEY= 13 | # Setting this to false will not use Arcade for reading, or posting Tweets 14 | USE_ARCADE_AUTH="true" 15 | # Set this to true if your LinkedIn account is tied to a company page, and you want 16 | # to post from the company page 17 | POST_TO_LINKEDIN_ORGANIZATION="false" 18 | 19 | # The Twitter/LinkedIn user ID/email/username of the account you want to use to post from 20 | # This field can be passed via configurable fields (`twitterUserId`, `linkedinUserId`), 21 | # _or_ set as an environment variable here. 22 | TWITTER_USER_ID= 23 | LINKEDIN_USER_ID= 24 | 25 | # Whether or not to skip the verification step. If "true", all links will be assumed to be valid. 26 | SKIP_CONTENT_RELEVANCY_CHECK="false" 27 | 28 | # Whether or not to skip the used URLs check. If "true", the graph will not prevent duplicate links from being used, or save those links for future checks. 
29 | SKIP_USED_URLS_CHECK="false" 30 | -------------------------------------------------------------------------------- /.eslintrc.cjs: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | extends: [ 3 | "eslint:recommended", 4 | "prettier", 5 | "plugin:@typescript-eslint/recommended", 6 | ], 7 | parserOptions: { 8 | ecmaVersion: 12, 9 | parser: "@typescript-eslint/parser", 10 | project: "./tsconfig.json", 11 | sourceType: "module", 12 | }, 13 | plugins: ["import", "@typescript-eslint", "no-instanceof"], 14 | ignorePatterns: [ 15 | ".eslintrc.cjs", 16 | "scripts", 17 | "src/utils/lodash/*", 18 | "node_modules", 19 | "dist", 20 | "dist-cjs", 21 | "*.js", 22 | "*.cjs", 23 | "*.d.ts", 24 | ], 25 | rules: { 26 | "@typescript-eslint/explicit-module-boundary-types": 0, 27 | "@typescript-eslint/no-empty-function": 0, 28 | "@typescript-eslint/no-shadow": 0, 29 | "@typescript-eslint/no-empty-interface": 0, 30 | "@typescript-eslint/no-use-before-define": ["error", "nofunc"], 31 | "@typescript-eslint/no-unused-vars": [ 32 | "error", 33 | { 34 | argsIgnorePattern: "^_", 35 | varsIgnorePattern: "^_|^UNUSED_", 36 | caughtErrorsIgnorePattern: "^_", 37 | destructuredArrayIgnorePattern: "^_", 38 | }, 39 | ], 40 | "@typescript-eslint/no-floating-promises": "error", 41 | "@typescript-eslint/no-misused-promises": "error", 42 | "@typescript-eslint/await-thenable": "error", 43 | "@typescript-eslint/no-explicit-any": 0, 44 | camelcase: 0, 45 | "class-methods-use-this": 0, 46 | "import/extensions": [2, "ignorePackages"], 47 | "import/no-extraneous-dependencies": [ 48 | "error", 49 | { devDependencies: ["**/*.test.ts"] }, 50 | ], 51 | "import/no-unresolved": 0, 52 | "import/prefer-default-export": 0, 53 | "keyword-spacing": "error", 54 | "max-classes-per-file": 0, 55 | "max-len": 0, 56 | "no-await-in-loop": 0, 57 | "no-bitwise": 0, 58 | "no-console": 0, 59 | "no-restricted-syntax": 0, 60 | "no-shadow": 0, 61 | "no-continue": 0, 62 | 
"no-underscore-dangle": 0, 63 | "no-use-before-define": 0, 64 | "no-useless-constructor": 0, 65 | "no-return-await": 0, 66 | "consistent-return": 0, 67 | "no-else-return": 0, 68 | "new-cap": ["error", { properties: false, capIsNew: false }], 69 | }, 70 | }; 71 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | # Run formatting on all PRs 2 | 3 | name: CI 4 | 5 | on: 6 | push: 7 | branches: ["main"] 8 | pull_request: 9 | workflow_dispatch: # Allows triggering the workflow manually in GitHub UI 10 | 11 | # If another push to the same PR or branch happens while this workflow is still running, 12 | # cancel the earlier run in favor of the next run. 13 | # 14 | # There's no point in testing an outdated version of the code. GitHub only allows 15 | # a limited number of job runners to be active at the same time, so it's better to cancel 16 | # pointless jobs early so that more useful jobs can run sooner. 
17 | concurrency: 18 | group: ${{ github.workflow }}-${{ github.ref }} 19 | cancel-in-progress: true 20 | 21 | jobs: 22 | format: 23 | name: Check formatting 24 | runs-on: ubuntu-latest 25 | steps: 26 | - uses: actions/checkout@v4 27 | - name: Use Node.js 18.x 28 | uses: actions/setup-node@v3 29 | with: 30 | node-version: 18.x 31 | cache: "yarn" 32 | - name: Install dependencies 33 | run: yarn install --immutable --mode=skip-build 34 | - name: Check formatting 35 | run: yarn format:check 36 | 37 | lint: 38 | name: Check linting 39 | runs-on: ubuntu-latest 40 | steps: 41 | - uses: actions/checkout@v4 42 | - name: Use Node.js 18.x 43 | uses: actions/setup-node@v3 44 | with: 45 | node-version: 18.x 46 | cache: "yarn" 47 | - name: Install dependencies 48 | run: yarn install --immutable --mode=skip-build 49 | - name: Check linting 50 | run: yarn run lint:all 51 | 52 | readme-spelling: 53 | name: Check README spelling 54 | runs-on: ubuntu-latest 55 | steps: 56 | - uses: actions/checkout@v4 57 | - uses: codespell-project/actions-codespell@v2 58 | with: 59 | ignore_words_file: .codespellignore 60 | path: README.md 61 | 62 | check-spelling: 63 | name: Check code spelling 64 | runs-on: ubuntu-latest 65 | steps: 66 | - uses: actions/checkout@v4 67 | - uses: codespell-project/actions-codespell@v2 68 | with: 69 | ignore_words_file: .codespellignore 70 | path: src 71 | -------------------------------------------------------------------------------- /.github/workflows/integration-tests.yml: -------------------------------------------------------------------------------- 1 | # This workflow will run integration tests for the current project once per day 2 | 3 | name: Integration Tests 4 | 5 | on: 6 | schedule: 7 | - cron: "37 14 * * *" # Run at 7:37 AM Pacific Time (14:37 UTC) every day 8 | workflow_dispatch: # Allows triggering the workflow manually in GitHub UI 9 | 10 | # If another scheduled run starts while this workflow is still running, 11 | # cancel the earlier run in favor 
of the next run. 12 | concurrency: 13 | group: ${{ github.workflow }}-${{ github.ref }} 14 | cancel-in-progress: true 15 | 16 | jobs: 17 | integration-tests: 18 | name: Integration Tests 19 | strategy: 20 | matrix: 21 | os: [ubuntu-latest] 22 | node-version: [18.x, 20.x] 23 | runs-on: ${{ matrix.os }} 24 | steps: 25 | - uses: actions/checkout@v4 26 | 27 | - name: Use Node.js ${{ matrix.node-version }} 28 | uses: actions/setup-node@v3 29 | with: 30 | node-version: ${{ matrix.node-version }} 31 | cache: "yarn" 32 | 33 | - name: Install dependencies 34 | run: yarn install --immutable 35 | 36 | - name: Build project 37 | run: yarn build 38 | 39 | - name: Run integration tests 40 | run: yarn test:int 41 | -------------------------------------------------------------------------------- /.github/workflows/unit-tests.yml: -------------------------------------------------------------------------------- 1 | # This workflow will run unit tests for the current project 2 | 3 | name: Unit Tests 4 | 5 | on: 6 | push: 7 | branches: ["main"] 8 | pull_request: 9 | workflow_dispatch: # Allows triggering the workflow manually in GitHub UI 10 | 11 | # If another push to the same PR or branch happens while this workflow is still running, 12 | # cancel the earlier run in favor of the next run. 
13 | concurrency: 14 | group: ${{ github.workflow }}-${{ github.ref }} 15 | cancel-in-progress: true 16 | 17 | jobs: 18 | unit-tests: 19 | name: Unit Tests 20 | strategy: 21 | matrix: 22 | os: [ubuntu-latest] 23 | node-version: [18.x, 20.x] 24 | runs-on: ${{ matrix.os }} 25 | steps: 26 | - uses: actions/checkout@v4 27 | 28 | - name: Use Node.js ${{ matrix.node-version }} 29 | uses: actions/setup-node@v3 30 | with: 31 | node-version: ${{ matrix.node-version }} 32 | cache: "yarn" 33 | 34 | - name: Install dependencies 35 | run: yarn install --immutable 36 | 37 | - name: Build project 38 | run: yarn build 39 | 40 | - name: Run tests 41 | run: yarn test 42 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | index.cjs 2 | index.js 3 | index.d.ts 4 | node_modules 5 | dist 6 | .yarn/* 7 | !.yarn/patches 8 | !.yarn/plugins 9 | !.yarn/releases 10 | !.yarn/sdks 11 | !.yarn/versions 12 | yarn-error.log 13 | 14 | .turbo 15 | **/.turbo 16 | **/.eslintcache 17 | 18 | .env 19 | .env.* 20 | !.env.example 21 | !.env.*.example 22 | 23 | .ipynb_checkpoints 24 | 25 | # Screenshots taken during tests 26 | src/tests/data/screenshots/ 27 | src/agents/verify-reddit-post/nodes/tests/data/ 28 | src/agents/verify-reddit-post/nodes/tests/data/openai_o1_vs_recent_leetcode_questions.json 29 | src/agents/curate-data/nodes/tweets/tests/data/* 30 | src/clients/reddit/.secrets/ 31 | 32 | # LangGraph API 33 | .langgraph_api 34 | 35 | __pycache__/ 36 | .mypy_cache/ 37 | .ruff_cache/ -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "cSpell.words": [ 3 | "agentinbox", 4 | "checkpointer", 5 | "LANGSMITH", 6 | "Luma", 7 | "Repurposer", 8 | "reranked", 9 | "subreddits", 10 | "Supabase", 11 | "Userless" 12 | ], 13 | 
"python.languageServer": "None" 14 | } 15 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 LangChain 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /jest.config.js: -------------------------------------------------------------------------------- 1 | export default { 2 | preset: "ts-jest/presets/default-esm", 3 | moduleNameMapper: { 4 | "^(\\.{1,2}/.*)\\.js$": "$1", 5 | }, 6 | transform: { 7 | "^.+\\.tsx?$": [ 8 | "ts-jest", 9 | { 10 | useESM: true, 11 | }, 12 | ], 13 | }, 14 | extensionsToTreatAsEsm: [".ts"], 15 | setupFiles: ["dotenv/config", "./jest.setup.cjs"], 16 | passWithNoTests: true, 17 | testTimeout: 20_000, 18 | }; 19 | -------------------------------------------------------------------------------- /jest.setup.cjs: -------------------------------------------------------------------------------- 1 | // const timezoneMock = require("timezone-mock"); 2 | require("dotenv").config(); 3 | 4 | // Mock the timezone to 'America/Los_Angeles' 5 | // timezoneMock.register("US/Pacific"); // Alternatively, use 'America/Los_Angeles' 6 | 7 | // Optional: Log the current timezone to verify 8 | // console.log( 9 | // "Current Timezone:", 10 | // Intl.DateTimeFormat().resolvedOptions().timeZone, 11 | // ); 12 | 13 | // If you have any global configurations or mocks, add them here 14 | // For example, you can set up global variables, mock APIs, etc. 
15 | -------------------------------------------------------------------------------- /langgraph.json: -------------------------------------------------------------------------------- 1 | { 2 | "node_version": "20", 3 | "graphs": { 4 | "ingest_data": "./src/agents/ingest-data/ingest-data-graph.ts:graph", 5 | "generate_post": "./src/agents/generate-post/generate-post-graph.ts:generatePostGraph", 6 | "upload_post": "./src/agents/upload-post/index.ts:uploadPostGraph", 7 | "reflection": "./src/agents/reflection/index.ts:reflectionGraph", 8 | "generate_thread": "./src/agents/generate-thread/index.ts:generateThreadGraph", 9 | "curate_data": "./src/agents/curate-data/index.ts:curateDataGraph", 10 | "verify_reddit_post": "./src/agents/verify-reddit-post/verify-reddit-post-graph.ts:verifyRedditPostGraph", 11 | "verify_tweet": "./src/agents/verify-tweet/verify-tweet-graph.ts:verifyTweetGraph", 12 | "supervisor": "./src/agents/supervisor/supervisor-graph.ts:supervisorGraph", 13 | "generate_report": "./src/agents/generate-report/index.ts:generateReportGraph", 14 | "repurposer": "./src/agents/repurposer/index.ts:repurposerGraph", 15 | "curated_post_interrupt": "./src/agents/curated-post-interrupt/index.ts:curatedPostInterruptGraph", 16 | "ingest_repurposed_data": "./src/agents/ingest-repurposed-data/index.ts:graph", 17 | "repurposer_post_interrupt": "./src/agents/repurposer-post-interrupt/index.ts:repurposerPostInterruptGraph" 18 | }, 19 | "env": ".env", 20 | "dependencies": ["."], 21 | "dockerfile_lines": ["RUN npx -y playwright@1.49.1 install --with-deps"] 22 | } 23 | -------------------------------------------------------------------------------- /memory-v2/Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: all format lint test tests test_watch integration_tests docker_tests help extended_tests 2 | 3 | # Default target executed when no arguments are given to make. 
4 | all: help 5 | 6 | # Define a variable for the test file path. 7 | TEST_FILE ?= tests/unit_tests/ 8 | 9 | test: 10 | python -m pytest $(TEST_FILE) 11 | 12 | integration_tests: 13 | python -m pytest tests/integration_tests 14 | 15 | test_watch: 16 | python -m ptw --snapshot-update --now . -- -vv tests/unit_tests 17 | 18 | test_profile: 19 | python -m pytest -vv tests/unit_tests/ --profile-svg 20 | 21 | extended_tests: 22 | python -m pytest --only-extended $(TEST_FILE) 23 | 24 | 25 | ###################### 26 | # LINTING AND FORMATTING 27 | ###################### 28 | 29 | # Define a variable for Python and notebook files. 30 | PYTHON_FILES=src/ 31 | MYPY_CACHE=.mypy_cache 32 | lint format: PYTHON_FILES=. 33 | lint_diff format_diff: PYTHON_FILES=$(shell git diff --name-only --diff-filter=d main | grep -E '\.py$$|\.ipynb$$') 34 | lint_package: PYTHON_FILES=src 35 | lint_tests: PYTHON_FILES=tests 36 | lint_tests: MYPY_CACHE=.mypy_cache_test 37 | 38 | lint lint_diff lint_package lint_tests: 39 | python -m ruff check . 
40 | [ "$(PYTHON_FILES)" = "" ] || python -m ruff format $(PYTHON_FILES) --diff 41 | [ "$(PYTHON_FILES)" = "" ] || python -m ruff check --select I $(PYTHON_FILES) 42 | [ "$(PYTHON_FILES)" = "" ] || python -m mypy --strict $(PYTHON_FILES) 43 | [ "$(PYTHON_FILES)" = "" ] || mkdir -p $(MYPY_CACHE) && python -m mypy --strict $(PYTHON_FILES) --cache-dir $(MYPY_CACHE) 44 | 45 | format format_diff: 46 | ruff format $(PYTHON_FILES) 47 | ruff check --select I --fix $(PYTHON_FILES) 48 | 49 | spell_check: 50 | codespell --toml pyproject.toml 51 | 52 | spell_fix: 53 | codespell --toml pyproject.toml -w 54 | 55 | ###################### 56 | # HELP 57 | ###################### 58 | 59 | help: 60 | @echo '----' 61 | @echo 'format - run code formatters' 62 | @echo 'lint - run linters' 63 | @echo 'test - run unit tests' 64 | @echo 'tests - run unit tests' 65 | @echo 'test TEST_FILE= - run all tests in file' 66 | @echo 'test_watch - run unit tests in watch mode' 67 | -------------------------------------------------------------------------------- /memory-v2/README.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langchain-ai/social-media-agent/5bc28b136b1d28ef16d4a63f6b6dde1b719754d1/memory-v2/README.md -------------------------------------------------------------------------------- /memory-v2/langgraph.json: -------------------------------------------------------------------------------- 1 | { 2 | "graphs": { 3 | "reflection_v2": "./memory_v2/graph.py:graph" 4 | }, 5 | "env": "../.env", 6 | "dependencies": ["."] 7 | } 8 | -------------------------------------------------------------------------------- /memory-v2/memory_v2/graph.py: -------------------------------------------------------------------------------- 1 | """The reflection graph.""" 2 | 3 | from typing import Any, Dict 4 | 5 | from langchain_anthropic import ChatAnthropic 6 | from langgraph.graph import StateGraph 7 | from langgraph.store.base import 
BaseStore 8 | from langmem.prompts.looping import ( 9 | Prompt, 10 | create_prompt_optimizer, 11 | ) 12 | 13 | from memory_v2.state import State 14 | 15 | 16 | REFLECTIONS_NAMESPACE = ("reflection_rules",) 17 | REFLECTIONS_KEY = "rules" 18 | PROMPT_KEY = "prompt" 19 | 20 | 21 | async def aget_reflections(store: BaseStore) -> str: 22 | """Get reflections from the store.""" 23 | reflections = await store.aget(REFLECTIONS_NAMESPACE, REFLECTIONS_KEY) 24 | 25 | if not reflections: 26 | return "No prompt rules have been created yet." 27 | 28 | ruleset = reflections.value.get( 29 | PROMPT_KEY, "No prompt rules have been created yet." 30 | ) 31 | 32 | return ruleset 33 | 34 | 35 | async def aput_reflections(store: BaseStore, reflections: str) -> None: 36 | """Put reflections in the store.""" 37 | await store.aput(REFLECTIONS_NAMESPACE, REFLECTIONS_KEY, {PROMPT_KEY: reflections}) 38 | 39 | 40 | async def reflection(state: State, store: BaseStore) -> Dict[str, Any]: 41 | """Process reflection and update rules based on user interaction.""" 42 | model = ChatAnthropic(model="claude-3-5-sonnet-latest", temperature=0) 43 | 44 | current_reflections_prompt = await aget_reflections(store) 45 | 46 | update_instructions = """Analyze the following to determine if rules prompt updates are needed: 47 | 1. Current rules prompt (current_prompt) 48 | 2. Generated social media post (session) 49 | 3. User feedback on the post (feedback) 50 | 51 | If the user's feedback explicitly requests changes: 52 | 1. Create or update rules that directly address the feedback 53 | 2. Keep each rule clear, specific, and concise 54 | 3. If a new rule conflicts with an existing one, use the new rule 55 | 4. 
Only add rules that are explicitly mentioned in the user's feedback 56 | 57 | Guidelines for updates: 58 | - Do not infer or assume rules beyond what's explicitly stated 59 | - Do not add rules based on implicit feedback 60 | - Do not overgeneralize the feedback 61 | - Combine existing rules if it improves clarity without losing specificity 62 | 63 | Output only the updated rules prompt, with no additional context or instructions.""" 64 | 65 | feedback = state.user_response 66 | 67 | prompt = Prompt( 68 | name="Update Prompt", 69 | prompt=current_reflections_prompt, 70 | update_instructions=update_instructions, 71 | feedback=feedback, 72 | ) 73 | 74 | sessions = state.original_post 75 | 76 | optimizer = create_prompt_optimizer(model, kind="metaprompt") 77 | 78 | result = await optimizer(sessions, prompt) 79 | 80 | await aput_reflections(store, result) 81 | 82 | return {} 83 | 84 | 85 | # Define a new graph 86 | workflow = StateGraph(State) 87 | workflow.add_node("reflection", reflection) 88 | workflow.add_edge("__start__", "reflection") 89 | 90 | graph = workflow.compile() 91 | graph.name = "Reflection Graph" 92 | -------------------------------------------------------------------------------- /memory-v2/memory_v2/state.py: -------------------------------------------------------------------------------- 1 | """Define the state structures for the agent.""" 2 | 3 | from __future__ import annotations 4 | 5 | from dataclasses import dataclass 6 | 7 | 8 | @dataclass 9 | class State: 10 | """The state of the memory graph.""" 11 | 12 | original_post: str = "" 13 | """The original post that the user submitted feedback on""" 14 | user_response: str = "" 15 | """The user's feedback on the new post""" 16 | -------------------------------------------------------------------------------- /memory-v2/pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "memory-v2" 3 | version = "0.0.1" 4 | description = 
"Memory graph for the Social Media Agent." 5 | authors = ["Brace Sproul"] 6 | readme = "README.md" 7 | license = "MIT" 8 | packages = [{include = "memory_v2"}] 9 | 10 | [tool.poetry.dependencies] 11 | python = ">=3.11,<4.0.0" 12 | langmem = ">=0.0.5rc5" 13 | langgraph = ">=0.2.66" 14 | langchain-anthropic = ">=0.3.3" 15 | langgraph-sdk = ">=0.1.51" 16 | python-dotenv = ">=1.0.1" 17 | 18 | [tool.poetry.group.dev.dependencies] 19 | mypy = ">=1.11.1" 20 | ruff = ">=0.6.1" 21 | 22 | [tool.ruff] 23 | lint.select = [ 24 | "E", # pycodestyle 25 | "F", # pyflakes 26 | "I", # isort 27 | "D", # pydocstyle 28 | "D401", # First line should be in imperative mood 29 | "T201", 30 | "UP", 31 | ] 32 | lint.ignore = [ 33 | "UP006", 34 | "UP007", 35 | # We actually do want to import from typing_extensions 36 | "UP035", 37 | # Relax the convention by _not_ requiring documentation for every function parameter. 38 | "D417", 39 | "E501", 40 | ] 41 | [tool.ruff.lint.per-file-ignores] 42 | "tests/*" = ["D", "UP"] 43 | [tool.ruff.lint.pydocstyle] 44 | convention = "google" -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "langgraph-slack" 3 | version = "0.0.1" 4 | description = "Add your description here" 5 | readme = "README.md" 6 | requires-python = ">=3.11" 7 | dependencies = [ 8 | "fastapi>=0.115.6", 9 | "langchain>=0.3.20", 10 | "langchain-openai>=0.3.7", 11 | "langgraph-sdk>=0.1.48", 12 | "langmem>=0.0.15", 13 | "python-dotenv>=1.0.1", 14 | "slack-bolt>=1.22.0", 15 | "uvicorn>=0.34.0", 16 | "langgraph-prebuilt>=0.1.2", 17 | "aiohttp>=3.11.13", 18 | ] 19 | 20 | [project.packages] 21 | find = { where = ["src"] } 22 | 23 | [build-system] 24 | requires = ["hatchling"] 25 | build-backend = "hatchling.build" 26 | 27 | 28 | [dependency-groups] 29 | dev = [ 30 | "ruff>=0.8.4", 31 | "langgraph-api>=0.0.28", 32 | "langgraph-cli>=0.1.75", 33 | 
] 34 | -------------------------------------------------------------------------------- /scripts/README.md: -------------------------------------------------------------------------------- 1 | # Social Media Agent Scripts 2 | 3 | ## Setup 4 | 5 | First ensure you have all dependencies installed: 6 | 7 | ```bash 8 | yarn install 9 | ``` 10 | 11 | And your `LANGCHAIN_API_KEY`, `LANGGRAPH_API_URL` environment variables set: 12 | 13 | ```bash 14 | LANGCHAIN_API_KEY=... 15 | LANGGRAPH_API_URL=... 16 | ``` 17 | 18 | Some scripts will send output to Slack. If you want this output to post to Slack, ensure you have the `SLACK_BOT_OAUTH_TOKEN` and `SLACK_CHANNEL_ID` environment variables set: 19 | 20 | ```bash 21 | SLACK_BOT_OAUTH_TOKEN=... 22 | SLACK_CHANNEL_ID=... 23 | ``` 24 | 25 | If you don't want to post to Slack, the script will print the output to the console. 26 | 27 | ## Scripts 28 | 29 | ### Get Scheduled Runs 30 | 31 | This script will fetch all scheduled runs and either send them to Slack or print them to the console. 32 | 33 | ```bash 34 | yarn get:scheduled_runs 35 | ``` 36 | 37 | ### Get all used links 38 | 39 | This script will fetch and log all links which are currently scheduled, or interrupted and awaiting human intervention. 40 | 41 | ```bash 42 | yarn get:used_links 43 | ``` 44 | 45 | ### Generate Demo Post 46 | 47 | This script will invoke the graph to generate a post. It defaults to a LangChain blog post, and typically used to demonstrate how the Social Media Agent works. 48 | 49 | ```bash 50 | yarn generate_post 51 | ``` 52 | 53 | ### Delete Run(s) & Thread(s) 54 | 55 | This script will delete runs and associated threads. It requires setting the run ID(s) and thread ID(s) in the script. 56 | 57 | ```bash 58 | yarn graph:delete:run_thread 59 | ``` 60 | 61 | ### Backfill 62 | 63 | This script will backfill your deployment with links. It contains two functions, one for backfilling from Slack, and one for backfilling from a list of links. 
You'll need to uncomment one/both of the functions to use them. 64 | 65 | ```bash 66 | yarn graph:backfill 67 | ``` 68 | 69 | ### Create Cron 70 | 71 | This script will create a cron job to run the `ingest_data` graph. 72 | 73 | ```bash 74 | yarn cron:create 75 | ``` 76 | 77 | ### Delete Cron 78 | 79 | This script will delete a cron job. 80 | 81 | ```bash 82 | yarn cron:delete 83 | ``` 84 | 85 | ### List Crons 86 | 87 | This script will list all cron jobs. 88 | 89 | ```bash 90 | yarn cron:list 91 | ``` 92 | -------------------------------------------------------------------------------- /scripts/backfill.ts: -------------------------------------------------------------------------------- 1 | import "dotenv/config"; 2 | import { Client } from "@langchain/langgraph-sdk"; 3 | import { 4 | SKIP_CONTENT_RELEVANCY_CHECK, 5 | SKIP_USED_URLS_CHECK, 6 | } from "../src/agents/generate-post/constants.js"; 7 | 8 | /** 9 | * Performs a manual data backfill operation using LangGraph. 10 | * 11 | * This function creates a new thread and initiates a data ingestion run 12 | * to backfill historical data. It's useful for one-time data imports 13 | * or catching up on missed data ingestion periods. 14 | * 15 | * The default configuration looks back 7 days, but this can be adjusted 16 | * via the `maxDaysHistory` parameter in the config. 
17 | * 18 | * @async 19 | * @returns {Promise} A promise that resolves when the backfill run is created 20 | * @throws {Error} If there's an issue creating the thread or initiating the run 21 | * 22 | * @example 23 | * ```bash 24 | * yarn graph:backfill 25 | * ``` 26 | */ 27 | export async function backfill() { 28 | const client = new Client({ 29 | apiUrl: process.env.LANGGRAPH_API_URL, 30 | }); 31 | 32 | const thread = await client.threads.create(); 33 | const res = await client.runs.create(thread.thread_id, "ingest_data", { 34 | config: { 35 | configurable: { 36 | slackChannelId: "ADD_SLACK_CHANNEL_ID_HERE", 37 | maxDaysHistory: 50, // Or change to desired number of days 38 | [SKIP_CONTENT_RELEVANCY_CHECK]: true, 39 | [SKIP_USED_URLS_CHECK]: true, 40 | }, 41 | }, 42 | input: {}, 43 | }); 44 | console.log("Created run"); 45 | console.log(res); 46 | } 47 | 48 | // backfill().catch(console.error); 49 | 50 | /** 51 | * Backfill with links instead of ingesting from Slack. 52 | */ 53 | export async function backfillWithLinks() { 54 | const client = new Client({ 55 | apiUrl: process.env.LANGGRAPH_API_URL, 56 | }); 57 | 58 | const newLinksArr: string[] = [ 59 | // Add your new links here 60 | ]; 61 | 62 | const { thread_id } = await client.threads.create(); 63 | await client.runs.create(thread_id, "ingest_data", { 64 | input: { 65 | links: newLinksArr, 66 | }, 67 | config: { 68 | configurable: { 69 | skipIngest: true, 70 | }, 71 | }, 72 | }); 73 | } 74 | 75 | // backfillWithLinks().catch(console.error); 76 | -------------------------------------------------------------------------------- /scripts/checkLanggraphPaths.js: -------------------------------------------------------------------------------- 1 | import fs from "fs"; 2 | import path from "path"; 3 | import { fileURLToPath } from "url"; 4 | 5 | // Function to check if a file exists 6 | function fileExists(filePath) { 7 | return fs.existsSync(filePath); 8 | } 9 | 10 | // Function to check if an object is exported 
from a file 11 | function isObjectExported(filePath, objectName) { 12 | if (filePath.endsWith(".py")) return true; 13 | 14 | try { 15 | const fileContent = fs.readFileSync(filePath, "utf8"); 16 | const exportRegex = new RegExp( 17 | `export\\s+(?:const|let|var)\\s+${objectName}\\s*=|export\\s+\\{[^}]*\\b${objectName}\\b[^}]*\\}`, 18 | ); 19 | return exportRegex.test(fileContent); 20 | } catch (error) { 21 | console.error(`Error reading file ${filePath}: ${error.message}`); 22 | return false; 23 | } 24 | } 25 | 26 | // Main function to check langgraph.json 27 | function checkLanggraphPaths() { 28 | const __filename = fileURLToPath(import.meta.url); 29 | const __dirname = path.dirname(__filename); 30 | const langgraphPath = path.join(__dirname, "..", "langgraph.json"); 31 | 32 | if (!fileExists(langgraphPath)) { 33 | console.error("langgraph.json not found in the root directory"); 34 | process.exit(1); 35 | } 36 | 37 | try { 38 | const langgraphContent = JSON.parse(fs.readFileSync(langgraphPath, "utf8")); 39 | const graphs = langgraphContent.graphs; 40 | 41 | if (!graphs || typeof graphs !== "object") { 42 | console.error('Invalid or missing "graphs" object in langgraph.json'); 43 | process.exit(1); 44 | } 45 | 46 | let hasError = false; 47 | 48 | for (const [key, value] of Object.entries(graphs)) { 49 | const [filePath, objectName] = value.split(":"); 50 | const fullPath = path.join(__dirname, "..", filePath); 51 | 52 | if (!fileExists(fullPath)) { 53 | console.error(`File not found: ${fullPath}`); 54 | hasError = true; 55 | continue; 56 | } 57 | 58 | if (!isObjectExported(fullPath, objectName)) { 59 | console.error( 60 | `Object "${objectName}" is not exported from ${fullPath}`, 61 | ); 62 | hasError = true; 63 | } 64 | } 65 | 66 | if (hasError) { 67 | process.exit(1); 68 | } else { 69 | console.log( 70 | "All paths in langgraph.json are valid and objects are exported correctly.", 71 | ); 72 | } 73 | } catch (error) { 74 | console.error(`Error parsing 
langgraph.json: ${error.message}`); 75 | process.exit(1); 76 | } 77 | } 78 | 79 | checkLanggraphPaths(); 80 | -------------------------------------------------------------------------------- /scripts/crons/create-cron.ts: -------------------------------------------------------------------------------- 1 | import "dotenv/config"; 2 | import { Client } from "@langchain/langgraph-sdk"; 3 | import { 4 | SKIP_CONTENT_RELEVANCY_CHECK, 5 | SKIP_USED_URLS_CHECK, 6 | } from "../../src/agents/generate-post/constants.js"; 7 | 8 | /** 9 | * Creates a new cron job in LangGraph for data ingestion. 10 | * 11 | * This function sets up a daily cron job that runs at 8:00 AM UTC to ingest data. 12 | * It uses the LangGraph Client to create a new cron job with specified configuration 13 | * and then retrieves a list of all existing cron jobs. 14 | * 15 | * @async 16 | * @returns {Promise} A promise that resolves when the cron job is created 17 | * and the list of crons is retrieved 18 | * @throws {Error} If there's an issue creating the cron job or retrieving the list 19 | * 20 | * @example 21 | * ```bash 22 | * yarn cron:create 23 | * ``` 24 | */ 25 | async function createCron() { 26 | const client = new Client({ 27 | apiUrl: process.env.LANGGRAPH_API_URL, 28 | }); 29 | 30 | const res = await client.crons.create("ingest_data", { 31 | schedule: "0 8 * * *", // Runs at 8:00 AM UTC every day (midnight PST / 1 AM PDT) 32 | config: { 33 | configurable: { 34 | slackChannelId: "ADD_SLACK_CHANNEL_ID_HERE", 35 | maxDaysHistory: 1, 36 | [SKIP_CONTENT_RELEVANCY_CHECK]: true, 37 | [SKIP_USED_URLS_CHECK]: true, 38 | }, 39 | }, 40 | input: {}, 41 | }); 42 | console.log("\n\nCreated cron\n\n"); 43 | console.dir(res, { depth: null }); 44 | 45 | const crons = await client.crons.search(); 46 | console.log("\n\nAll Crons\n\n"); 47 | console.dir(crons, { depth: null }); 48 | } 49 | 50 | createCron().catch(console.error); 51 | --------------------------------------------------------------------------------
/scripts/crons/delete-cron.ts: -------------------------------------------------------------------------------- 1 | import "dotenv/config"; 2 | import { Client } from "@langchain/langgraph-sdk"; 3 | 4 | /** 5 | * Deletes a specified cron job from LangGraph. 6 | * 7 | * This function connects to the LangGraph API and deletes a cron job with the specified ID. 8 | * After deletion, it retrieves and displays the updated list of cron jobs. 9 | * 10 | * To find available cron IDs that can be deleted, first run the list-crons script: 11 | * 12 | * ```bash 13 | * yarn cron:list 14 | * ``` 15 | * 16 | * @async 17 | * @returns {Promise} A promise that resolves when the cron job is deleted 18 | * and the updated list is displayed 19 | * @throws {Error} If there's an issue deleting the cron job or retrieving the list 20 | * 21 | * @example 22 | * ```bash 23 | * yarn cron:delete 24 | * ``` 25 | */ 26 | async function deleteCron() { 27 | const cronId = "ADD_CRON_ID_HERE"; 28 | 29 | const client = new Client({ 30 | apiUrl: process.env.LANGGRAPH_API_URL, 31 | }); 32 | 33 | await client.crons.delete(cronId); 34 | console.log("\n\nDeleted cron\n\n"); 35 | const crons = await client.crons.search(); 36 | console.log("\n\nAll Crons\n\n"); 37 | console.dir(crons, { depth: null }); 38 | } 39 | 40 | deleteCron().catch(console.error); 41 | -------------------------------------------------------------------------------- /scripts/crons/list-crons.ts: -------------------------------------------------------------------------------- 1 | import "dotenv/config"; 2 | import { Client } from "@langchain/langgraph-sdk"; 3 | 4 | /** 5 | * Retrieves and displays a list of all configured cron jobs from LangGraph. 6 | * 7 | * This function connects to the LangGraph API and fetches all existing cron jobs, 8 | * then logs them to the console for inspection. 
9 | * 10 | * @async 11 | * @returns {Promise} A promise that resolves when the cron jobs are retrieved 12 | * and displayed 13 | * @throws {Error} If there's an issue connecting to the API or retrieving the cron jobs 14 | * 15 | * @example 16 | * ```bash 17 | * yarn cron:list 18 | * ``` 19 | */ 20 | async function listCrons() { 21 | const client = new Client({ 22 | apiUrl: process.env.LANGGRAPH_API_URL, 23 | }); 24 | 25 | const crons = await client.crons.search(); 26 | console.log("Crons"); 27 | console.dir(crons, { depth: null }); 28 | } 29 | 30 | listCrons().catch(console.error); 31 | -------------------------------------------------------------------------------- /scripts/delete-run-thread.ts: -------------------------------------------------------------------------------- 1 | import "dotenv/config"; 2 | import { Client } from "@langchain/langgraph-sdk"; 3 | 4 | // Uncomment to delete a single run & thread 5 | 6 | // async function deleteRunAndThread() { 7 | // const threadId = "ADD_THREAD_ID_HERE"; 8 | // const runId = "ADD_RUN_ID_HERE"; 9 | // const client = new Client({ 10 | // apiUrl: process.env.LANGGRAPH_API_URL, 11 | // }); 12 | 13 | // await client.runs.delete(threadId, runId); 14 | // await client.threads.delete(threadId); 15 | // } 16 | 17 | // deleteRunAndThread().catch(console.error); 18 | 19 | async function deleteRunsAndThreads() { 20 | const runAndThreadIds = [ 21 | { 22 | runId: "ADD_RUN_ID_HERE", 23 | threadId: "ADD_THREAD_ID_HERE", 24 | }, 25 | // ... 
26 | ]; 27 | 28 | const client = new Client({ 29 | apiUrl: process.env.LANGGRAPH_API_URL, 30 | }); 31 | 32 | await Promise.all( 33 | runAndThreadIds.map(async ({ runId, threadId }) => { 34 | try { 35 | await client.runs.delete(threadId, runId); 36 | } catch (e) { 37 | console.error( 38 | "Failed to delete run", 39 | runId, 40 | "from thread", 41 | threadId, 42 | e, 43 | ); 44 | } 45 | 46 | try { 47 | await client.threads.delete(threadId); 48 | } catch (e) { 49 | console.error("Failed to delete thread", threadId, e); 50 | } 51 | }), 52 | ); 53 | } 54 | 55 | deleteRunsAndThreads().catch(console.error); 56 | -------------------------------------------------------------------------------- /scripts/generate-post.ts: -------------------------------------------------------------------------------- 1 | import "dotenv/config"; 2 | import { Client } from "@langchain/langgraph-sdk"; 3 | import { 4 | SKIP_CONTENT_RELEVANCY_CHECK, 5 | SKIP_USED_URLS_CHECK, 6 | TEXT_ONLY_MODE, 7 | } from "../src/agents/generate-post/constants.js"; 8 | 9 | /** 10 | * Generate a post based on a LangChain blog post. 11 | * This may be modified to generate posts for other content. 12 | */ 13 | async function invokeGraph() { 14 | const link = "https://blog.langchain.dev/customers-appfolio/"; 15 | 16 | const client = new Client({ 17 | apiUrl: process.env.LANGGRAPH_API_URL || "http://localhost:54367", 18 | }); 19 | 20 | const { thread_id } = await client.threads.create(); 21 | await client.runs.create(thread_id, "generate_post", { 22 | input: { 23 | links: [link], 24 | }, 25 | config: { 26 | configurable: { 27 | // By default, the graph will read these values from the environment 28 | // [TWITTER_USER_ID]: process.env.TWITTER_USER_ID, 29 | // [LINKEDIN_USER_ID]: process.env.LINKEDIN_USER_ID, 30 | // Set [TEXT_ONLY_MODE] to true to run the graph in a basic text-only mode (it is disabled below). 31 | // If you followed the full setup instructions, you may remove this line.
32 | [TEXT_ONLY_MODE]: false, 33 | // These will skip content relevancy checks and used URLs checks 34 | [SKIP_CONTENT_RELEVANCY_CHECK]: true, 35 | [SKIP_USED_URLS_CHECK]: true, 36 | }, 37 | }, 38 | }); 39 | } 40 | 41 | invokeGraph().catch(console.error); 42 | -------------------------------------------------------------------------------- /scripts/get-all-used-links.ts: -------------------------------------------------------------------------------- 1 | import "dotenv/config"; 2 | import { Client } from "@langchain/langgraph-sdk"; 3 | import { extractUrls } from "../src/agents/utils.js"; 4 | 5 | async function getCurrentInterrupts() { 6 | const client = new Client({ 7 | apiUrl: process.env.LANGGRAPH_API_URL, 8 | }); 9 | 10 | const interrupts = await client.threads.search({ 11 | status: "interrupted", 12 | limit: 100, 13 | }); 14 | 15 | const links: string[] = interrupts.flatMap( 16 | (i) => (i.values as Record).links, 17 | ); 18 | 19 | return links; 20 | } 21 | 22 | async function getScheduledPosts() { 23 | const client = new Client({ 24 | apiUrl: process.env.LANGGRAPH_API_URL, 25 | }); 26 | 27 | const threads = await client.threads.search({ 28 | limit: 300, 29 | metadata: { 30 | graph_id: "upload_post", 31 | }, 32 | }); 33 | const idleAndBusyThreads = threads.filter( 34 | (t) => t.status === "idle" || t.status === "busy", 35 | ); 36 | 37 | let links: string[] = []; 38 | 39 | for (const { thread_id } of idleAndBusyThreads) { 40 | const run = await client.runs.list(thread_id); 41 | const linksFromPost = extractUrls((run[0] as any).kwargs.input.post); 42 | if (linksFromPost.length > 0) { 43 | links = links.concat(linksFromPost); 44 | } 45 | } 46 | 47 | return links; 48 | } 49 | 50 | export async function getAllUsedLinks() { 51 | const currentInterrupts = await getCurrentInterrupts(); 52 | const scheduledPosts = await getScheduledPosts(); 53 | return [...new Set(currentInterrupts.concat(scheduledPosts))]; 54 | } 55 | 56 | console.log(await getAllUsedLinks()); 57 | 
-------------------------------------------------------------------------------- /scripts/get-scheduled-runs.ts: -------------------------------------------------------------------------------- 1 | import "dotenv/config"; 2 | import { Client, Run } from "@langchain/langgraph-sdk"; 3 | import { SlackClient } from "../src/clients/slack/client.js"; 4 | import { format } from "date-fns"; 5 | import { toZonedTime } from "date-fns-tz"; 6 | 7 | type PendingRun = { 8 | thread_id: string; 9 | run_id: string; 10 | post: string; 11 | image?: { 12 | imageUrl: string; 13 | mimeType: string; 14 | }; 15 | scheduleDate: string; 16 | }; 17 | 18 | async function getScheduledRuns() { 19 | const client = new Client({ 20 | apiUrl: process.env.LANGGRAPH_API_URL, 21 | // apiUrl: "http://localhost:54367", 22 | }); 23 | 24 | const threads = await client.threads.search({ 25 | metadata: { 26 | graph_id: "upload_post", 27 | }, 28 | status: "busy", 29 | }); 30 | let pendingRuns: PendingRun[] = []; 31 | 32 | for await (const thread of threads) { 33 | const runs = await client.runs.list(thread.thread_id); 34 | const run = runs[0] as Run & { 35 | kwargs: Record; 36 | }; 37 | if (!run) { 38 | console.warn(`No run found for thread ${thread.thread_id}`); 39 | continue; 40 | } 41 | pendingRuns.push({ 42 | thread_id: thread.thread_id, 43 | run_id: run.run_id, 44 | post: run.kwargs.input.post, 45 | image: run.kwargs.input.image, 46 | scheduleDate: run.created_at, 47 | }); 48 | } 49 | 50 | // Sort the pending runs by schedule date 51 | pendingRuns.sort((a, b) => { 52 | return ( 53 | new Date(a.scheduleDate).getTime() - new Date(b.scheduleDate).getTime() 54 | ); 55 | }); 56 | 57 | const pendingRunsString = pendingRuns.map( 58 | (post, index) => `*Post ${index + 1}*: 59 | 60 | Scheduled for *${format(toZonedTime(new Date(post.scheduleDate), "America/Los_Angeles"), "MM/dd hh:mm a")} PST* 61 | 62 | Post: 63 | \`\`\` 64 | ${post.post} 65 | \`\`\` 66 | 67 | Image: 68 | \`\`\` 69 | ${post.image?.imageUrl} 70 | 
\`\`\``, 71 | ); 72 | 73 | const slackMessageContent = `Number of scheduled posts: *${pendingRuns.length}* 74 | 75 | Scheduled posts: 76 | 77 | ${pendingRunsString.join("\n\n")}`; 78 | 79 | if (process.env.SLACK_CHANNEL_ID && process.env.SLACK_CHANNEL_ID) { 80 | const slackClient = new SlackClient(); 81 | 82 | await slackClient.sendMessage( 83 | process.env.SLACK_CHANNEL_ID, 84 | slackMessageContent, 85 | ); 86 | } else { 87 | console.log(slackMessageContent); 88 | } 89 | } 90 | 91 | getScheduledRuns().catch(console.error); 92 | -------------------------------------------------------------------------------- /scripts/repurposer/create-cron.ts: -------------------------------------------------------------------------------- 1 | import "dotenv/config"; 2 | import { Client } from "@langchain/langgraph-sdk"; 3 | 4 | /** 5 | * Creates a new cron job in LangGraph for data ingestion. 6 | * 7 | * This function sets up a daily cron job that runs at midnight (00:00) to ingest data. 8 | * It uses the LangGraph Client to create a new cron job with specified configuration 9 | * and then retrieves a list of all existing cron jobs. 
10 | * 11 | * @async 12 | * @returns {Promise} A promise that resolves when the cron job is created 13 | * and the list of crons is retrieved 14 | * @throws {Error} If there's an issue creating the cron job or retrieving the list 15 | * 16 | * @example 17 | * ```bash 18 | * yarn cron:create 19 | * ``` 20 | */ 21 | async function createCron() { 22 | const client = new Client({ 23 | apiUrl: process.env.LANGGRAPH_API_URL, 24 | }); 25 | 26 | const res = await client.crons.create("ingest_repurposed_data", { 27 | schedule: "0 0 * * *", 28 | config: { 29 | configurable: { 30 | repurposerSlackChannelId: "ADD_SLACK_CHANNEL_ID_HERE", 31 | maxDaysHistory: 1, 32 | }, 33 | }, 34 | input: {}, 35 | }); 36 | console.log("Created cron"); 37 | console.log(res); 38 | 39 | const crons = await client.crons.search(); 40 | console.log("Crons"); 41 | console.log(crons); 42 | } 43 | 44 | createCron().catch(console.error); 45 | -------------------------------------------------------------------------------- /scripts/repurposer/ingest.ts: -------------------------------------------------------------------------------- 1 | import "dotenv/config"; 2 | import { Client } from "@langchain/langgraph-sdk"; 3 | 4 | async function invokeGraph() { 5 | const client = new Client({ 6 | apiUrl: process.env.LANGGRAPH_API_URL || "http://localhost:54367", 7 | }); 8 | 9 | const { thread_id } = await client.threads.create(); 10 | await client.runs.create(thread_id, "ingest_repurposed_data", { 11 | input: {}, 12 | config: { 13 | configurable: { 14 | repurposerSlackChannelId: "", 15 | }, 16 | }, 17 | }); 18 | } 19 | 20 | invokeGraph().catch(console.error); 21 | -------------------------------------------------------------------------------- /slack-messaging/.gitignore: -------------------------------------------------------------------------------- 1 | # Python-generated files 2 | __pycache__/ 3 | *.py[oc] 4 | build/ 5 | dist/ 6 | wheels/ 7 | *.egg-info 8 | .env 9 | 10 | # Virtual environments 11 | .venv 12 | 
.ipynb_checkpoints/ 13 | .python_version 14 | -------------------------------------------------------------------------------- /slack-messaging/src/langgraph_slack/__init__.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import dotenv 3 | 4 | dotenv.load_dotenv() 5 | logging.basicConfig( 6 | level=logging.INFO, 7 | format="%(asctime)s %(levelname)s %(message)s", 8 | datefmt="%Y-%m-%d %H:%M:%S", 9 | ) 10 | -------------------------------------------------------------------------------- /slack-messaging/src/langgraph_slack/__main__.py: -------------------------------------------------------------------------------- 1 | import uvicorn 2 | 3 | uvicorn.run("langgraph_slack.server:app", host="0.0.0.0", port=8080) 4 | -------------------------------------------------------------------------------- /slack-messaging/src/langgraph_slack/auth.py: -------------------------------------------------------------------------------- 1 | from langgraph_sdk import Auth 2 | 3 | 4 | auth = Auth() 5 | 6 | 7 | @auth.authenticate 8 | async def authenticate(request, path, headers, method): 9 | user_agent = headers.get(b"user-agent") 10 | if user_agent and user_agent.startswith(b"Slackbot"): 11 | return {"identity": "default-user", "permissions": ["read", "write"]} 12 | return None 13 | -------------------------------------------------------------------------------- /slack-messaging/src/langgraph_slack/config.py: -------------------------------------------------------------------------------- 1 | from os import environ 2 | import logging 3 | 4 | LOGGER = logging.getLogger(__name__) 5 | 6 | if DEPLOY_MODAL := environ.get("DEPLOY_MODAL"): 7 | DEPLOY_MODAL = DEPLOY_MODAL.lower() == "true" 8 | BOT_USER_ID = environ.get("SLACK_BOT_USER_ID") 9 | BOT_TOKEN = environ.get("SLACK_BOT_TOKEN") 10 | if DEPLOY_MODAL: 11 | if not environ.get("SLACK_BOT_TOKEN"): 12 | environ["SLACK_BOT_TOKEN"] = "fake-token" 13 | BOT_USER_ID = BOT_USER_ID or 
"fake-user-id" 14 | else: 15 | assert isinstance(BOT_TOKEN, str) 16 | # APP_TOKEN = environ["SLACK_APP_TOKEN"] 17 | 18 | 19 | LANGGRAPH_URL = environ.get("LANGGRAPH_URL") 20 | ASSISTANT_ID = environ.get("LANGGRAPH_ASSISTANT_ID", "chat") 21 | CONFIG = environ.get("CONFIG") or "{}" 22 | DEPLOYMENT_URL = environ.get("DEPLOYMENT_URL", "") 23 | SLACK_CHANNEL_ID = environ.get("SLACK_CHANNEL_ID") 24 | -------------------------------------------------------------------------------- /src/agents/curate-data/constants.ts: -------------------------------------------------------------------------------- 1 | export const NUM_POSTS_PER_SUBREDDIT = "num_posts_per_subreddit"; 2 | -------------------------------------------------------------------------------- /src/agents/curate-data/loaders/ai-news-blog.ts: -------------------------------------------------------------------------------- 1 | import { traceable } from "langsmith/traceable"; 2 | import { parseStringPromise } from "xml2js"; 3 | 4 | const AI_NEWS_BLOG_RSS_URL = "https://buttondown.com/ainews/rss"; 5 | 6 | interface RSSItem { 7 | title: string[]; 8 | link: string[]; 9 | pubDate: string[]; 10 | guid: string[]; 11 | } 12 | 13 | interface RSSFeed { 14 | rss: { 15 | channel: [ 16 | { 17 | item: RSSItem[]; 18 | }, 19 | ]; 20 | }; 21 | } 22 | 23 | /** 24 | * Loads the latest posts from the AI News Blog RSS feed. 
25 | * 26 | * @returns {Promise} Array of links 27 | */ 28 | async function aiNewsBlogLoaderFunc(): Promise { 29 | const lastCheckTime = new Date(new Date().getTime() - 96 * 60 * 60 * 1000); // 96 hours (4 days) ago 30 | 31 | try { 32 | // Fetch the RSS feed 33 | const response = await fetch(AI_NEWS_BLOG_RSS_URL); 34 | if (!response.ok) { 35 | throw new Error(`Failed to fetch RSS feed: ${response.statusText}`); 36 | } 37 | 38 | // Get the text content 39 | const xmlContent = await response.text(); 40 | // Parse the XML content 41 | const parsedFeed = (await parseStringPromise(xmlContent)) as RSSFeed; 42 | 43 | // Get all items from the feed 44 | const items = parsedFeed.rss.channel[0].item; 45 | 46 | // Filter for only new items 47 | const filteredItems = items.filter((item) => { 48 | const pubDate = new Date(item.pubDate[0]); 49 | return pubDate > lastCheckTime; 50 | }); 51 | 52 | // Return array of links 53 | return filteredItems.map((item) => item.link[0]); 54 | } catch (error) { 55 | console.error("Error fetching RSS feed:", error); 56 | throw error; 57 | } 58 | } 59 | 60 | export const aiNewsBlogLoader = traceable(aiNewsBlogLoaderFunc, { 61 | name: "ai-news-loader", 62 | }); 63 | -------------------------------------------------------------------------------- /src/agents/curate-data/loaders/github/trending.ts: -------------------------------------------------------------------------------- 1 | import { BaseStore } from "@langchain/langgraph"; 2 | import { 3 | getGitHubRepoURLs, 4 | putGitHubRepoURLs, 5 | } from "../../utils/stores/github-repos.js"; 6 | import { getUniqueArrayItems } from "../../utils/get-unique-array.js"; 7 | import * as cheerio from "cheerio"; 8 | import { traceable } from "langsmith/traceable"; 9 | 10 | const TYPESCRIPT_TRENDING_URL = 11 | "https://github.com/trending/typescript?since=daily"; 12 | const PYTHON_TRENDING_URL = "https://github.com/trending/python?since=daily"; 13 | 14 | // Check github dependabot for depending on langchain 15 | // Check
for github langchain tags 16 | async function githubTrendingLoaderFunc(store: BaseStore | undefined) { 17 | const fetchRepos = async (url: string) => { 18 | const response = await fetch(url); 19 | const html = await response.text(); 20 | const $ = cheerio.load(html); 21 | 22 | return $("h2.h3.lh-condensed") 23 | .map((_, element) => { 24 | const repoPath = $(element).find("a").attr("href"); 25 | return repoPath ? `https://github.com${repoPath}` : null; 26 | }) 27 | .get() 28 | .filter((url): url is string => url !== null); 29 | }; 30 | 31 | const [pythonRepos, typescriptRepos] = await Promise.all([ 32 | fetchRepos(PYTHON_TRENDING_URL), 33 | fetchRepos(TYPESCRIPT_TRENDING_URL), 34 | ]); 35 | 36 | const processedRepos = await getGitHubRepoURLs(store); 37 | const uniqueRepos = getUniqueArrayItems(processedRepos, [ 38 | ...pythonRepos, 39 | ...typescriptRepos, 40 | ]); 41 | const allRepos = Array.from(new Set([...processedRepos, ...uniqueRepos])); 42 | 43 | await putGitHubRepoURLs(allRepos, store); 44 | 45 | return uniqueRepos; 46 | } 47 | 48 | export const githubTrendingLoader = traceable(githubTrendingLoaderFunc, { 49 | name: "github-loader", 50 | }); 51 | -------------------------------------------------------------------------------- /src/agents/curate-data/loaders/latent-space.ts: -------------------------------------------------------------------------------- 1 | import * as cheerio from "cheerio"; 2 | import { 3 | getLatentSpaceLinks, 4 | putLatentSpaceLinks, 5 | } from "../utils/stores/latent-space-links.js"; 6 | import { BaseStore } from "@langchain/langgraph"; 7 | import { getUniqueArrayItems } from "../utils/get-unique-array.js"; 8 | import { traceable } from "langsmith/traceable"; 9 | 10 | async function latentSpaceLoaderFunc(store: BaseStore | undefined) { 11 | const siteMapUrl = "https://www.latent.space/sitemap/2025"; 12 | 13 | const links = await fetch(siteMapUrl) 14 | .then((response) => response.text()) 15 | .then((html) => { 16 | const $ = 
cheerio.load(html); 17 | 18 | const links = $(".sitemap-link") 19 | .map((_, element) => $(element).attr("href")) 20 | .get(); 21 | 22 | return links; 23 | }); 24 | 25 | const processedLinks = await getLatentSpaceLinks(store); 26 | const uniqueLinks = getUniqueArrayItems(processedLinks, links); 27 | const allLinks = Array.from(new Set([...processedLinks, ...uniqueLinks])); 28 | 29 | await putLatentSpaceLinks(allLinks, store); 30 | 31 | return uniqueLinks; 32 | } 33 | 34 | export const latentSpaceLoader = traceable(latentSpaceLoaderFunc, { 35 | name: "latent-space-loader", 36 | }); 37 | -------------------------------------------------------------------------------- /src/agents/curate-data/loaders/tests/ai-news-blog.int.test.ts: -------------------------------------------------------------------------------- 1 | import { test, expect } from "@jest/globals"; 2 | import { aiNewsBlogLoader } from "../ai-news-blog.js"; 3 | 4 | test("aiNewsBlogLoader", async () => { 5 | const results = await aiNewsBlogLoader(); 6 | console.log(results); 7 | expect(results.length).toBeGreaterThan(0); 8 | }); 9 | -------------------------------------------------------------------------------- /src/agents/curate-data/loaders/tests/github.int.test.ts: -------------------------------------------------------------------------------- 1 | import { test, expect } from "@jest/globals"; 2 | import { InMemoryStore } from "@langchain/langgraph"; 3 | import { langchainDependencyReposLoader } from "../github/langchain.js"; 4 | import { githubTrendingLoader } from "../github/trending.js"; 5 | 6 | test("githubTrendingLoader", async () => { 7 | const store = new InMemoryStore(); 8 | const config = { 9 | store, 10 | }; 11 | const results = await githubTrendingLoader(config.store); 12 | console.log("\n\nTEST COMPLETED\n\n"); 13 | console.log("results.length", results); 14 | expect(results.length).toBeGreaterThan(0); 15 | 16 | // This should return 0 results due to all the links being in the store 17 | const 
results2 = await githubTrendingLoader(config.store); 18 | console.log("\n\nTEST COMPLETED\n\n"); 19 | console.log("results2.length", results2); 20 | expect(results2.length).toBe(0); 21 | }); 22 | 23 | test("langchainDependencyReposLoader", async () => { 24 | const store = new InMemoryStore(); 25 | const config = { 26 | store, 27 | }; 28 | const results = await langchainDependencyReposLoader(config.store); 29 | console.log("\n\nTEST COMPLETED\n\n"); 30 | console.log("results.length", results.length); 31 | console.log(results); 32 | expect(results.length).toBe(10); 33 | }, 240000); // 4 minutes since there's 30/s delays after each 5 requests 34 | -------------------------------------------------------------------------------- /src/agents/curate-data/loaders/tests/latent-space.int.test.ts: -------------------------------------------------------------------------------- 1 | import { test, expect } from "@jest/globals"; 2 | import { latentSpaceLoader } from "../latent-space.js"; 3 | import { InMemoryStore } from "@langchain/langgraph"; 4 | import { putLatentSpaceLinks } from "../../utils/stores/latent-space-links.js"; 5 | 6 | test("Latent space loader", async () => { 7 | const store = new InMemoryStore(); 8 | const config = { 9 | store, 10 | }; 11 | // Seed the store with some existing values 12 | await putLatentSpaceLinks( 13 | [ 14 | "https://www.latent.space/p/2024-simonw", 15 | "https://www.latent.space/p/o1-skill-issue", 16 | "https://www.latent.space/p/exa", 17 | ], 18 | config.store, 19 | ); 20 | const data = await latentSpaceLoader(config.store); 21 | expect(data.length).toBeGreaterThan(1); 22 | }); 23 | -------------------------------------------------------------------------------- /src/agents/curate-data/loaders/tests/reddit.int.test.ts: -------------------------------------------------------------------------------- 1 | import { test, expect } from "@jest/globals"; 2 | import { getRedditPosts } from "../reddit.js"; 3 | import { InMemoryStore } from 
"@langchain/langgraph"; 4 | 5 | test("getRedditPosts", async () => { 6 | const store = new InMemoryStore(); 7 | const config = { 8 | store, 9 | }; 10 | const results = await getRedditPosts(config.store); 11 | console.log("\n\nTEST COMPLETED\n\n"); 12 | console.log("results.length", results.length); 13 | expect(results.length).toBeGreaterThan(0); 14 | }); 15 | -------------------------------------------------------------------------------- /src/agents/curate-data/loaders/tests/twitter.int.test.ts: -------------------------------------------------------------------------------- 1 | import fs from "fs/promises"; 2 | import { test, expect } from "@jest/globals"; 3 | import { twitterLoader } from "../twitter.js"; 4 | 5 | test("twitterLoader", async () => { 6 | const resultsPath = 7 | "/Users/bracesproul/code/lang-chain-ai/projects/social-media-agent/src/agents/curate-reports/nodes/tests/data/tweets-3.json"; 8 | const results = await twitterLoader(); 9 | console.log("\n\nTEST COMPLETED\n\n"); 10 | console.log("results.length", results.length); 11 | await fs.writeFile(resultsPath, JSON.stringify(results)); 12 | expect(results.length).toBeGreaterThan(0); 13 | }); 14 | -------------------------------------------------------------------------------- /src/agents/curate-data/nodes/format-data.ts: -------------------------------------------------------------------------------- 1 | import { CurateDataState } from "../state.js"; 2 | 3 | export async function formatData( 4 | state: CurateDataState, 5 | ): Promise> { 6 | return { 7 | curatedData: { 8 | tweetsGroupedByContent: state.tweetsGroupedByContent, 9 | redditPosts: state.redditPosts, 10 | generalContents: state.pageContents?.map((pc, idx) => ({ 11 | pageContent: pc, 12 | relevantLinks: (state.relevantLinks?.[idx] || []) as string[], 13 | })), 14 | githubTrendingData: state.githubTrendingData, 15 | }, 16 | }; 17 | } 18 | -------------------------------------------------------------------------------- 
/src/agents/curate-data/nodes/tweets/prompts.ts: -------------------------------------------------------------------------------- 1 | export const GROUP_BY_CONTENT_CRITERIA = ` 2 | - Tweets discussing a new model, benchmark, tool, product or other released by an individual or organization 3 | - Tweets discussing a new UI/UX pattern for AI applications 4 | - Tweets discussing pitfalls of specific prompting strategies when working with LLMs 5 | - General news or updates about AI 6 | - You should try to group your tweets into fine-grained topics, to avoid grouping unrelated tweets into the same group. 7 | 8 | 9 | 10 | Tweets which discuss/reference the same model, benchmark, product, tool, etc should be grouped together. Ensure you do not group unrelated tweets together, unless you believe they are relevant to each others subjects. 11 | Remember, you are allowed to put the same tweet into multiple groups, if you think they're relevant to each other. This should be used if a single tweet is relevant to multiple topics. 12 | If you think a tweet is talking about a model, benchmark, tool, product or other, do your very best to identify what exactly it is, and group it accordingly. 
13 | `; 14 | -------------------------------------------------------------------------------- /src/agents/curate-data/nodes/tweets/tests/group-by-content.int.test.ts: -------------------------------------------------------------------------------- 1 | import fs from "fs"; 2 | import * as ls from "langsmith/jest"; 3 | import { SimpleEvaluator } from "langsmith/jest"; 4 | import { TweetV2 } from "twitter-api-v2"; 5 | import { groupTweetsByContent } from "../group-tweets-by-content.js"; 6 | import { TweetsGroupedByContent } from "../../../types.js"; 7 | import { formatInTimeZone } from "date-fns-tz"; 8 | 9 | const tweetEvaluator: SimpleEvaluator = () => { 10 | return { 11 | key: "tweet_generation", 12 | score: 1, 13 | }; 14 | }; 15 | 16 | function loadTweets(): TweetV2[] { 17 | const tweets = JSON.parse( 18 | fs.readFileSync( 19 | // "src/agents/curate-data/nodes/tweets/tests/data/relevant-tweets/relevant-01-17-2025-12-10.json", 20 | // "src/agents/curate-data/nodes/tweets/tests/data/relevant-tweets/relevant-01-19-2025-12-59.json", 21 | "src/agents/curate-data/nodes/tweets/tests/data/relevant-tweets/relevant-01-20-2025-11-28.json", 22 | "utf-8", 23 | ), 24 | ); 25 | return tweets; 26 | } 27 | 28 | async function saveTweets(tweets: TweetsGroupedByContent[]) { 29 | const currentDateUTC = new Date().toISOString(); 30 | const formattedPSTDate = formatInTimeZone( 31 | currentDateUTC, 32 | "America/Los_Angeles", 33 | "MM-dd-yyyy-HH-mm", 34 | ); 35 | await fs.promises.writeFile( 36 | `src/agents/curate-data/nodes/tweets/tests/data/grouped-by-llms/${formattedPSTDate}.json`, 37 | JSON.stringify(tweets), 38 | ); 39 | } 40 | 41 | const validatedTweets = loadTweets(); 42 | 43 | ls.describe("SMA - Curate Data - Group By Content", () => { 44 | ls.test( 45 | "Can group tweets", 46 | { 47 | inputs: { validatedTweets }, 48 | expected: {}, 49 | }, 50 | async ({ inputs }) => { 51 | console.log( 52 | "Starting test with", 53 | inputs.validatedTweets.length, 54 | "tweets", 55 | ); 56 | 57 
| const result = await groupTweetsByContent(inputs as any); 58 | 59 | if (!result.tweetsGroupedByContent?.length) { 60 | console.log("No tweets were found that are relevant to AI"); 61 | return result; 62 | } 63 | 64 | await saveTweets(result.tweetsGroupedByContent || []); 65 | 66 | await ls.expect(result).evaluatedBy(tweetEvaluator).toBe(1); 67 | return result; 68 | }, 69 | ); 70 | }); 71 | -------------------------------------------------------------------------------- /src/agents/curate-data/nodes/tweets/tests/re-group-reflect.int.test.ts: -------------------------------------------------------------------------------- 1 | import * as ls from "langsmith/jest"; 2 | import fs from "fs"; 3 | import { TweetsGroupedByContent } from "../../../types.js"; 4 | import { formatInTimeZone } from "date-fns-tz"; 5 | import { reflectOnTweetGroups } from "../reflect-tweet-groups.js"; 6 | import { reGroupTweets } from "../re-group-tweets.js"; 7 | 8 | const tweetEvaluator: ls.SimpleEvaluator = () => { 9 | return { 10 | key: "tweet_generation", 11 | score: 1, 12 | }; 13 | }; 14 | 15 | function loadTweets(): TweetsGroupedByContent[] { 16 | const tweets = JSON.parse( 17 | fs.readFileSync( 18 | // "src/agents/curate-data/nodes/tweets/tests/data/grouped-by-llms/01-19-2025-13-36.json", 19 | // "src/agents/curate-data/nodes/tweets/tests/data/grouped-by-llms/01-19-2025-13-46.json", 20 | "src/agents/curate-data/nodes/tweets/tests/data/grouped-by-llms/01-20-2025-13-00.json", 21 | "utf-8", 22 | ), 23 | ); 24 | return tweets; 25 | } 26 | 27 | async function saveTweets(tweets: TweetsGroupedByContent[]) { 28 | const currentDateUTC = new Date().toISOString(); 29 | const formattedPSTDate = formatInTimeZone( 30 | currentDateUTC, 31 | "America/Los_Angeles", 32 | "MM-dd-yyyy-HH-mm", 33 | ); 34 | await fs.promises.writeFile( 35 | `src/agents/curate-data/nodes/tweets/tests/data/grouped-by-llms/re-grouped/${formattedPSTDate}.json`, 36 | JSON.stringify(tweets), 37 | ); 38 | } 39 | 40 | const 
tweetsGroupedByContent = loadTweets(); 41 | 42 | ls.describe("SMA - Curate Data - Reflect and Re-group", () => { 43 | ls.test( 44 | "Can reflect and re-group tweets", 45 | { 46 | inputs: { tweetsGroupedByContent }, 47 | expected: {}, 48 | }, 49 | async ({ inputs }) => { 50 | console.log( 51 | "Starting test with", 52 | inputs.tweetsGroupedByContent.length, 53 | "tweet groups", 54 | ); 55 | 56 | const reflectionResult = await reflectOnTweetGroups(inputs as any); 57 | 58 | if (!reflectionResult.similarGroupIndices?.length) { 59 | console.log("No groups found needing reflection"); 60 | return reflectionResult; 61 | } 62 | console.log( 63 | "reflectionResult.similarGroupIndices\n------------\n", 64 | reflectionResult.similarGroupIndices, 65 | "\n------------\n", 66 | ); 67 | 68 | const reGroupResult = await reGroupTweets({ 69 | tweetsGroupedByContent: inputs.tweetsGroupedByContent, 70 | similarGroupIndices: reflectionResult.similarGroupIndices, 71 | } as any); 72 | 73 | console.log("reGroupResult\n------------\n"); 74 | console.dir(reGroupResult, { depth: null }); 75 | console.log("\n------------\n"); 76 | 77 | await saveTweets(reGroupResult.tweetsGroupedByContent || []); 78 | 79 | await ls.expect(reGroupResult).evaluatedBy(tweetEvaluator).toBe(1); 80 | return reGroupResult; 81 | }, 82 | ); 83 | }); 84 | -------------------------------------------------------------------------------- /src/agents/curate-data/nodes/tweets/tests/validate-bulk-tweets.int.test.ts: -------------------------------------------------------------------------------- 1 | import fs from "fs"; 2 | import * as ls from "langsmith/jest"; 3 | import { validateBulkTweets } from "../../validate-bulk-tweets.js"; 4 | import { SimpleEvaluator } from "langsmith/jest"; 5 | import { formatInTimeZone } from "date-fns-tz"; 6 | import { TweetV2 } from "twitter-api-v2"; 7 | 8 | const tweetEvaluator: SimpleEvaluator = () => { 9 | return { 10 | key: "tweet_generation", 11 | score: 1, 12 | }; 13 | }; 14 | 15 | 
function loadTweets(): TweetV2[] { 16 | const tweets = JSON.parse( 17 | fs.readFileSync( 18 | // "src/agents/curate-data/nodes/tweets/tests/data/tweets.json", 19 | // "src/agents/curate-data/nodes/tweets/tests/data/tweets-2.json", 20 | "src/agents/curate-data/nodes/tweets/tests/data/tweets-3.json", 21 | "utf-8", 22 | ), 23 | ); 24 | return tweets; 25 | } 26 | 27 | function saveRelevantTweets(tweets: TweetV2[]): void { 28 | try { 29 | const currentDateUTC = new Date().toISOString(); 30 | const formattedPSTDate = formatInTimeZone( 31 | currentDateUTC, 32 | "America/Los_Angeles", 33 | "MM-dd-yyyy-HH-mm", 34 | ); 35 | fs.writeFileSync( 36 | `src/agents/curate-data/nodes/tweets/tests/data/relevant-tweets/relevant-${formattedPSTDate}.json`, 37 | JSON.stringify(tweets), 38 | ); 39 | } catch (e) { 40 | console.error("Failed to save relevant tweets:", e); 41 | console.log("Tweets:", tweets); 42 | } 43 | } 44 | 45 | const rawTweets = loadTweets(); 46 | 47 | ls.describe("SMA - Curate Data - Validate Bulk Tweets", () => { 48 | ls.test( 49 | "Can validate tweets", 50 | // You can pass an "iterations" parameter or other LS config here if desired 51 | { 52 | inputs: { rawTweets }, 53 | expected: {}, 54 | }, 55 | async ({ inputs }) => { 56 | console.log("Starting test with", inputs.rawTweets.length, "tweets"); 57 | const result = await validateBulkTweets(inputs as any); 58 | if (result.validatedTweets?.length === 0) { 59 | console.log("No tweets were found that are relevant to AI"); 60 | return result; 61 | } 62 | 63 | saveRelevantTweets(result.validatedTweets || []); 64 | await ls.expect(result).evaluatedBy(tweetEvaluator).toBe(1); 65 | return result; 66 | }, 67 | ); 68 | }); 69 | -------------------------------------------------------------------------------- /src/agents/curate-data/nodes/verify-github-wrapper.ts: -------------------------------------------------------------------------------- 1 | import { LangGraphRunnableConfig } from "@langchain/langgraph"; 2 | import { 
CurateDataState } from "../state.js"; 3 | import { GitHubTrendingData } from "../types.js"; 4 | import { verifyGitHubContent } from "../../shared/nodes/verify-github.js"; 5 | 6 | export async function verifyGitHubWrapper( 7 | state: CurateDataState, 8 | config: LangGraphRunnableConfig, 9 | ): Promise> { 10 | const verifiedRepoData: GitHubTrendingData[] = []; 11 | 12 | // Iterate over each raw GitHub repo & verify + extract page contents 13 | for await (const repoURL of state.rawTrendingRepos) { 14 | const results = await verifyGitHubContent( 15 | { 16 | link: repoURL, 17 | }, 18 | config, 19 | ); 20 | 21 | if ( 22 | results.relevantLinks && 23 | results.relevantLinks.length > 0 && 24 | results.pageContents && 25 | results.pageContents.length > 0 26 | ) { 27 | verifiedRepoData.push({ 28 | repoURL, 29 | pageContent: results.pageContents[0], // Take first page content, as there should only be one 30 | }); 31 | } 32 | } 33 | 34 | return { 35 | githubTrendingData: verifiedRepoData, 36 | }; 37 | } 38 | -------------------------------------------------------------------------------- /src/agents/curate-data/nodes/verify-reddit-wrapper.ts: -------------------------------------------------------------------------------- 1 | import { LangGraphRunnableConfig } from "@langchain/langgraph"; 2 | import { CurateDataState } from "../state.js"; 3 | import { verifyRedditPostGraph } from "../../verify-reddit-post/verify-reddit-post-graph.js"; 4 | import { RedditPostsWithExternalData } from "../../verify-reddit-post/types.js"; 5 | import { VerifyRedditPostConfigurable } from "../../verify-reddit-post/verify-reddit-post-state.js"; 6 | 7 | export async function verifyRedditWrapper( 8 | state: CurateDataState, 9 | config: LangGraphRunnableConfig, 10 | ): Promise> { 11 | const verifiedRedditPosts: RedditPostsWithExternalData[] = []; 12 | 13 | for (const post of state.rawRedditPosts) { 14 | try { 15 | const result = await verifyRedditPostGraph.invoke( 16 | { 17 | redditPost: post, 18 | }, 
19 | config, 20 | ); 21 | 22 | if ( 23 | result.relevantLinks && 24 | result.relevantLinks.length > 0 && 25 | result.pageContents && 26 | result.pageContents.length > 0 27 | ) { 28 | verifiedRedditPosts.push({ 29 | ...post, 30 | externalData: result.pageContents.map((pageContent, idx) => ({ 31 | pageContent, 32 | url: result.relevantLinks?.[idx] || "", 33 | })), 34 | }); 35 | } 36 | } catch (e) { 37 | console.error("Failed to verify Reddit post", e); 38 | } 39 | } 40 | 41 | return { 42 | redditPosts: verifiedRedditPosts, 43 | }; 44 | } 45 | -------------------------------------------------------------------------------- /src/agents/curate-data/state.ts: -------------------------------------------------------------------------------- 1 | import { Annotation } from "@langchain/langgraph"; 2 | import { 3 | CuratedData, 4 | GitHubTrendingData, 5 | ThreadRunId, 6 | TweetsGroupedByContent, 7 | } from "./types.js"; 8 | import { TweetV2 } from "twitter-api-v2"; 9 | import { SimpleRedditPostWithComments } from "../../clients/reddit/types.js"; 10 | import { RedditPostsWithExternalData } from "../verify-reddit-post/types.js"; 11 | import { NUM_POSTS_PER_SUBREDDIT } from "./constants.js"; 12 | import { Source } from "../supervisor/types.js"; 13 | import { VerifyLinksResultAnnotation } from "../verify-links/verify-links-state.js"; 14 | 15 | export const CurateDataAnnotation = Annotation.Root({ 16 | ...VerifyLinksResultAnnotation.spec, 17 | /** 18 | * The final data object to be returned. 19 | */ 20 | curatedData: Annotation, 21 | /** 22 | * Collection of saved tweets from a Twitter list. 23 | * Each tweet contains metadata like ID, creation time, text content, and media references. 24 | */ 25 | rawTweets: Annotation, 26 | /** 27 | * A list of validated tweets. 28 | */ 29 | validatedTweets: Annotation, 30 | /** 31 | * Tweets which have been grouped by their external URLs. 32 | * Each group contains a list of tweets which reference the same external URL. 
33 | */ 34 | tweetsGroupedByContent: Annotation, 35 | /** 36 | * Array of indices of similar groups of tweets to re-evaluate the grouping of. 37 | */ 38 | similarGroupIndices: Annotation, 39 | 40 | /** 41 | * List of trending GitHub repository names/paths. 42 | */ 43 | rawTrendingRepos: Annotation, 44 | /** 45 | * A list of trending GitHub repositories & README contents which have been 46 | * validated. 47 | */ 48 | githubTrendingData: Annotation, 49 | 50 | /** 51 | * A list of new AI Newsletter posts. 52 | */ 53 | aiNewsPosts: Annotation, 54 | /** 55 | * Collection of saved Reddit posts and their associated comments. 56 | * Each post contains the original content and relevant discussion threads. 57 | */ 58 | rawRedditPosts: Annotation, 59 | /** 60 | * A list of verified Reddit posts. 61 | */ 62 | redditPosts: Annotation, 63 | /** 64 | * The thread & run IDs for runs kicked off after curating data. 65 | */ 66 | threadRunIds: Annotation, 67 | /** 68 | * General URLs to scrape content from. 69 | */ 70 | generalUrls: Annotation, 71 | }); 72 | 73 | export const CurateDataConfigurableAnnotation = Annotation.Root({ 74 | /** 75 | * The sources to ingest from. 76 | */ 77 | sources: Annotation, 78 | /** 79 | * The number of posts to fetch per subreddit when ingesting Reddit posts. 
80 | */ 81 | [NUM_POSTS_PER_SUBREDDIT]: Annotation(), 82 | }); 83 | 84 | export type CurateDataState = typeof CurateDataAnnotation.State; 85 | export type CurateDataConfigurable = 86 | typeof CurateDataConfigurableAnnotation.State; 87 | -------------------------------------------------------------------------------- /src/agents/curate-data/types.ts: -------------------------------------------------------------------------------- 1 | import { TweetV2 } from "twitter-api-v2"; 2 | import { SimpleRedditPostWithComments } from "../../clients/reddit/types.js"; 3 | 4 | export type TweetV2WithURLs = TweetV2 & { 5 | external_urls: string[]; 6 | }; 7 | 8 | export type GitHubTrendingData = { 9 | repoURL: string; 10 | pageContent: string; 11 | }; 12 | 13 | export type TweetsGroupedByContent = { 14 | explanation: string; 15 | tweets: TweetV2WithURLs[]; 16 | }; 17 | 18 | export type ThreadRunId = { thread_id: string; run_id: string }; 19 | 20 | export type CuratedData = { 21 | /** 22 | * The tweets grouped by content. 23 | */ 24 | tweetsGroupedByContent?: TweetsGroupedByContent[]; 25 | /** 26 | * If reports were curated, they will be included here. 27 | */ 28 | redditPosts?: SimpleRedditPostWithComments[]; 29 | /** 30 | * The general content scraped from URLs 31 | */ 32 | generalContents?: { 33 | pageContent: string; 34 | relevantLinks: string[]; 35 | }[]; 36 | /** 37 | * The GitHub trending data. 
38 | */ 39 | githubTrendingData?: GitHubTrendingData[]; 40 | }; 41 | -------------------------------------------------------------------------------- /src/agents/curate-data/utils/created-at-after.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Returns true if the createdAt date is after a reference date 3 | * @param createdAt The createdAt date in ISO 8601 format 4 | * @param referenceDate The reference date to compare against 5 | * @returns true if the createdAt date is after a reference date, false otherwise 6 | */ 7 | export function createdAtAfter(createdAt: string, referenceDate: Date) { 8 | return new Date(createdAt).getTime() > referenceDate.getTime(); 9 | } 10 | -------------------------------------------------------------------------------- /src/agents/curate-data/utils/get-unique-array.ts: -------------------------------------------------------------------------------- 1 | export function getUniqueArrayItems(existing: T[], toAdd: T[]): T[] { 2 | const existingSet = new Set(existing); 3 | return toAdd.filter((item) => !existingSet.has(item)); 4 | } 5 | -------------------------------------------------------------------------------- /src/agents/curate-data/utils/stores/github-repos.ts: -------------------------------------------------------------------------------- 1 | import { BaseStore } from "@langchain/langgraph"; 2 | 3 | export const NAMESPACE = ["saved_data", "github_repos"]; 4 | export const KEY = "urls"; 5 | export const OBJECT_KEY = "data"; 6 | 7 | export async function putGitHubRepoURLs( 8 | repoUrls: string[], 9 | store: BaseStore | undefined, 10 | ) { 11 | if (!store) { 12 | throw new Error("No store provided"); 13 | } 14 | await store.put(NAMESPACE, KEY, { 15 | [OBJECT_KEY]: repoUrls, 16 | }); 17 | } 18 | 19 | export async function getGitHubRepoURLs( 20 | store: BaseStore | undefined, 21 | ): Promise { 22 | if (!store) { 23 | throw new Error("No store provided"); 24 | } 25 | const repoUrls = 
await store.get(NAMESPACE, KEY); 26 | if (!repoUrls) { 27 | return []; 28 | } 29 | return repoUrls.value?.[OBJECT_KEY] || []; 30 | } 31 | -------------------------------------------------------------------------------- /src/agents/curate-data/utils/stores/latent-space-links.ts: -------------------------------------------------------------------------------- 1 | import { BaseStore } from "@langchain/langgraph"; 2 | 3 | export const NAMESPACE = ["saved_data", "latent_space"]; 4 | export const KEY = "links"; 5 | export const OBJECT_KEY = "data"; 6 | 7 | export async function putLatentSpaceLinks( 8 | links: string[], 9 | store: BaseStore | undefined, 10 | ) { 11 | if (!store) { 12 | throw new Error("No store provided"); 13 | } 14 | await store.put(NAMESPACE, KEY, { 15 | [OBJECT_KEY]: links, 16 | }); 17 | } 18 | 19 | export async function getLatentSpaceLinks( 20 | store: BaseStore | undefined, 21 | ): Promise { 22 | if (!store) { 23 | throw new Error("No store provided"); 24 | } 25 | const links = await store.get(NAMESPACE, KEY); 26 | if (!links) { 27 | return []; 28 | } 29 | return links.value?.[OBJECT_KEY] || []; 30 | } 31 | -------------------------------------------------------------------------------- /src/agents/curate-data/utils/stores/reddit-post-ids.ts: -------------------------------------------------------------------------------- 1 | import { BaseStore } from "@langchain/langgraph"; 2 | 3 | export const NAMESPACE = ["saved_data", "reddit"]; 4 | export const KEY = "post_ids"; 5 | export const OBJECT_KEY = "data"; 6 | 7 | export async function putRedditPostIds( 8 | postIds: string[], 9 | store: BaseStore | undefined, 10 | ) { 11 | if (!store) { 12 | throw new Error("No store provided"); 13 | } 14 | await store.put(NAMESPACE, KEY, { 15 | [OBJECT_KEY]: postIds, 16 | }); 17 | } 18 | 19 | export async function getRedditPostIds( 20 | store: BaseStore | undefined, 21 | ): Promise { 22 | if (!store) { 23 | throw new Error("No store provided"); 24 | } 25 | const 
savedPostIds = await store.get(NAMESPACE, KEY); 26 | if (!savedPostIds) { 27 | return []; 28 | } 29 | return savedPostIds.value?.[OBJECT_KEY] || []; 30 | } 31 | -------------------------------------------------------------------------------- /src/agents/curate-data/utils/stores/twitter.ts: -------------------------------------------------------------------------------- 1 | import { BaseStore } from "@langchain/langgraph"; 2 | 3 | export const NAMESPACE = ["saved_data", "twitter"]; 4 | export const IDS_KEY = "ids"; 5 | export const IDS_OBJECT_KEY = "data"; 6 | 7 | export const LAST_INGESTED_ID_KEY = "last_ingested_id"; 8 | export const LAST_INGESTED_ID_OBJECT_KEY = "data"; 9 | 10 | export async function putTweetIds( 11 | tweetIds: string[], 12 | store: BaseStore | undefined, 13 | ) { 14 | if (!store) { 15 | throw new Error("No store provided"); 16 | } 17 | await store.put(NAMESPACE, IDS_KEY, { 18 | [IDS_OBJECT_KEY]: tweetIds, 19 | }); 20 | } 21 | 22 | export async function getTweetIds( 23 | store: BaseStore | undefined, 24 | ): Promise { 25 | if (!store) { 26 | throw new Error("No store provided"); 27 | } 28 | const tweetIds = await store.get(NAMESPACE, IDS_KEY); 29 | if (!tweetIds) { 30 | return []; 31 | } 32 | return tweetIds.value?.[IDS_OBJECT_KEY] || []; 33 | } 34 | 35 | export async function putLastIngestedTweetId( 36 | id: string, 37 | store: BaseStore | undefined, 38 | ) { 39 | if (!store) { 40 | throw new Error("No store provided"); 41 | } 42 | await store.put(NAMESPACE, LAST_INGESTED_ID_KEY, { 43 | [LAST_INGESTED_ID_OBJECT_KEY]: id, 44 | }); 45 | } 46 | 47 | export async function getLastIngestedTweetId( 48 | store: BaseStore | undefined, 49 | ): Promise { 50 | if (!store) { 51 | throw new Error("No store provided"); 52 | } 53 | const idData = await store.get(NAMESPACE, LAST_INGESTED_ID_KEY); 54 | if (!idData) { 55 | return ""; 56 | } 57 | return idData.value?.[LAST_INGESTED_ID_OBJECT_KEY] || ""; 58 | } 59 | 
-------------------------------------------------------------------------------- /src/agents/curated-post-interrupt/index.ts: -------------------------------------------------------------------------------- 1 | import { END, START, StateGraph } from "@langchain/langgraph"; 2 | import { 3 | CuratedPostInterruptAnnotation, 4 | CuratedPostInterruptConfigurableAnnotation, 5 | CuratedPostInterruptState, 6 | CuratedPostInterruptUpdate, 7 | } from "./types.js"; 8 | import { updateScheduledDate } from "../shared/nodes/update-scheduled-date.js"; 9 | import { humanNode } from "../shared/nodes/generate-post/human-node.js"; 10 | import { schedulePost } from "../shared/nodes/generate-post/schedule-post.js"; 11 | import { rewritePost } from "../shared/nodes/generate-post/rewrite-post.js"; 12 | 13 | function rewriteOrEndConditionalEdge( 14 | state: CuratedPostInterruptState, 15 | ): 16 | | "rewritePost" 17 | | "schedulePost" 18 | | "humanNode" 19 | | "updateScheduleDate" 20 | | typeof END { 21 | if (!state.next) { 22 | return END; 23 | } 24 | 25 | if (state.next === "unknownResponse") { 26 | // If the user's response is unknown, we should route back to the human node. 27 | return "humanNode"; 28 | } 29 | return state.next; 30 | } 31 | 32 | const workflow = new StateGraph( 33 | CuratedPostInterruptAnnotation, 34 | CuratedPostInterruptConfigurableAnnotation, 35 | ) 36 | // Interrupts the node for human in the loop. 37 | .addNode( 38 | "humanNode", 39 | humanNode, 40 | ) 41 | // Schedules the post for Twitter/LinkedIn. 42 | .addNode( 43 | "schedulePost", 44 | schedulePost, 45 | ) 46 | // Rewrite a post based on the user's response. 47 | .addNode( 48 | "rewritePost", 49 | rewritePost, 50 | ) 51 | // Updated the scheduled date from the natural language response from the user. 
52 | .addNode("updateScheduleDate", updateScheduledDate) 53 | .addEdge(START, "humanNode") 54 | .addConditionalEdges("humanNode", rewriteOrEndConditionalEdge, [ 55 | "rewritePost", 56 | "schedulePost", 57 | "updateScheduleDate", 58 | "humanNode", 59 | END, 60 | ]) 61 | // Always route back to `humanNode` if the post was re-written or date was updated. 62 | .addEdge("rewritePost", "humanNode") 63 | .addEdge("updateScheduleDate", "humanNode") 64 | 65 | // Always end after scheduling the post. 66 | .addEdge("schedulePost", END); 67 | 68 | export const curatedPostInterruptGraph = workflow.compile(); 69 | curatedPostInterruptGraph.name = "Curated Post Interrupt Graph"; 70 | -------------------------------------------------------------------------------- /src/agents/curated-post-interrupt/types.ts: -------------------------------------------------------------------------------- 1 | import { Annotation, END } from "@langchain/langgraph"; 2 | import { IngestDataAnnotation } from "../ingest-data/ingest-data-state.js"; 3 | import { DateType } from "../types.js"; 4 | import { VerifyLinksResultAnnotation } from "../verify-links/verify-links-state.js"; 5 | import { 6 | POST_TO_LINKEDIN_ORGANIZATION, 7 | TEXT_ONLY_MODE, 8 | } from "../generate-post/constants.js"; 9 | import { ComplexPost } from "../shared/nodes/generate-post/types.js"; 10 | 11 | export const CuratedPostInterruptAnnotation = Annotation.Root({ 12 | /** 13 | * The links to use to generate a post. 14 | */ 15 | links: Annotation, 16 | /** 17 | * The report generated on the content of the message. Used 18 | * as context for generating the post. 19 | */ 20 | report: IngestDataAnnotation.spec.report, 21 | ...VerifyLinksResultAnnotation.spec, 22 | /** 23 | * The generated post for LinkedIn/Twitter. 24 | */ 25 | post: Annotation, 26 | /** 27 | * The complex post, if the user decides to split the URL from the main body. 
28 | * 29 | * TODO: Refactor the post/complexPost state interfaces to use a single shared interface 30 | * which includes images too. 31 | * Tracking issue: https://github.com/langchain-ai/social-media-agent/issues/144 32 | */ 33 | complexPost: Annotation, 34 | /** 35 | * The date to schedule the post for. 36 | */ 37 | scheduleDate: Annotation, 38 | /** 39 | * Response from the user for the post. Typically used to request 40 | * changes to be made to the post. 41 | */ 42 | userResponse: Annotation, 43 | /** 44 | * The node to execute next. 45 | */ 46 | next: Annotation< 47 | | "schedulePost" 48 | | "rewritePost" 49 | | "unknownResponse" 50 | | "updateScheduleDate" 51 | | typeof END 52 | | undefined 53 | >, 54 | /** 55 | * The image to attach to the post, and the MIME type. 56 | */ 57 | image: Annotation< 58 | | { 59 | imageUrl: string; 60 | mimeType: string; 61 | } 62 | | undefined 63 | >, 64 | /** 65 | * The number of times the post has been condensed. We should stop condensing after 66 | * 3 times to prevent an infinite loop. 67 | */ 68 | condenseCount: Annotation({ 69 | reducer: (_state, update) => update, 70 | default: () => 0, 71 | }), 72 | }); 73 | 74 | export type CuratedPostInterruptState = 75 | typeof CuratedPostInterruptAnnotation.State; 76 | export type CuratedPostInterruptUpdate = 77 | typeof CuratedPostInterruptAnnotation.Update; 78 | 79 | export const CuratedPostInterruptConfigurableAnnotation = Annotation.Root({ 80 | /** 81 | * Whether to post to the LinkedIn organization or the user's profile. 82 | * If true, [LINKEDIN_ORGANIZATION_ID] is required. 83 | */ 84 | [POST_TO_LINKEDIN_ORGANIZATION]: Annotation, 85 | /** 86 | * Whether or not to use text only mode throughout the graph. 87 | * If true, it will not try to extract, validate, or upload images. 88 | * Additionally, it will not be able to handle validating YouTube videos. 
89 | * @default false 90 | */ 91 | [TEXT_ONLY_MODE]: Annotation({ 92 | reducer: (_state, update) => update, 93 | default: () => false, 94 | }), 95 | }); 96 | -------------------------------------------------------------------------------- /src/agents/find-images/find-images-graph.ts: -------------------------------------------------------------------------------- 1 | import { Annotation, END, START, StateGraph } from "@langchain/langgraph"; 2 | import { findImages } from "./nodes/find-images.js"; 3 | import { validateImages } from "./nodes/validate-images.js"; 4 | import { reRankImages } from "./nodes/re-rank-images.js"; 5 | import { VerifyLinksResultAnnotation } from "../verify-links/verify-links-state.js"; 6 | import { Image } from "../types.js"; 7 | 8 | export const FindImagesAnnotation = Annotation.Root({ 9 | ...VerifyLinksResultAnnotation.spec, 10 | /** 11 | * The report generated on the content of the message. Used 12 | * as context for generating the post. 13 | */ 14 | report: Annotation, 15 | /** 16 | * The generated post for LinkedIn/Twitter. 
17 | */ 18 | post: Annotation, 19 | /** 20 | * The main image for the post 21 | */ 22 | image: Annotation, 23 | }); 24 | 25 | function validateImagesOrEnd(state: typeof FindImagesAnnotation.State) { 26 | if (state.imageOptions?.length) { 27 | return "validateImages"; 28 | } 29 | return END; 30 | } 31 | 32 | const findImagesWorkflow = new StateGraph(FindImagesAnnotation) 33 | .addNode("findImages", findImages) 34 | .addNode("validateImages", validateImages) 35 | .addNode("reRankImages", reRankImages) 36 | 37 | .addEdge(START, "findImages") 38 | 39 | .addConditionalEdges("findImages", validateImagesOrEnd, [ 40 | "validateImages", 41 | END, 42 | ]) 43 | 44 | .addEdge("validateImages", "reRankImages") 45 | 46 | .addEdge("reRankImages", END); 47 | 48 | export const findImagesGraph = findImagesWorkflow.compile(); 49 | findImagesGraph.name = "Find Images Graph"; 50 | -------------------------------------------------------------------------------- /src/agents/generate-post/constants.ts: -------------------------------------------------------------------------------- 1 | import type { BrowserContextOptions, PageScreenshotOptions } from "playwright"; 2 | 3 | export const ALLOWED_DAYS = [ 4 | "Monday", 5 | "Tuesday", 6 | "Wednesday", 7 | "Thursday", 8 | "Friday", 9 | "Saturday", 10 | "Sunday", 11 | ]; 12 | 13 | export const ALLOWED_TIMES = [ 14 | "8:00 AM", 15 | "8:10 AM", 16 | "8:20 AM", 17 | "8:30 AM", 18 | "8:40 AM", 19 | "8:50 AM", 20 | "9:00 AM", 21 | "9:10 AM", 22 | "9:20 AM", 23 | "9:30 AM", 24 | "9:40 AM", 25 | "9:50 AM", 26 | "10:00 AM", 27 | "10:10 AM", 28 | "10:20 AM", 29 | "10:30 AM", 30 | "10:40 AM", 31 | "10:50 AM", 32 | "11:00 AM", 33 | "11:10 AM", 34 | "11:20 AM", 35 | "11:30 AM", 36 | "11:40 AM", 37 | "11:50 AM", 38 | "12:00 PM", 39 | "12:10 PM", 40 | "12:20 PM", 41 | "12:30 PM", 42 | "12:40 PM", 43 | "12:50 PM", 44 | "1:00 PM", 45 | "1:10 PM", 46 | "1:20 PM", 47 | "1:30 PM", 48 | "1:40 PM", 49 | "1:50 PM", 50 | "2:00 PM", 51 | "2:10 PM", 52 | "2:20 PM", 53 | 
"2:30 PM", 54 | "2:40 PM", 55 | "2:50 PM", 56 | "3:00 PM", 57 | "3:10 PM", 58 | "3:20 PM", 59 | "3:30 PM", 60 | "3:40 PM", 61 | "3:50 PM", 62 | "4:00 PM", 63 | "4:10 PM", 64 | "4:20 PM", 65 | "4:30 PM", 66 | "4:40 PM", 67 | "4:50 PM", 68 | "5:00 PM", 69 | ]; 70 | 71 | export const GITHUB_SCREENSHOT_OPTIONS: PageScreenshotOptions = { 72 | clip: { 73 | width: 1200, 74 | height: 1500, 75 | x: 525, 76 | y: 350, 77 | }, 78 | }; 79 | export const GITHUB_BROWSER_CONTEXT_OPTIONS: BrowserContextOptions = { 80 | viewport: { 81 | width: 1920, 82 | height: 1500, 83 | }, 84 | }; 85 | 86 | // Configurable keys 87 | // LinkedIn 88 | export const LINKEDIN_PERSON_URN = "linkedInPersonUrn"; 89 | export const LINKEDIN_ORGANIZATION_ID = "linkedInOrganizationId"; 90 | export const LINKEDIN_ACCESS_TOKEN = "linkedInAccessToken"; 91 | export const POST_TO_LINKEDIN_ORGANIZATION = "postToLinkedInOrganization"; 92 | export const LINKEDIN_USER_ID = "linkedInUserId"; 93 | // Twitter 94 | export const TWITTER_USER_ID = "twitterUserId"; 95 | export const TWITTER_TOKEN = "twitterToken"; 96 | export const TWITTER_TOKEN_SECRET = "twitterTokenSecret"; 97 | export const INGEST_TWITTER_USERNAME = "ingestTwitterUsername"; 98 | // Simplified text only mode 99 | export const TEXT_ONLY_MODE = "textOnlyMode"; 100 | 101 | export const SKIP_CONTENT_RELEVANCY_CHECK = "skipContentRelevancyCheck"; 102 | 103 | export const SKIP_USED_URLS_CHECK = "skipUsedUrlsCheck"; 104 | -------------------------------------------------------------------------------- /src/agents/generate-post/nodes/auth-socials.ts: -------------------------------------------------------------------------------- 1 | import { interrupt, LangGraphRunnableConfig } from "@langchain/langgraph"; 2 | import { GeneratePostAnnotation } from "../generate-post-state.js"; 3 | import { getLinkedInAuthOrInterrupt } from "../../shared/auth/linkedin.js"; 4 | import { getTwitterAuthOrInterrupt } from "../../shared/auth/twitter.js"; 5 | import { HumanInterrupt, 
HumanResponse } from "@langchain/langgraph/prebuilt"; 6 | import { shouldPostToLinkedInOrg } from "../../utils.js"; 7 | 8 | export async function authSocialsPassthrough( 9 | _state: typeof GeneratePostAnnotation.State, 10 | config: LangGraphRunnableConfig, 11 | ) { 12 | let linkedInHumanInterrupt: HumanInterrupt | undefined = undefined; 13 | const linkedInUserId = process.env.LINKEDIN_USER_ID; 14 | if (linkedInUserId) { 15 | const postToLinkedInOrg = shouldPostToLinkedInOrg(config); 16 | linkedInHumanInterrupt = await getLinkedInAuthOrInterrupt({ 17 | linkedInUserId, 18 | returnInterrupt: true, 19 | postToOrg: postToLinkedInOrg, 20 | }); 21 | } 22 | 23 | let twitterHumanInterrupt: HumanInterrupt | undefined = undefined; 24 | const twitterUserId = process.env.TWITTER_USER_ID; 25 | if (twitterUserId) { 26 | twitterHumanInterrupt = await getTwitterAuthOrInterrupt({ 27 | twitterUserId, 28 | returnInterrupt: true, 29 | }); 30 | } 31 | 32 | if (!twitterHumanInterrupt && !linkedInHumanInterrupt) { 33 | // User has already authorized both platforms. Return early 34 | return {}; 35 | } 36 | 37 | const combinedArgs = { 38 | ...twitterHumanInterrupt?.action_request.args, 39 | ...linkedInHumanInterrupt?.action_request.args, 40 | }; 41 | 42 | const description = `# Authorization Required 43 | 44 | Please visit the following URL(s) to authorize your social media accounts: 45 | 46 | ${combinedArgs.authorizeTwitterURL ? `Twitter: ${combinedArgs.authorizeTwitterURL}` : ""} 47 | ${combinedArgs.authorizeLinkedInURL ? `LinkedIn: ${combinedArgs.authorizeLinkedInURL}` : ""} 48 | ${combinedArgs.authorizationDocs ?
`LinkedIn Authorization Docs: ${combinedArgs.authorizationDocs}` : ""} 49 | 50 | Once done, please 'accept' this interrupt event.`; 51 | 52 | const interruptEvent: HumanInterrupt = { 53 | description, 54 | action_request: { 55 | action: "Authorize Social Media Accounts", 56 | args: combinedArgs, 57 | }, 58 | config: { 59 | allow_accept: true, 60 | allow_ignore: true, 61 | allow_respond: false, 62 | allow_edit: false, 63 | }, 64 | }; 65 | 66 | const interruptRes = interrupt([ 67 | interruptEvent, 68 | ])[0]; 69 | 70 | if (interruptRes.type === "ignore") { 71 | // Throw an error to end the graph. 72 | throw new Error("Authorization denied by user."); 73 | } 74 | 75 | return {}; 76 | } 77 | -------------------------------------------------------------------------------- /src/agents/generate-post/nodes/generate-post/index.ts: -------------------------------------------------------------------------------- 1 | import { LangGraphRunnableConfig } from "@langchain/langgraph"; 2 | import { GeneratePostAnnotation } from "../../generate-post-state.js"; 3 | import { ChatAnthropic } from "@langchain/anthropic"; 4 | import { GENERATE_POST_PROMPT } from "./prompts.js"; 5 | import { formatPrompt, parseGeneration } from "./utils.js"; 6 | import { ALLOWED_TIMES } from "../../constants.js"; 7 | import { 8 | getReflectionsPrompt, 9 | REFLECTIONS_PROMPT, 10 | } from "../../../../utils/reflections.js"; 11 | import { getNextSaturdayDate } from "../../../../utils/date.js"; 12 | 13 | export async function generatePost( 14 | state: typeof GeneratePostAnnotation.State, 15 | config: LangGraphRunnableConfig, 16 | ): Promise> { 17 | if (!state.report) { 18 | throw new Error("No report found"); 19 | } 20 | if (!state.relevantLinks?.length) { 21 | throw new Error("No relevant links found"); 22 | } 23 | const postModel = new ChatAnthropic({ 24 | model: "claude-3-5-sonnet-latest", 25 | temperature: 0.5, 26 | }); 27 | 28 | const prompt = formatPrompt(state.report, state.relevantLinks); 29 | 30 | 
const reflections = await getReflectionsPrompt(config); 31 | const reflectionsPrompt = REFLECTIONS_PROMPT.replace( 32 | "{reflections}", 33 | reflections, 34 | ); 35 | 36 | const generatePostPrompt = GENERATE_POST_PROMPT.replace( 37 | "{reflectionsPrompt}", 38 | reflectionsPrompt, 39 | ); 40 | 41 | const postResponse = await postModel.invoke([ 42 | { 43 | role: "system", 44 | content: generatePostPrompt, 45 | }, 46 | { 47 | role: "user", 48 | content: prompt, 49 | }, 50 | ]); 51 | 52 | // Randomly select a time from the allowed times 53 | const [time, meridiem] = ALLOWED_TIMES[ 54 | Math.floor(Math.random() * ALLOWED_TIMES.length) 55 | ].split(" "); 56 | const [rawHour, postMinute] = time.split(":"); 57 | // Convert to a 24-hour clock so PM slots aren't scheduled as early-morning times: 58 | // previously the "AM"/"PM" suffix was discarded, so e.g. "5:00 PM" became hour 5. 59 | // 12 PM stays 12; 1-5 PM become 13-17; AM hours pass through unchanged. 60 | // NOTE(review): assumes getNextSaturdayDate expects 24-hour hours (Date#setHours) — confirm. 61 | const postHour = (Number(rawHour) % 12) + (meridiem === "PM" ? 12 : 0); 62 | const postDate = getNextSaturdayDate(postHour, Number(postMinute)); 63 | 64 | return { 65 | post: parseGeneration(postResponse.content as string), 66 | scheduleDate: postDate, 67 | }; 68 | } 69 | -------------------------------------------------------------------------------- /src/agents/generate-post/nodes/generate-post/prompts.ts: -------------------------------------------------------------------------------- 1 | import { getPrompts } from "../../prompts/index.js"; 2 | 3 | export const GENERATE_POST_PROMPT = `You're a highly regarded marketing employee, working on crafting thoughtful and engaging content for the LinkedIn and Twitter pages. 4 | You've been provided with a report on some content that you need to turn into a LinkedIn/Twitter post. The same post will be used for both platforms. 5 | Your coworker has already taken the time to write a detailed marketing report on this content for you, so please take your time and read it carefully. 6 | 7 | The following are examples of LinkedIn/Twitter posts on third-party content that have done well, and you should use them as style inspiration for your post: 8 | 9 | ${getPrompts().tweetExamples} 10 | 11 | 12 | Now that you've seen some examples, let's cover the structure of the LinkedIn/Twitter post you should follow.
13 | ${getPrompts().postStructureInstructions} 14 | 15 | This structure should ALWAYS be followed. And remember, the shorter and more engaging the post, the better (your yearly bonus depends on this!!). 16 | 17 | Here are a set of rules and guidelines you should strictly follow when creating the LinkedIn/Twitter post: 18 | 19 | ${getPrompts().postContentRules} 20 | 21 | 22 | {reflectionsPrompt} 23 | 24 | Lastly, you should follow the process below when writing the LinkedIn/Twitter post: 25 | 26 | Step 1. First, read over the marketing report VERY thoroughly. 27 | Step 2. Take notes, and write down your thoughts about the report after reading it carefully. This should include details you think will help make the post more engaging, and your initial thoughts about what to focus the post on, the style, etc. This should be the first text you write. Wrap the notes and thoughts inside a "" tag. 28 | Step 3. Lastly, write the LinkedIn/Twitter post. Use the notes and thoughts you wrote down in the previous step to help you write the post. This should be the last text you write. Wrap your report inside a "" tag. Ensure you write only ONE post for both LinkedIn and Twitter. 29 | 30 | 31 | Given these examples, rules, and the content provided by the user, curate a LinkedIn/Twitter post that is engaging and follows the structure of the examples provided.`; 32 | -------------------------------------------------------------------------------- /src/agents/generate-post/nodes/generate-post/utils.ts: -------------------------------------------------------------------------------- 1 | import { filterLinksForPostContent } from "../../../utils.js"; 2 | 3 | /** 4 | * Parse the LLM generation to extract the report from inside the tag. 5 | * If the report can not be parsed, the original generation is returned. 
6 | * @param generation The text generation to parse 7 | * @returns The parsed generation, or the unmodified generation if it cannot be parsed 8 | */ 9 | export function parseGeneration(generation: string): string { 10 | const reportMatch = generation.match(/([\s\S]*?)<\/post>/); 11 | if (!reportMatch) { 12 | console.warn( 13 | "Could not parse post from generation:\nSTART OF POST GENERATION\n\n", 14 | generation, 15 | "\n\nEND OF POST GENERATION", 16 | ); 17 | } 18 | return reportMatch ? reportMatch[1].trim() : generation; 19 | } 20 | 21 | export function formatPrompt(report: string, relevantLinks: string[]): string { 22 | return `Here is the report I wrote on the content I'd like promoted by LangChain: 23 | 24 | ${report} 25 | 26 | 27 | Here are the relevant links used to create the report. 28 | You should remove tracking query parameters from the link, if present. 29 | If you are unsure whether a link's parameters are tracking, do not remove them. It's better to have a link with tracking parameters than a broken link. 30 | The links do NOT contribute to the post's length. They are temporarily removed from the post before the length is calculated, and re-added afterwards. 31 | 32 | ${filterLinksForPostContent(relevantLinks)} 33 | `; 34 | } 35 | -------------------------------------------------------------------------------- /src/agents/generate-post/nodes/generate-report/index.ts: -------------------------------------------------------------------------------- 1 | import { LangGraphRunnableConfig } from "@langchain/langgraph"; 2 | import { GeneratePostAnnotation } from "../../generate-post-state.js"; 3 | import { ChatAnthropic } from "@langchain/anthropic"; 4 | import { GENERATE_REPORT_PROMPT } from "./prompts.js"; 5 | 6 | /** 7 | * Parse the LLM generation to extract the report from inside the tag. 8 | * If the report can not be parsed, the original generation is returned. 
9 | * @param generation The text generation to parse 10 | * @returns The parsed generation, or the unmodified generation if it cannot be parsed 11 | */ 12 | function parseGeneration(generation: string): string { 13 | const reportMatch = generation.match(/([\s\S]*?)<\/report>/); 14 | if (!reportMatch) { 15 | console.warn( 16 | "Could not parse report from generation:\nSTART OF GENERATION\n\n", 17 | generation, 18 | "\n\nEND OF GENERATION", 19 | ); 20 | } 21 | return reportMatch ? reportMatch[1].trim() : generation; 22 | } 23 | 24 | const formatReportPrompt = (pageContents: string[]): string => { 25 | return `The following text contains summaries, or entire pages from the content I submitted to you. Please review the content and generate a report on it. 26 | ${pageContents.map((content, index) => `\n${content}\n`).join("\n\n")}`; 27 | }; 28 | 29 | export async function generateContentReport( 30 | state: typeof GeneratePostAnnotation.State, 31 | _config: LangGraphRunnableConfig, 32 | ): Promise> { 33 | if (!state.pageContents?.length) { 34 | throw new Error( 35 | "No page contents found. 
pageContents must be defined to generate a content report.", 36 | ); 37 | } 38 | 39 | const reportModel = new ChatAnthropic({ 40 | model: "claude-3-5-sonnet-latest", 41 | temperature: 0, 42 | }); 43 | 44 | const result = await reportModel.invoke([ 45 | { 46 | role: "system", 47 | content: GENERATE_REPORT_PROMPT, 48 | }, 49 | { 50 | role: "user", 51 | content: formatReportPrompt(state.pageContents), 52 | }, 53 | ]); 54 | 55 | return { 56 | report: parseGeneration(result.content as string), 57 | }; 58 | } 59 | -------------------------------------------------------------------------------- /src/agents/generate-post/nodes/rewrite-with-split-url.ts: -------------------------------------------------------------------------------- 1 | import { z } from "zod"; 2 | import { 3 | GeneratePostState, 4 | GeneratePostUpdate, 5 | } from "../generate-post-state.js"; 6 | import { ChatAnthropic } from "@langchain/anthropic"; 7 | 8 | const postSchema = z.object({ 9 | main_post: z 10 | .string() 11 | .describe( 12 | "The main content of the post. Should NOT include the URL, but it should include a concise callout indicating the URL is in the reply.", 13 | ), 14 | reply_post: z 15 | .string() 16 | .describe( 17 | "The reply to the Tweet. This should contain a very concise callout (e.g. 'Check out the repo here:'), and the URL.", 18 | ), 19 | }); 20 | 21 | const REWRITE_WITH_SPLIT_URL_PROMPT = `You're an advanced AI marketer who has been tasked with splitting a social media post into two unique posts: 22 | 1. The first is the main body of the post. You should NOT make any changes to the input, EXCEPT for replacing the callout URL at the bottom, with a message indicating the URL is in the reply. 23 | You may include an emoji to indicate the URL is in the reply. 24 | Example: 25 | "Repo link in reply 👇" 26 | or 27 | "Video link in reply 🧵" 28 | 2. The second is the reply post. This should contain a very concise callout (e.g. 'Check out the repo here:'), and the URL. 
29 | Example: 30 | "Checkout the repo here: https://github.com/bracesproul/langchain-ai" 31 | 32 | Given the following post: 33 | {POST} 34 | 35 | Please split it into the two unique posts. Ensure the ONLY modification you make is to the callout & URL at the end of the post.`; 36 | 37 | export async function rewritePostWithSplitUrl( 38 | state: GeneratePostState, 39 | ): Promise { 40 | const postModel = new ChatAnthropic({ 41 | model: "claude-3-7-sonnet-latest", 42 | temperature: 0, 43 | }).bindTools( 44 | [ 45 | { 46 | name: "rewrite_post", 47 | description: 48 | "Rewrite the post with the split URL from the main post content to the reply", 49 | schema: postSchema, 50 | }, 51 | ], 52 | { 53 | tool_choice: "rewrite_post", 54 | }, 55 | ); 56 | 57 | const formattedPrompt = REWRITE_WITH_SPLIT_URL_PROMPT.replace( 58 | "{POST}", 59 | state.post || "", 60 | ); 61 | 62 | const result = await postModel.invoke([ 63 | { 64 | role: "user", 65 | content: formattedPrompt, 66 | }, 67 | ]); 68 | 69 | const rewrittenPost = result.tool_calls?.[0].args as 70 | | z.infer 71 | | undefined; 72 | 73 | return { 74 | complexPost: rewrittenPost, 75 | }; 76 | } 77 | -------------------------------------------------------------------------------- /src/agents/generate-report/index.ts: -------------------------------------------------------------------------------- 1 | import { END, START, StateGraph } from "@langchain/langgraph"; 2 | import { GenerateReportAnnotation } from "./state.js"; 3 | import { generateReport } from "./nodes/generate-report.js"; 4 | import { extractKeyDetails } from "./nodes/extract-key-details.js"; 5 | 6 | const generateReportWorkflow = new StateGraph(GenerateReportAnnotation) 7 | .addNode("extractKeyDetails", extractKeyDetails) 8 | .addNode("generateReport", generateReport) 9 | .addEdge(START, "extractKeyDetails") 10 | .addEdge("extractKeyDetails", "generateReport") 11 | .addEdge("generateReport", END); 12 | 13 | export const generateReportGraph = 
generateReportWorkflow.compile(); 14 | generateReportGraph.name = "Generate Report Graph"; 15 | -------------------------------------------------------------------------------- /src/agents/generate-report/nodes/extract-key-details.ts: -------------------------------------------------------------------------------- 1 | import { ChatOpenAI } from "@langchain/openai"; 2 | import { GenerateReportState } from "../state.js"; 3 | import { EXTRACT_KEY_DETAILS_PROMPT } from "../prompts.js"; 4 | import { TweetsGroupedByContent } from "../../curate-data/types.js"; 5 | import { formatImageMessages } from "../utils.js"; 6 | 7 | const formatKeyDetailsPrompt = ( 8 | pageContents: string[], 9 | tweetGroup: TweetsGroupedByContent | undefined, 10 | ): string => { 11 | let tweetGroupText = ""; 12 | if (tweetGroup) { 13 | tweetGroupText = `Here is a group of tweets I extracted which are relevant to this content: 14 | 15 | ${tweetGroup.explanation} 16 | 17 | ${tweetGroup.tweets 18 | .map((tweet) => { 19 | const tweetText = tweet.note_tweet?.text || tweet.text || ""; 20 | return `\n${tweetText}\n`; 21 | }) 22 | .join("\n\n")} 23 | 24 | `; 25 | } 26 | 27 | const pageContentsText = 28 | pageContents.length > 0 29 | ? pageContents 30 | .map( 31 | (content, index) => 32 | `\n${content}\n`, 33 | ) 34 | .join("\n\n") 35 | : ""; 36 | 37 | if (pageContentsText.length > 0) { 38 | return `The following text contains summaries, or entire pages from the content I submitted to you. Please review the content and extract ALL of the key details from it. 39 | ${pageContentsText} 40 | 41 | ${tweetGroupText}`; 42 | } 43 | 44 | return tweetGroupText; 45 | }; 46 | 47 | export async function extractKeyDetails( 48 | state: GenerateReportState, 49 | ): Promise> { 50 | if (!state.pageContents?.length && !state.tweetGroup) { 51 | throw new Error( 52 | "Missing page contents and tweet group. 
One of these must be defined to extract key details.", 53 | ); 54 | } 55 | const keyDetailsPrompt = formatKeyDetailsPrompt( 56 | state.pageContents || [], 57 | state.tweetGroup, 58 | ); 59 | 60 | const model = new ChatOpenAI({ 61 | model: "o1", 62 | streaming: false, 63 | }); 64 | 65 | const imageMessage = state.imageOptions?.length 66 | ? formatImageMessages(state.imageOptions) 67 | : undefined; 68 | 69 | const keyDetailsRes = await model.invoke([ 70 | { 71 | role: "system", 72 | content: EXTRACT_KEY_DETAILS_PROMPT, 73 | }, 74 | ...(imageMessage ? [imageMessage] : []), 75 | { 76 | role: "user", 77 | content: keyDetailsPrompt, 78 | }, 79 | ]); 80 | 81 | return { 82 | keyReportDetails: keyDetailsRes.content as string, 83 | }; 84 | } 85 | -------------------------------------------------------------------------------- /src/agents/generate-report/state.ts: -------------------------------------------------------------------------------- 1 | import { Annotation } from "@langchain/langgraph"; 2 | import { TweetsGroupedByContent } from "../curate-data/types.js"; 3 | import { VerifyLinksResultAnnotation } from "../verify-links/verify-links-state.js"; 4 | 5 | export const GenerateReportAnnotation = Annotation.Root({ 6 | ...VerifyLinksResultAnnotation.spec, 7 | tweetGroup: Annotation, 8 | keyReportDetails: Annotation, 9 | /** 10 | * Must be an array even though it will only contain a single report. 11 | * This is due to its usage in a subgraph, and the shared key is `reports`. 
12 | */ 13 | reports: Annotation< 14 | Array<{ 15 | report: string; 16 | keyDetails: string; 17 | }> 18 | >({ 19 | reducer: (state, update) => state.concat(update), 20 | default: () => [], 21 | }), 22 | }); 23 | 24 | export type GenerateReportState = typeof GenerateReportAnnotation.State; 25 | -------------------------------------------------------------------------------- /src/agents/generate-report/utils.ts: -------------------------------------------------------------------------------- 1 | import { BaseMessageLike } from "@langchain/core/messages"; 2 | 3 | export function formatImageMessages(imageOptions: string[]): BaseMessageLike { 4 | return { 5 | role: "user", 6 | content: [ 7 | { 8 | type: "text", 9 | text: "The following are images you should use as context when extracting key details. All of the following images were extracted from the content you are also provided with.", 10 | }, 11 | ...imageOptions.map((url) => ({ 12 | type: "image_url", 13 | image_url: { 14 | url, 15 | }, 16 | })), 17 | ], 18 | }; 19 | } 20 | -------------------------------------------------------------------------------- /src/agents/generate-thread/index.ts: -------------------------------------------------------------------------------- 1 | import { END, START, StateGraph } from "@langchain/langgraph"; 2 | import { generateThreadPlan } from "./nodes/generate-thread-plan.js"; 3 | import { generateThreadPosts } from "./nodes/generate-thread-posts.js"; 4 | import { GenerateThreadAnnotation, GenerateThreadState } from "./state.js"; 5 | import { humanNode } from "./nodes/human-node/index.js"; 6 | import { updateScheduledDate } from "../shared/nodes/update-scheduled-date.js"; 7 | import { rewriteThread } from "./nodes/rewrite-thread.js"; 8 | import { scheduleThread } from "./nodes/schedule-thread.js"; 9 | 10 | function rewriteOrEndConditionalEdge( 11 | state: GenerateThreadState, 12 | ): 13 | | "rewriteThread" 14 | | "scheduleThread" 15 | | "updateScheduleDate" 16 | | "humanNode" 17 | | 
typeof END { 18 | if (state.next) { 19 | if (state.next === "unknownResponse") { 20 | // If the user's response is unknown, we should route back to the human node. 21 | return "humanNode"; 22 | } else if (state.next === "rewritePost") { 23 | return "rewriteThread"; 24 | } else if (state.next === "schedulePost") { 25 | return "scheduleThread"; 26 | } 27 | 28 | return state.next; 29 | } 30 | return END; 31 | } 32 | 33 | const generateThreadWorkflow = new StateGraph(GenerateThreadAnnotation) 34 | .addNode("generateThreadPlan", generateThreadPlan) 35 | .addNode("generateThreadPosts", generateThreadPosts) 36 | .addNode("humanNode", humanNode) 37 | // Updates the scheduled date from the natural language response from the user. 38 | .addNode("updateScheduleDate", updateScheduledDate) 39 | .addNode("scheduleThread", scheduleThread) 40 | .addNode("rewriteThread", rewriteThread) 41 | .addEdge(START, "generateThreadPlan") 42 | .addEdge("generateThreadPlan", "generateThreadPosts") 43 | .addEdge("generateThreadPosts", "humanNode") 44 | .addConditionalEdges("humanNode", rewriteOrEndConditionalEdge, [ 45 | "rewriteThread", 46 | "scheduleThread", 47 | "updateScheduleDate", 48 | "humanNode", 49 | END, 50 | ]) 51 | .addEdge("rewriteThread", "humanNode") 52 | .addEdge("updateScheduleDate", "humanNode") 53 | .addEdge("scheduleThread", END); 54 | 55 | export const generateThreadGraph = generateThreadWorkflow.compile(); 56 | generateThreadGraph.name = "Generate Thread Graph"; 57 | -------------------------------------------------------------------------------- /src/agents/generate-thread/nodes/schedule-thread.ts: -------------------------------------------------------------------------------- 1 | import { GenerateThreadState } from "../state.js"; 2 | 3 | /** 4 | * Placeholder node: scheduling a generated thread is not implemented yet. 5 | * Always throws; the error includes a readable dump of the state for debugging. 6 | */ 7 | export async function scheduleThread(state: GenerateThreadState) { 8 | // "Not implemented" + state stringified the object as "[object Object]"; 9 | // serialize it instead so the error message is actually useful. 10 | throw new Error(`Not implemented. State: ${JSON.stringify(state)}`); 11 | } 12 | --------------------------------------------------------------------------------
/src/agents/generate-thread/state.ts: -------------------------------------------------------------------------------- 1 | import { Annotation, END } from "@langchain/langgraph"; 2 | import { ThreadPost } from "./types.js"; 3 | import { DateType } from "../types.js"; 4 | 5 | export const GenerateThreadAnnotation = Annotation.Root({ 6 | /** 7 | * The reports to use for generating the thread. 8 | */ 9 | reports: Annotation, 10 | /** 11 | * The total number of posts to generate. 12 | */ 13 | totalPosts: Annotation, 14 | /** 15 | * The plan generated for the thread. 16 | */ 17 | threadPlan: Annotation, 18 | /** 19 | * The posts generated for the thread. 20 | */ 21 | threadPosts: Annotation, 22 | /** 23 | * The date to schedule the post for. 24 | */ 25 | scheduleDate: Annotation, 26 | /** 27 | * Response from the user for the post. Typically used to request 28 | * changes to be made to the post. 29 | */ 30 | userResponse: Annotation, 31 | /** 32 | * The node to execute next. 33 | */ 34 | next: Annotation< 35 | | "schedulePost" 36 | | "rewritePost" 37 | | "updateScheduleDate" 38 | | "unknownResponse" 39 | | typeof END 40 | | undefined 41 | >, 42 | 43 | /** 44 | * The image to attach to the post, and the MIME type. 
45 | */ 46 | image: Annotation< 47 | | { 48 | imageUrl: string; 49 | mimeType: string; 50 | } 51 | | undefined 52 | >, 53 | }); 54 | 55 | export type GenerateThreadState = typeof GenerateThreadAnnotation.State; 56 | -------------------------------------------------------------------------------- /src/agents/generate-thread/types.ts: -------------------------------------------------------------------------------- 1 | export type ThreadPost = { 2 | text: string; 3 | index: number; 4 | imageUrls?: string[]; 5 | }; 6 | -------------------------------------------------------------------------------- /src/agents/generate-thread/utils.ts: -------------------------------------------------------------------------------- 1 | import { EXAMPLES } from "../generate-post/prompts/examples.js"; 2 | 3 | export function formatReportsForPrompt(reports: string[]): string { 4 | return reports 5 | .map((r, index) => `\n${r}\n`) 6 | .join("\n"); 7 | } 8 | 9 | export function formatBodyPostsForPrompt(posts: string[]): string { 10 | if (posts.length === 0) { 11 | return "You have not generated any body posts yet, only the introduction."; 12 | } 13 | const postsString = posts 14 | .map((p, index) => `\n${p}\n`) 15 | .join("\n"); 16 | return `Here are the body posts you have generated so far: 17 | 18 | ${postsString} 19 | `; 20 | } 21 | 22 | export function formatAllPostsForPrompt(posts: string[]): string { 23 | return posts 24 | .map((p, index) => `\n${p}\n`) 25 | .join("\n"); 26 | } 27 | 28 | export function formatTweetExamplesForPrompt(): string { 29 | return EXAMPLES.map( 30 | (ex, index) => `\n${ex}\n`, 31 | ).join("\n"); 32 | } 33 | 34 | export function parseTweetGeneration(tweet: string): string { 35 | const tweetMatch = tweet.match(/([\s\S]*?)<\/tweet>/); 36 | if (!tweetMatch) { 37 | console.warn( 38 | "Could not parse tweet from generation:\nSTART OF TWEET\n\n", 39 | tweet, 40 | "\n\nEND OF TWEET", 41 | ); 42 | } 43 | return tweetMatch?.[1] ? 
tweetMatch[1].trim() : tweet; 44 | } 45 | -------------------------------------------------------------------------------- /src/agents/ingest-data/ingest-data-graph.ts: -------------------------------------------------------------------------------- 1 | import { 2 | END, 3 | LangGraphRunnableConfig, 4 | START, 5 | StateGraph, 6 | } from "@langchain/langgraph"; 7 | import { 8 | IngestDataConfigurableAnnotation, 9 | IngestDataAnnotation, 10 | } from "./ingest-data-state.js"; 11 | import { ingestSlackData } from "./nodes/ingest-slack.js"; 12 | import { Client } from "@langchain/langgraph-sdk"; 13 | import { 14 | POST_TO_LINKEDIN_ORGANIZATION, 15 | SKIP_CONTENT_RELEVANCY_CHECK, 16 | SKIP_USED_URLS_CHECK, 17 | TEXT_ONLY_MODE, 18 | } from "../generate-post/constants.js"; 19 | import { 20 | getAfterSecondsFromLinks, 21 | isTextOnly, 22 | shouldPostToLinkedInOrg, 23 | skipContentRelevancyCheck, 24 | skipUsedUrlsCheck, 25 | } from "../utils.js"; 26 | 27 | async function generatePostFromMessages( 28 | state: typeof IngestDataAnnotation.State, 29 | config: LangGraphRunnableConfig, 30 | ) { 31 | const client = new Client({ 32 | apiUrl: `http://localhost:${process.env.PORT}`, 33 | }); 34 | 35 | const linkAndDelay = getAfterSecondsFromLinks(state.links); 36 | const isTextOnlyMode = isTextOnly(config); 37 | const postToLinkedInOrg = shouldPostToLinkedInOrg(config); 38 | const shouldSkipContentRelevancyCheck = await skipContentRelevancyCheck( 39 | config?.configurable, 40 | ); 41 | const shouldSkipUsedUrlsCheck = await skipUsedUrlsCheck(config?.configurable); 42 | 43 | for await (const { link, afterSeconds } of linkAndDelay) { 44 | const thread = await client.threads.create(); 45 | await client.runs.create(thread.thread_id, "generate_post", { 46 | input: { 47 | links: [link], 48 | }, 49 | config: { 50 | configurable: { 51 | [POST_TO_LINKEDIN_ORGANIZATION]: postToLinkedInOrg, 52 | [TEXT_ONLY_MODE]: isTextOnlyMode, 53 | [SKIP_CONTENT_RELEVANCY_CHECK]: 
shouldSkipContentRelevancyCheck, 54 | [SKIP_USED_URLS_CHECK]: shouldSkipUsedUrlsCheck, 55 | }, 56 | }, 57 | afterSeconds, 58 | }); 59 | } 60 | return {}; 61 | } 62 | 63 | const builder = new StateGraph( 64 | IngestDataAnnotation, 65 | IngestDataConfigurableAnnotation, 66 | ) 67 | // Ingests posts from Slack channel. 68 | .addNode("ingestSlackData", ingestSlackData) 69 | // Subgraph which is invoked once for each message. 70 | // This subgraph will verify content is relevant to 71 | // LangChain, generate a report on the content, and 72 | // finally generate and schedule a post. 73 | .addNode("generatePostGraph", generatePostFromMessages) 74 | // Start node 75 | .addEdge(START, "ingestSlackData") 76 | // After ingesting data, route to the subgraph for each message. 77 | .addEdge("ingestSlackData", "generatePostGraph") 78 | // Finish after generating the Twitter post. 79 | .addEdge("generatePostGraph", END); 80 | 81 | export const graph = builder.compile(); 82 | 83 | graph.name = "Social Media Agent"; 84 | -------------------------------------------------------------------------------- /src/agents/ingest-data/ingest-data-state.ts: -------------------------------------------------------------------------------- 1 | import { Annotation } from "@langchain/langgraph"; 2 | import { SimpleSlackMessage } from "../../clients/slack/client.js"; 3 | import { 4 | POST_TO_LINKEDIN_ORGANIZATION, 5 | SKIP_CONTENT_RELEVANCY_CHECK, 6 | SKIP_USED_URLS_CHECK, 7 | TEXT_ONLY_MODE, 8 | } from "../generate-post/constants.js"; 9 | 10 | export type LangChainProduct = "langchain" | "langgraph" | "langsmith"; 11 | export type SimpleSlackMessageWithLinks = SimpleSlackMessage & { 12 | links: string[]; 13 | }; 14 | 15 | export const IngestDataAnnotation = Annotation.Root({ 16 | /** 17 | * The links to content to use for generating posts. 18 | */ 19 | links: Annotation({ 20 | reducer: (_state, update) => update, 21 | default: () => [], 22 | }), 23 | /** 24 | * A report generated on the content. 
Will be used in the main 25 | * graph when generating the post about this content. 26 | */ 27 | report: Annotation({ 28 | reducer: (_state, update) => update, 29 | default: () => "", 30 | }), 31 | /** 32 | * The content of the linkedin post. 33 | */ 34 | linkedinPost: Annotation({ 35 | reducer: (_state, update) => update, 36 | default: () => "", 37 | }), 38 | /** 39 | * The content of the tweet. 40 | */ 41 | twitterPost: Annotation({ 42 | reducer: (_state, update) => update, 43 | default: () => "", 44 | }), 45 | }); 46 | 47 | export const IngestDataConfigurableAnnotation = Annotation.Root({ 48 | maxMessages: Annotation({ 49 | reducer: (_state, update) => update, 50 | default: () => 100, 51 | }), 52 | /** 53 | * The maximum number of days to go back when ingesting 54 | * messages from Slack. 55 | */ 56 | maxDaysHistory: Annotation, 57 | slackChannelId: Annotation, 58 | /** 59 | * Whether or not to skip ingesting messages from Slack. 60 | * This will throw an error if slack messages are not 61 | * pre-provided in state. 62 | */ 63 | skipIngest: Annotation, 64 | /** 65 | * Whether to post to the LinkedIn organization or the user's profile. 66 | * If true, [LINKEDIN_ORGANIZATION_ID] is required. 67 | */ 68 | [POST_TO_LINKEDIN_ORGANIZATION]: Annotation, 69 | /** 70 | * Whether or not to use text only mode throughout the graph. 71 | * If true, it will not try to extract, validate, or upload images. 72 | * Additionally, it will not be able to handle validating YouTube videos. 73 | * @default false 74 | */ 75 | [TEXT_ONLY_MODE]: Annotation({ 76 | reducer: (_state, update) => update, 77 | default: () => false, 78 | }), 79 | /** 80 | * Whether or not to skip content verification. 81 | * If true, it will not attempt to verify the content from the link provided. 82 | * @default undefined 83 | */ 84 | [SKIP_CONTENT_RELEVANCY_CHECK]: Annotation(), 85 | /** 86 | * Whether or not to skip the used URLs check. This will also 87 | * skip saving the URLs in the store. 
88 | */ 89 | [SKIP_USED_URLS_CHECK]: Annotation(), 90 | }); 91 | -------------------------------------------------------------------------------- /src/agents/ingest-data/nodes/ingest-slack.ts: -------------------------------------------------------------------------------- 1 | import { IngestDataAnnotation } from "../ingest-data-state.js"; 2 | import { LangGraphRunnableConfig } from "@langchain/langgraph"; 3 | import { SlackClient } from "../../../clients/slack/client.js"; 4 | import { extractUrlsFromSlackText } from "../../utils.js"; 5 | 6 | const getChannelIdFromConfig = async ( 7 | config: LangGraphRunnableConfig, 8 | ): Promise => { 9 | return config.configurable?.slackChannelId; 10 | }; 11 | 12 | export async function ingestSlackData( 13 | state: typeof IngestDataAnnotation.State, 14 | config: LangGraphRunnableConfig, 15 | ): Promise> { 16 | if (config.configurable?.skipIngest) { 17 | if (state.links.length === 0) { 18 | throw new Error("Can not skip ingest with no links"); 19 | } 20 | return {}; 21 | } 22 | 23 | const channelId = await getChannelIdFromConfig(config); 24 | if (!channelId) { 25 | throw new Error("Channel ID not found"); 26 | } 27 | 28 | const client = new SlackClient(); 29 | const recentMessages = await client.getChannelMessages(channelId, { 30 | maxMessages: config.configurable?.maxMessages, 31 | maxHoursHistory: config.configurable?.maxDaysHistory 32 | ? 
24 * config.configurable?.maxDaysHistory 33 | : undefined, 34 | }); 35 | 36 | const links = recentMessages.flatMap((msg) => { 37 | const links = extractUrlsFromSlackText(msg.text); 38 | if (!links.length) { 39 | return []; 40 | } 41 | return links; 42 | }); 43 | 44 | return { 45 | links, 46 | }; 47 | } 48 | -------------------------------------------------------------------------------- /src/agents/ingest-data/nodes/ingest-twitter.ts: -------------------------------------------------------------------------------- 1 | import { IngestDataAnnotation } from "../ingest-data-state.js"; 2 | import { LangGraphRunnableConfig } from "@langchain/langgraph"; 3 | import Arcade from "@arcadeai/arcadejs"; 4 | import { getArcadeTwitterAuthOrInterrupt } from "../../shared/auth/twitter.js"; 5 | import { INGEST_TWITTER_USERNAME } from "../../generate-post/constants.js"; 6 | 7 | type TweetResult = { 8 | author_id: string; 9 | author_name: string; 10 | author_username: string; 11 | edit_history_tweet_ids: string[]; 12 | id: string; 13 | text: string; 14 | tweet_url: string; 15 | }; 16 | 17 | /** 18 | * Ingests Twitter data into the graph. 19 | * 20 | * This function will ingest tweets by a username. 21 | * 22 | * @param state The current state of the graph. 23 | * @param config The configuration for the ingest operation. 24 | * @returns A partial update to the graph state with the ingested tweets. 
25 | */ 26 | export async function ingestTweets( 27 | state: typeof IngestDataAnnotation.State, 28 | config: LangGraphRunnableConfig, 29 | ): Promise> { 30 | if (config.configurable?.skipIngest) { 31 | if (state.links.length === 0) { 32 | throw new Error("Can not skip ingest with no links"); 33 | } 34 | return {}; 35 | } 36 | const twitterUserId = process.env.TWITTER_USER_ID; 37 | if (!twitterUserId) { 38 | throw new Error("Twitter user ID not found in configurable fields."); 39 | } 40 | 41 | const username = config.configurable?.[INGEST_TWITTER_USERNAME] as 42 | | string 43 | | undefined; 44 | if (!username) { 45 | throw new Error("Twitter username not found in configurable fields."); 46 | } 47 | 48 | const arcade = new Arcade({ 49 | apiKey: process.env.ARCADE_API_KEY, 50 | }); 51 | await getArcadeTwitterAuthOrInterrupt(twitterUserId, arcade); 52 | 53 | let links: string[] = []; 54 | const result = await arcade.tools.execute({ 55 | tool_name: "X.SearchRecentTweetsByUsername", 56 | input: { 57 | username, 58 | // (integer, optional, Defaults to 10) The maximum number of results to return. Cannot be less than 10. 
59 | // 15 since the rate limit is 15 req/15 min 60 | max_results: 15, 61 | }, 62 | user_id: twitterUserId, 63 | }); 64 | 65 | const castValue = result.output?.value as { data: TweetResult[] | undefined }; 66 | if (castValue && castValue.data) { 67 | links = castValue.data.map((t) => t.tweet_url); 68 | } 69 | 70 | return { 71 | links, 72 | }; 73 | } 74 | -------------------------------------------------------------------------------- /src/agents/ingest-repurposed-data/constants.ts: --------------------------------------------------------------------------------
// Default number of posts to generate per repurposed content item.
export const DEFAULT_POST_QUANTITY = 3;
-------------------------------------------------------------------------------- /src/agents/ingest-repurposed-data/index.ts: --------------------------------------------------------------------------------
import {
  END,
  LangGraphRunnableConfig,
  START,
  StateGraph,
} from "@langchain/langgraph";
import { ingestSlackMessages } from "./nodes/ingest-slack.js";
import { Client } from "@langchain/langgraph-sdk";
import { POST_TO_LINKEDIN_ORGANIZATION } from "../generate-post/constants.js";
import { shouldPostToLinkedInOrg } from "../utils.js";
import {
  IngestRepurposedDataAnnotation,
  IngestRepurposedDataConfigurableAnnotation,
  IngestRepurposedDataState,
} from "./types.js";
import { extract } from "./nodes/extract.js";

// NOTE(review): this dump stripped every `<...>` span, so generic type
// parameters below are reconstructed — verify against the repository.

/**
 * Kicks off one "repurposer" run per extracted content item.
 *
 * Each item gets its own thread; the run is created fire-and-forget (the SDK
 * call is awaited, but the run itself executes asynchronously server-side).
 *
 * @param state  Graph state; iterates `state.contents`.
 * @param config Run config; only used to resolve the LinkedIn-org flag.
 * @returns An empty update — this node only has the side effect of creating runs.
 */
async function generatePostsFromMessages(
  state: IngestRepurposedDataState,
  config: LangGraphRunnableConfig,
) {
  const client = new Client({
    apiUrl: `http://localhost:${process.env.PORT}`,
  });

  const postToLinkedInOrg = shouldPostToLinkedInOrg(config);

  // BUG FIX: the original used `for await (... of state.contents)` on a plain
  // array — `for await` is for async iterables; `for...of` is the correct
  // construct here (runs are still created sequentially, as before).
  for (const content of state.contents) {
    const thread = await client.threads.create();
    await client.runs.create(thread.thread_id, "repurposer", {
      input: {
        originalLink: content.originalLink,
        contextLinks: content.additionalContextLinks,
        quantity: content.quantity,
      },
      config: {
        configurable: {
          [POST_TO_LINKEDIN_ORGANIZATION]: postToLinkedInOrg,
        },
      },
    });
  }
  return {};
}

/**
 * Routing function: skip Slack ingestion when messages were pre-provided in
 * state, otherwise go ingest them.
 */
function ingestSlackMessagesOrSkip(
  state: IngestRepurposedDataState,
): "extract" | "ingestSlackMessages" {
  return state.messages.length > 0 ? "extract" : "ingestSlackMessages";
}

const builder = new StateGraph(
  IngestRepurposedDataAnnotation,
  IngestRepurposedDataConfigurableAnnotation,
)
  // Ingests posts from Slack channel.
  .addNode("ingestSlackMessages", ingestSlackMessages)
  // A node which extracts the links and other data from the slack messages
  .addNode("extract", extract)
  // Subgraph which is invoked once for each message. This subgraph will verify
  // content is relevant to LangChain, generate a report on the content, and
  // finally generate and schedule the specified number of posts.
  .addNode("generatePostsGraph", generatePostsFromMessages)
  // Start node
  .addConditionalEdges(START, ingestSlackMessagesOrSkip, [
    "ingestSlackMessages",
    "extract",
  ])
  // After ingesting the messages, extract the links and other data.
  .addEdge("ingestSlackMessages", "extract")
  // After extracting the data, route to the subgraph for each message.
  .addEdge("extract", "generatePostsGraph")
  // Finish after kicking off the subgraph for each message.
  .addEdge("generatePostsGraph", END);

export const graph = builder.compile();

graph.name = "Ingest Repurposed Data Graph";
-------------------------------------------------------------------------------- /src/agents/ingest-repurposed-data/nodes/ingest-slack.ts: --------------------------------------------------------------------------------
import { IngestRepurposedDataState } from "../types.js";
import { LangGraphRunnableConfig } from "@langchain/langgraph";
import { SlackClient } from "../../../clients/slack/client.js";

/**
 * Resolves the repurposer Slack channel ID from configurable fields.
 * @throws When the channel ID is not configured.
 */
const getChannelIdFromConfig = (config: LangGraphRunnableConfig): string => {
  const channelId = config.configurable?.repurposerSlackChannelId;
  if (channelId) {
    return channelId;
  }
  throw new Error("Repurposer Slack channel ID not found in config.");
};

/**
 * Ingests recent messages from the repurposer Slack channel.
 *
 * When `config.configurable.skipIngest` is set the node is a no-op, but it
 * throws if state carries no pre-provided contents.
 *
 * NOTE(review): the original re-checked `if (!channelId)` after calling a
 * helper that already throws when the ID is absent — that branch was dead
 * code and has been removed.
 */
export async function ingestSlackMessages(
  state: IngestRepurposedDataState,
  config: LangGraphRunnableConfig,
): Promise<Partial<IngestRepurposedDataState>> {
  if (config.configurable?.skipIngest) {
    if (state.contents.length === 0) {
      throw new Error("Can not skip ingest with no links");
    }
    return {};
  }

  const channelId = getChannelIdFromConfig(config);

  const client = new SlackClient();
  const recentMessages = await client.getChannelMessages(channelId);

  return {
    slackMessages: recentMessages,
  };
}
-------------------------------------------------------------------------------- /src/agents/ingest-repurposed-data/types.ts: -------------------------------------------------------------------------------- 1 | import { Annotation, MessagesAnnotation } from "@langchain/langgraph"; 2 | import { SimpleSlackMessage } from "../../clients/slack/client.js"; 3 | import { POST_TO_LINKEDIN_ORGANIZATION } from "../generate-post/constants.js"; 4 | 5 | export type
RepurposedContent = { 6 | originalLink: string; 7 | additionalContextLinks?: string[]; 8 | quantity: number; 9 | attachmentUrls: string[] | undefined; 10 | }; 11 | 12 | export const IngestRepurposedDataAnnotation = Annotation.Root({ 13 | /** 14 | * The message that triggered the repurposer. Must be of type list, but it will only 15 | * ever contain a single message from the user. 16 | */ 17 | messages: MessagesAnnotation.spec["messages"], 18 | /** 19 | * The contents to use for generating repurposed posts. 20 | */ 21 | contents: Annotation, 22 | /** 23 | * The Slack messages ingested. 24 | */ 25 | slackMessages: Annotation, 26 | }); 27 | 28 | export type IngestRepurposedDataState = 29 | typeof IngestRepurposedDataAnnotation.State; 30 | export type IngestRepurposedDataUpdate = 31 | typeof IngestRepurposedDataAnnotation.Update; 32 | 33 | export const IngestRepurposedDataConfigurableAnnotation = Annotation.Root({ 34 | /** 35 | * The ID of the slack channel to use when ingesting data. 36 | */ 37 | repurposerSlackChannelId: Annotation, 38 | /** 39 | * Whether or not to skip ingesting messages from Slack. 40 | * This will throw an error if slack messages are not 41 | * pre-provided in state. 42 | */ 43 | skipIngest: Annotation, 44 | /** 45 | * Whether to post to the LinkedIn organization or the user's profile. 46 | * If true, [LINKEDIN_ORGANIZATION_ID] is required. 
47 | */ 48 | [POST_TO_LINKEDIN_ORGANIZATION]: Annotation, 49 | }); 50 | -------------------------------------------------------------------------------- /src/agents/repurposer-post-interrupt/index.ts: -------------------------------------------------------------------------------- 1 | import { END, START, StateGraph } from "@langchain/langgraph"; 2 | import { 3 | RepurposerPostInterruptAnnotation, 4 | RepurposerPostInterruptConfigurableAnnotation, 5 | RepurposerPostInterruptState, 6 | } from "./types.js"; 7 | import { updateScheduledDate } from "../shared/nodes/update-scheduled-date.js"; 8 | import { schedulePost } from "../shared/nodes/generate-post/schedule-post.js"; 9 | import { rewritePost } from "./nodes/rewrite-posts.js"; 10 | import { humanNode } from "./nodes/human-node/index.js"; 11 | 12 | function rewriteOrEndConditionalEdge( 13 | state: RepurposerPostInterruptState, 14 | ): "rewritePost" | "schedulePost" | "humanNode" | typeof END { 15 | if (!state.next) { 16 | return END; 17 | } 18 | 19 | if (state.next === "unknownResponse") { 20 | // If the user's response is unknown, we should route back to the human node. 21 | return "humanNode"; 22 | } 23 | return state.next; 24 | } 25 | 26 | const workflow = new StateGraph( 27 | RepurposerPostInterruptAnnotation, 28 | RepurposerPostInterruptConfigurableAnnotation, 29 | ) 30 | // Interrupts the node for human in the loop. 31 | .addNode("humanNode", humanNode) 32 | // Schedules the post for Twitter/LinkedIn. 33 | .addNode("schedulePost", schedulePost) 34 | // Rewrite a post based on the user's response. 35 | .addNode("rewritePost", rewritePost) 36 | // Updated the scheduled date from the natural language response from the user. 
37 | .addNode("updateScheduleDate", updateScheduledDate) 38 | .addEdge(START, "humanNode") 39 | .addConditionalEdges("humanNode", rewriteOrEndConditionalEdge, [ 40 | "rewritePost", 41 | "schedulePost", 42 | "updateScheduleDate", 43 | "humanNode", 44 | END, 45 | ]) 46 | // Always route back to `humanNode` if the post was re-written or date was updated. 47 | .addEdge("rewritePost", "humanNode") 48 | .addEdge("updateScheduleDate", "humanNode") 49 | 50 | // Always end after scheduling the post. 51 | .addEdge("schedulePost", END); 52 | 53 | export const repurposerPostInterruptGraph = workflow.compile(); 54 | repurposerPostInterruptGraph.name = "Repurposer Post Interrupt Graph"; 55 | -------------------------------------------------------------------------------- /src/agents/repurposer-post-interrupt/nodes/human-node/router.ts: -------------------------------------------------------------------------------- 1 | import { z } from "zod"; 2 | import { ChatAnthropic } from "@langchain/anthropic"; 3 | 4 | const ROUTE_POST_PROMPT = `You're an advanced AI assistant, tasked with routing a user's response. 5 | The only route which can be taken is 'rewrite_post'. If the user is not asking to rewrite a post, then choose the 'unknown_response' route. 6 | 7 | Here's the post the user is responding to: 8 | 9 | {POST} 10 | 11 | 12 | Here's the user's response: 13 | 14 | {USER_RESPONSE} 15 | 16 | 17 | Please examine the {POST_OR_POSTS} and determine which route to take. 
18 | `; 19 | 20 | const routeResponseSchema = z.object({ 21 | route: z.enum(["rewrite_post", "unknown_response"]), 22 | }); 23 | 24 | export async function routeResponse( 25 | post: string, 26 | userResponse: string, 27 | ): Promise> { 28 | const model = new ChatAnthropic({ 29 | model: "claude-3-5-sonnet-latest", 30 | temperature: 0, 31 | }).bindTools( 32 | [ 33 | { 34 | name: "route_response", 35 | description: "Route the user's response to the appropriate route.", 36 | schema: routeResponseSchema, 37 | }, 38 | ], 39 | { 40 | tool_choice: "route_response", 41 | }, 42 | ); 43 | 44 | const formattedPrompt = ROUTE_POST_PROMPT.replace("{POST}", post).replace( 45 | "{USER_RESPONSE}", 46 | userResponse, 47 | ); 48 | 49 | const response = await model.invoke(formattedPrompt); 50 | 51 | return response.tool_calls?.[0].args as z.infer; 52 | } 53 | -------------------------------------------------------------------------------- /src/agents/repurposer-post-interrupt/types.ts: -------------------------------------------------------------------------------- 1 | import { Annotation, END } from "@langchain/langgraph"; 2 | import { 3 | AdditionalContext, 4 | DateType, 5 | Image, 6 | RepurposedPost, 7 | } from "../types.js"; 8 | import { POST_TO_LINKEDIN_ORGANIZATION } from "../generate-post/constants.js"; 9 | 10 | export const RepurposerPostInterruptAnnotation = Annotation.Root({ 11 | /** 12 | * The link to the original post/content the new campaign is based on. 13 | */ 14 | originalLink: Annotation, 15 | /** 16 | * The original content input as a string. Contains all extracted/scraped 17 | * content from the original link. 18 | */ 19 | originalContent: Annotation, 20 | /** 21 | * The links to use to generate a series of posts. 22 | */ 23 | contextLinks: Annotation, 24 | /** 25 | * The additional context to use for generating posts. 26 | */ 27 | additionalContexts: Annotation, 28 | /** 29 | * The pageContents field is required as it's the input to the generateReportGraph. 
30 | * It will contain a string, combining the above originalContent and additionalContexts. 31 | */ 32 | pageContents: Annotation, 33 | /** 34 | * The report generated on the content of the message. Used 35 | * as context for generating the post. 36 | */ 37 | reports: Annotation< 38 | Array<{ 39 | report: string; 40 | keyDetails: string; 41 | }> 42 | >({ 43 | reducer: (state, update) => state.concat(update), 44 | default: () => [], 45 | }), 46 | /** 47 | * The image options extracted from the original/additional contexts. 48 | */ 49 | imageOptions: Annotation(), 50 | /** 51 | * The generated campaign plan to generate posts from. 52 | */ 53 | campaignPlan: Annotation, 54 | /** 55 | * The generated posts for LinkedIn/Twitter. 56 | */ 57 | posts: Annotation, 58 | /** 59 | * The generated post for LinkedIn/Twitter. 60 | */ 61 | post: Annotation, 62 | /** 63 | * A human response if the user submitted feedback after the interrupt. 64 | */ 65 | userResponse: Annotation, 66 | /** 67 | * The next node to execute. 68 | */ 69 | next: Annotation< 70 | "rewritePost" | "schedulePost" | "unknownResponse" | typeof END 71 | >(), 72 | /** 73 | * The image to use for the post. 74 | */ 75 | image: Annotation, 76 | /** 77 | * The date to schedule the posts for. Only one priority level can be specified. 78 | * If a date is specified, every post will be posted on that date. 79 | * (this is only intended to be used for testing/single posts) 80 | */ 81 | scheduleDate: Annotation(), 82 | }); 83 | 84 | export type RepurposerPostInterruptState = 85 | typeof RepurposerPostInterruptAnnotation.State; 86 | export type RepurposerPostInterruptUpdate = 87 | typeof RepurposerPostInterruptAnnotation.Update; 88 | 89 | export const RepurposerPostInterruptConfigurableAnnotation = Annotation.Root({ 90 | /** 91 | * Whether to post to the LinkedIn organization or the user's profile. 92 | * If true, [LINKEDIN_ORGANIZATION_ID] is required. 
93 | */ 94 | [POST_TO_LINKEDIN_ORGANIZATION]: Annotation, 95 | }); 96 | -------------------------------------------------------------------------------- /src/agents/repurposer/index.ts: -------------------------------------------------------------------------------- 1 | import { END, START, StateGraph } from "@langchain/langgraph"; 2 | import { 3 | RepurposerGraphAnnotation, 4 | RepurposerInputAnnotation, 5 | RepurposerConfigurableAnnotation, 6 | } from "./types.js"; 7 | import { extractContent } from "./nodes/extract-content/index.js"; 8 | import { generateCampaignPlan } from "./nodes/generate-campaign-plan.js"; 9 | import { generatePosts } from "./nodes/generate-posts.js"; 10 | import { generateReportGraph } from "../generate-report/index.js"; 11 | import { validateImages } from "./nodes/validate-images.js"; 12 | import { startInterruptGraphRuns } from "./nodes/start-interrupt-graph.js"; 13 | 14 | const repurposerBuilder = new StateGraph( 15 | { 16 | stateSchema: RepurposerGraphAnnotation, 17 | input: RepurposerInputAnnotation, 18 | }, 19 | RepurposerConfigurableAnnotation, 20 | ) 21 | .addNode("extractContent", extractContent) 22 | .addNode("validateImages", validateImages) 23 | .addNode("generateReport", generateReportGraph) 24 | .addNode("generateCampaignPlan", generateCampaignPlan) 25 | .addNode("generatePosts", generatePosts) 26 | .addNode("startInterruptGraphRuns", startInterruptGraphRuns) 27 | 28 | .addEdge(START, "extractContent") 29 | .addEdge("extractContent", "validateImages") 30 | .addEdge("validateImages", "generateReport") 31 | .addEdge("generateReport", "generateCampaignPlan") 32 | .addEdge("generateCampaignPlan", "generatePosts") 33 | .addEdge("generatePosts", "startInterruptGraphRuns") 34 | .addEdge("startInterruptGraphRuns", END); 35 | 36 | export const repurposerGraph = repurposerBuilder.compile(); 37 | 38 | repurposerGraph.name = "Repurposer Graph"; 39 | -------------------------------------------------------------------------------- 
/src/agents/repurposer/nodes/extract-content/index.ts: -------------------------------------------------------------------------------- 1 | import { RepurposerState } from "../../types.js"; 2 | import { getUrlContents } from "./get-url-contents.js"; 3 | 4 | export async function extractContent( 5 | state: RepurposerState, 6 | ): Promise> { 7 | const { contents: originalContent, imageUrls } = await getUrlContents( 8 | state.originalLink, 9 | ); 10 | const originalContentPrompt = `Here is the original content. This content is the basis of the new marketing campaign. This post has already been shared, so use this as a base for the new campaign building on top of this post: 11 | 12 | 13 | ${originalContent} 14 | `; 15 | 16 | if (!state.contextLinks?.length) { 17 | return { 18 | pageContents: [originalContentPrompt], 19 | imageOptions: imageUrls, 20 | originalContent, 21 | }; 22 | } 23 | 24 | const additionalContextPromises = state.contextLinks.map(async (link) => { 25 | const { contents, imageUrls } = await getUrlContents(link); 26 | return { 27 | content: contents, 28 | link, 29 | imageUrls, 30 | }; 31 | }); 32 | const additionalContexts = await Promise.all(additionalContextPromises); 33 | 34 | const masterPageContent = `${originalContentPrompt} 35 | 36 | Here is additional related context you should use in the new marketing campaign. 
This context has not been released yet, so use this as the new context for this marketing campaign: 37 | 38 | ${additionalContexts 39 | .map( 40 | ({ content, link }, index) => ` 41 | ${content} 42 | `, 43 | ) 44 | .join("\n")} 45 | `; 46 | 47 | return { 48 | pageContents: [masterPageContent], 49 | imageOptions: [ 50 | ...imageUrls, 51 | ...additionalContexts.flatMap((c) => c.imageUrls || []), 52 | ], 53 | originalContent, 54 | additionalContexts: additionalContexts.map((c) => ({ 55 | content: c.content, 56 | link: c.link, 57 | })), 58 | }; 59 | } 60 | -------------------------------------------------------------------------------- /src/agents/repurposer/nodes/start-interrupt-graph.ts: -------------------------------------------------------------------------------- 1 | import { Client } from "@langchain/langgraph-sdk"; 2 | import { RepurposerState, RepurposerUpdate } from "../types.js"; 3 | 4 | export async function startInterruptGraphRuns( 5 | state: RepurposerState, 6 | ): Promise { 7 | const client = new Client({ 8 | apiUrl: `http://localhost:${process.env.PORT}`, 9 | }); 10 | 11 | const runsPromise = await Promise.all( 12 | state.posts.map(async (post, index) => { 13 | const image = state.images.find((i) => i.index === index); 14 | const { thread_id } = await client.threads.create(); 15 | await client.runs.create(thread_id, "repurposer_post_interrupt", { 16 | input: { 17 | post: post.content, 18 | image, 19 | originalLink: state.originalLink, 20 | originalContent: state.originalContent, 21 | contextLinks: state.contextLinks, 22 | additionalContexts: state.additionalContexts, 23 | reports: state.reports, 24 | imageOptions: state.imageOptions, 25 | posts: state.posts, 26 | campaignPlan: state.campaignPlan, 27 | }, 28 | }); 29 | }), 30 | ); 31 | 32 | await Promise.all(runsPromise); 33 | 34 | return {}; 35 | } 36 | -------------------------------------------------------------------------------- /src/agents/repurposer/tests/graph.int.test.ts: 
-------------------------------------------------------------------------------- 1 | import * as ls from "langsmith/jest"; 2 | import { SimpleEvaluator } from "langsmith/jest"; 3 | import { repurposerGraph } from "../index.js"; 4 | 5 | const tweetEvaluator: SimpleEvaluator = () => { 6 | return { 7 | key: "content_extraction", 8 | score: 1, 9 | }; 10 | }; 11 | 12 | ls.describe("SMA - Repurposer", () => { 13 | ls.test( 14 | "Can extract content", 15 | { 16 | inputs: { 17 | originalLink: "https://x.com/LangChainAI/status/1857117443065540707", 18 | quantity: 3, 19 | }, 20 | expected: {}, 21 | }, 22 | async ({ inputs }) => { 23 | const result = await repurposerGraph.nodes.extractContent.invoke( 24 | inputs as any, 25 | ); 26 | 27 | console.log("Result\n"); 28 | console.dir(result, { depth: null }); 29 | 30 | await ls.expect(result).evaluatedBy(tweetEvaluator).toBe(1); 31 | return result; 32 | }, 33 | ); 34 | }); 35 | -------------------------------------------------------------------------------- /src/agents/repurposer/tests/images.test.ts: -------------------------------------------------------------------------------- 1 | import { test, expect } from "@jest/globals"; 2 | import { parseResult } from "../nodes/validate-images.js"; 3 | 4 | test("Can extract indices from a string", () => { 5 | const str = 6 | '```\n\n1. \n Image 16: This image is a bar graph showing the "buzziest AI agent applications," with Cursor, Perplexity, and Replit as the top three. This directly relates to the section "Agent success stories: Cursor steals the spotlight" in the text, making it highly relevant.\n Image 17: This image is a decorative graphic and doesn\'t provide any specific information related to the content of the webpage. It should be excluded.\n Image 18: This image, similar to image 17, is a decorative graphic and doesn\'t offer any relevant information. It should also be excluded.\n\n2. 
\n 16\n\n```\n'; 7 | 8 | const indices = parseResult(str); 9 | expect(indices).toEqual([16]); 10 | }); 11 | -------------------------------------------------------------------------------- /src/agents/repurposer/utils.ts: -------------------------------------------------------------------------------- 1 | export function formatReportForPrompt(report: { 2 | report: string; 3 | keyDetails: string; 4 | }): string { 5 | return `\n${report.keyDetails}\n\n\n\n${report.report}\n`; 6 | } 7 | -------------------------------------------------------------------------------- /src/agents/shared/nodes/generate-post/rewrite-post.ts: -------------------------------------------------------------------------------- 1 | import { Client } from "@langchain/langgraph-sdk"; 2 | import { LangGraphRunnableConfig } from "@langchain/langgraph"; 3 | import { BaseGeneratePostState, BaseGeneratePostUpdate } from "./types.js"; 4 | import { ChatAnthropic } from "@langchain/anthropic"; 5 | import { 6 | getReflectionsPrompt, 7 | REFLECTIONS_PROMPT, 8 | } from "../../../../utils/reflections.js"; 9 | 10 | const REWRITE_POST_PROMPT = `You're a highly regarded marketing employee, working on crafting thoughtful and engaging content for the LinkedIn and Twitter pages. 11 | You wrote a post for the LinkedIn and Twitter pages, however your boss has asked for some changes to be made before it can be published. 12 | 13 | The original post you wrote is as follows: 14 | 15 | {originalPost} 16 | 17 | 18 | {reflectionsPrompt} 19 | 20 | Listen to your boss closely, and make the necessary changes to the post. You should respond ONLY with the updated post, with no additional information, or text before or after the post.`; 21 | 22 | interface RunReflectionsArgs { 23 | originalPost: string; 24 | newPost: string; 25 | userResponse: string; 26 | } 27 | 28 | /** 29 | * Kick off a new run to generate reflections. 
30 | * @param param0 31 | */ 32 | async function runReflections({ 33 | originalPost, 34 | newPost, 35 | userResponse, 36 | }: RunReflectionsArgs) { 37 | const client = new Client({ 38 | apiUrl: `http://localhost:${process.env.PORT}`, 39 | }); 40 | 41 | const thread = await client.threads.create(); 42 | await client.runs.create(thread.thread_id, "reflection", { 43 | input: { 44 | originalPost, 45 | newPost, 46 | userResponse, 47 | }, 48 | }); 49 | } 50 | 51 | export async function rewritePost< 52 | State extends BaseGeneratePostState = BaseGeneratePostState, 53 | Update extends BaseGeneratePostUpdate = BaseGeneratePostUpdate, 54 | >(state: State, config: LangGraphRunnableConfig): Promise { 55 | if (!state.post) { 56 | throw new Error("No post found"); 57 | } 58 | if (!state.userResponse) { 59 | throw new Error("No user response found"); 60 | } 61 | 62 | const rewritePostModel = new ChatAnthropic({ 63 | model: "claude-3-5-sonnet-latest", 64 | temperature: 0.5, 65 | }); 66 | 67 | const reflections = await getReflectionsPrompt(config); 68 | const reflectionsPrompt = REFLECTIONS_PROMPT.replace( 69 | "{reflections}", 70 | reflections, 71 | ); 72 | 73 | const systemPrompt = REWRITE_POST_PROMPT.replace( 74 | "{originalPost}", 75 | state.post, 76 | ).replace("{reflectionsPrompt}", reflectionsPrompt); 77 | 78 | const revisePostResponse = await rewritePostModel.invoke([ 79 | { 80 | role: "system", 81 | content: systemPrompt, 82 | }, 83 | { 84 | role: "user", 85 | content: state.userResponse, 86 | }, 87 | ]); 88 | 89 | await runReflections({ 90 | originalPost: state.post, 91 | newPost: revisePostResponse.content as string, 92 | userResponse: state.userResponse, 93 | }); 94 | 95 | return { 96 | post: revisePostResponse.content as string, 97 | next: undefined, 98 | userResponse: undefined, 99 | } as Update; 100 | } 101 | -------------------------------------------------------------------------------- /src/agents/shared/nodes/generate-post/types.ts: 
-------------------------------------------------------------------------------- 1 | import { Annotation } from "@langchain/langgraph"; 2 | import { DateType } from "../../../types.js"; 3 | import { IngestDataAnnotation } from "../../../ingest-data/ingest-data-state.js"; 4 | import { VerifyLinksResultAnnotation } from "../../../verify-links/verify-links-state.js"; 5 | 6 | export type ComplexPost = { 7 | /** 8 | * The main post content. 9 | */ 10 | main_post: string; 11 | /** 12 | * The reply post content. 13 | */ 14 | reply_post: string; 15 | }; 16 | 17 | const BaseGeneratePostAnnotation = Annotation.Root({ 18 | /** 19 | * The generated post for LinkedIn/Twitter. 20 | */ 21 | post: Annotation, 22 | /** 23 | * The complex post, if the user decides to split the URL from the main body. 24 | * 25 | * TODO: Refactor the post/complexPost state interfaces to use a single shared interface 26 | * which includes images too. 27 | * Tracking issue: https://github.com/langchain-ai/social-media-agent/issues/144 28 | */ 29 | complexPost: Annotation, 30 | /** 31 | * The date to schedule the post for. 32 | */ 33 | scheduleDate: Annotation, 34 | /** 35 | * The image to attach to the post, and the MIME type. 36 | */ 37 | image: Annotation< 38 | | { 39 | imageUrl: string; 40 | mimeType: string; 41 | } 42 | | undefined 43 | >, 44 | /** 45 | * The links to use to generate a post. 46 | */ 47 | links: Annotation, 48 | /** 49 | * The report generated on the content of the message. Used 50 | * as context for generating the post. 51 | */ 52 | report: IngestDataAnnotation.spec.report, 53 | ...VerifyLinksResultAnnotation.spec, 54 | /** 55 | * The node to execute next. 56 | */ 57 | next: Annotation, 58 | /** 59 | * Response from the user for the post. Typically used to request 60 | * changes to be made to the post. 
61 | */ 62 | userResponse: Annotation, 63 | }); 64 | 65 | export type BaseGeneratePostState = typeof BaseGeneratePostAnnotation.State; 66 | export type BaseGeneratePostUpdate = typeof BaseGeneratePostAnnotation.Update; 67 | -------------------------------------------------------------------------------- /src/agents/shared/nodes/update-scheduled-date.ts: -------------------------------------------------------------------------------- 1 | import { z } from "zod"; 2 | import { ChatAnthropic } from "@langchain/anthropic"; 3 | import { toZonedTime } from "date-fns-tz"; 4 | import { DateType } from "../../types.js"; 5 | import { timezoneToUtc } from "../../../utils/date.js"; 6 | 7 | const SCHEDULE_POST_DATE_PROMPT = `You're an intelligent AI assistant tasked with extracting the date to schedule a social media post from the user's message. 8 | 9 | The user may respond with either: 10 | 1. A priority level (P1, P2, P3) 11 | - **P1**: Saturday/Sunday between 8:00 AM and 10:00 AM PST. 12 | - **P2**: Friday/Monday between 8:00 AM and 10:00 AM PST _OR_ Saturday/Sunday between 11:30 AM and 1:00 PM PST. 13 | - **P3**: Saturday/Sunday between 1:00 PM and 5:00 PM PST. 14 | 2. A date 15 | 16 | Your task is to extract the date/priority level from the user's message and return it in a structured format the system can handle. 17 | 18 | If the user's message is asking for a date, convert it to the following format: 19 | 'MM/dd/yyyy hh:mm a z'. Example: '12/25/2024 10:00 AM PST' 20 | Always use PST for the timezone. If they don't specify a time, you can make one up, as long as it's between 8:00 AM and 3:00 PM PST (5 minute intervals). 
21 | 22 | If the user's message is asking for a priority level, return it in the following format: 23 | 'p1', 'p2', or 'p3' 24 | 25 | The current date and time (in PST) are: {currentDateAndTime} 26 | 27 | You should use this to infer the date if the user's message does not contain an exact date, 28 | Example: 'this saturday' 29 | 30 | If the user's message can not be interpreted as a date or priority level, return 'p3'.`; 31 | 32 | const scheduleDateSchema = z.object({ 33 | scheduleDate: z 34 | .string() 35 | .describe( 36 | "The date in the format 'MM/dd/yyyy hh:mm a z' or a priority level (p1, p2, p3).", 37 | ), 38 | }); 39 | 40 | export async function updateScheduledDate( 41 | state: Record, 42 | ): Promise> { 43 | if (!state.userResponse) { 44 | throw new Error("No user response found"); 45 | } 46 | const model = new ChatAnthropic({ 47 | model: "claude-3-5-sonnet-latest", 48 | temperature: 0.5, 49 | }).withStructuredOutput(scheduleDateSchema, { 50 | name: "scheduleDate", 51 | }); 52 | const pstDate = toZonedTime(new Date(), "America/Los_Angeles"); 53 | const pstDateString = pstDate.toISOString(); 54 | 55 | const prompt = SCHEDULE_POST_DATE_PROMPT.replace( 56 | "{currentDateAndTime}", 57 | pstDateString, 58 | ); 59 | 60 | const result = await model.invoke([ 61 | { 62 | role: "system", 63 | content: prompt, 64 | }, 65 | { 66 | role: "user", 67 | content: state.userResponse, 68 | }, 69 | ]); 70 | 71 | if ( 72 | typeof result.scheduleDate === "string" && 73 | ["p1", "p2", "p3"].includes(result.scheduleDate) 74 | ) { 75 | return { 76 | scheduleDate: result.scheduleDate as DateType, 77 | }; 78 | } 79 | 80 | return { 81 | next: undefined, 82 | userResponse: undefined, 83 | scheduleDate: timezoneToUtc(result.scheduleDate), 84 | }; 85 | } 86 | -------------------------------------------------------------------------------- /src/agents/shared/nodes/verify-content.ts: -------------------------------------------------------------------------------- 1 | import { 
ChatAnthropic } from "@langchain/anthropic"; 2 | import { traceable } from "langsmith/traceable"; 3 | import { z } from "zod"; 4 | 5 | const RELEVANCY_SCHEMA = z 6 | .object({ 7 | reasoning: z 8 | .string() 9 | .describe( 10 | "Reasoning for why the webpage is or isn't relevant to your company's products.", 11 | ), 12 | relevant: z 13 | .boolean() 14 | .describe( 15 | "Whether or not the webpage is relevant to your company's products.", 16 | ), 17 | }) 18 | .describe("The relevancy of the content to your company's products."); 19 | 20 | async function verifyContentIsRelevantFunc( 21 | content: string, 22 | args: { 23 | systemPrompt: string; 24 | schema: z.ZodType>; 25 | }, 26 | ): Promise { 27 | const relevancyModel = new ChatAnthropic({ 28 | model: "claude-3-7-sonnet-latest", 29 | temperature: 0, 30 | }).withStructuredOutput(args.schema, { 31 | name: "relevancy", 32 | }); 33 | 34 | const { relevant } = await relevancyModel.invoke([ 35 | { 36 | role: "system", 37 | content: args.systemPrompt, 38 | }, 39 | { 40 | role: "user", 41 | content: content, 42 | }, 43 | ]); 44 | return relevant; 45 | } 46 | 47 | /** 48 | * Verifies if the content provided is relevant based on the provided system prompt, 49 | * using the provided relevancy schema. 50 | * 51 | * @param {string} content - The content to verify. 52 | * @param {object} args - The arguments containing the system prompt and relevancy schema. 53 | * @param {string} args.systemPrompt - The system prompt to use for verification. 54 | * @param {z.ZodType>} args.schema - The relevancy schema to use for verification. 55 | * @returns {Promise} A promise that resolves to a boolean indicating whether the content is relevant. 
56 | */ 57 | export const verifyContentIsRelevant = traceable(verifyContentIsRelevantFunc, { 58 | name: "verify-content-relevancy", 59 | }); 60 | -------------------------------------------------------------------------------- /src/agents/shared/nodes/verify-youtube.ts: -------------------------------------------------------------------------------- 1 | import { z } from "zod"; 2 | import { LangGraphRunnableConfig } from "@langchain/langgraph"; 3 | import { GeneratePostAnnotation } from "../../generate-post/generate-post-state.js"; 4 | import { getPrompts } from "../../generate-post/prompts/index.js"; 5 | import { VerifyContentAnnotation } from "../shared-state.js"; 6 | import { getVideoSummary } from "../youtube/video-summary.js"; 7 | import { skipContentRelevancyCheck } from "../../utils.js"; 8 | import { verifyContentIsRelevant } from "./verify-content.js"; 9 | 10 | type VerifyYouTubeContentReturn = { 11 | relevantLinks: (typeof GeneratePostAnnotation.State)["relevantLinks"]; 12 | pageContents: (typeof GeneratePostAnnotation.State)["pageContents"]; 13 | }; 14 | 15 | const VERIFY_RELEVANT_CONTENT_PROMPT = `You are a highly regarded marketing employee. 16 | You're given a summary/report on some content a third party submitted to you in hopes of having it promoted by you. 17 | You need to verify if the content is relevant to the following context before approving or denying the request. 18 | 19 | ${getPrompts().businessContext} 20 | 21 | ${getPrompts().contentValidationPrompt} 22 | 23 | Given this context, examine the summary/report closely, and determine if the content is relevant to your company's products. 24 | You should provide reasoning as to why or why not the content is relevant to your company's products, then a simple true or false for whether or not it's relevant. 
`;

const RELEVANCY_SCHEMA = z
  .object({
    reasoning: z
      .string()
      .describe(
        "Reasoning for why the content is or isn't relevant to your company's products.",
      ),
    relevant: z
      .boolean()
      .describe(
        "Whether or not the content is relevant to your company's products.",
      ),
  })
  .describe("The relevancy of the content to your company's products.");

/**
 * Verifies the content provided is relevant to your company's products.
 */
export async function verifyYouTubeContent(
  state: typeof VerifyContentAnnotation.State,
  config: LangGraphRunnableConfig,
): Promise<VerifyYouTubeContentReturn> {
  const { summary, thumbnail } = await getVideoSummary(state.link);

  // Result returned whenever the video is accepted (relevancy check skipped
  // or passed). The thumbnail, when present, is offered as an image option.
  const acceptedResult = {
    relevantLinks: [state.link],
    pageContents: [summary as string],
    ...(thumbnail ? { imageOptions: [thumbnail] } : {}),
  };

  if (await skipContentRelevancyCheck(config.configurable)) {
    return acceptedResult;
  }

  const isRelevant = await verifyContentIsRelevant(summary, {
    systemPrompt: VERIFY_RELEVANT_CONTENT_PROMPT,
    schema: RELEVANCY_SCHEMA,
  });
  if (isRelevant) {
    return acceptedResult;
  }

  // Not relevant, return empty arrays so this URL is not included.
  return {
    relevantLinks: [],
    pageContents: [],
  };
}
--------------------------------------------------------------------------------
/src/agents/shared/shared-state.ts:
--------------------------------------------------------------------------------
import { Annotation } from "@langchain/langgraph";

export const VerifyContentAnnotation = Annotation.Root({
  /**
   * The link to the content to verify.
   */
  link: Annotation<string>,
});
--------------------------------------------------------------------------------
/src/agents/shared/stores/post-subject-urls.ts:
--------------------------------------------------------------------------------
import { LangGraphRunnableConfig } from "@langchain/langgraph";
import { skipUsedUrlsCheck } from "../../utils.js";

// Store location for the set of URLs already used in published posts.
const NAMESPACE = ["saved_data", "used_urls"];
const KEY = "urls";
const OBJECT_KEY = "data";

/**
 * Save a list of URLs of which have been included in posts already.
 *
 * @param urls The list of URLs to save
 * @param config The configuration for the langgraph
 * @param overwrite Whether to overwrite the stored URLs if they already exist.
 *  If true, it will overwrite. If false (default), it will first fetch the current stored URLs and add the new ones.
 * @returns {Promise<void>}
 */
export async function saveUsedUrls(
  urls: string[],
  config: LangGraphRunnableConfig,
  overwrite = false,
): Promise<void> {
  if (await skipUsedUrlsCheck(config.configurable)) {
    return;
  }

  const store = config.store;
  if (!store) {
    throw new Error("No store provided");
  }

  // De-duplicate via a Set; unless overwriting, merge with what is stored.
  const urlsToSave = new Set(urls);
  if (!overwrite) {
    const existingUrls = await getSavedUrls(config);
    for (const url of existingUrls) {
      urlsToSave.add(url);
    }
  }

  await store.put(NAMESPACE, KEY, {
    [OBJECT_KEY]: Array.from(urlsToSave),
  });
}

/**
 * Get the list of URLs of which have been included in posts already.
 *
 * @param config The configuration for the langgraph
 * @returns {Promise<string[]>} The list of URLs which have been included in posts already.
47 | */ 48 | export async function getSavedUrls( 49 | config: LangGraphRunnableConfig, 50 | ): Promise { 51 | if (await skipUsedUrlsCheck(config.configurable)) { 52 | return []; 53 | } 54 | 55 | const store = config.store; 56 | if (!store) { 57 | throw new Error("No store provided"); 58 | } 59 | const urls = await store.get(NAMESPACE, KEY); 60 | if (!urls) { 61 | return []; 62 | } 63 | return urls.value?.[OBJECT_KEY] || []; 64 | } 65 | -------------------------------------------------------------------------------- /src/agents/should-exclude.ts: -------------------------------------------------------------------------------- 1 | import { useLangChainPrompts } from "./utils.js"; 2 | 3 | export const LANGCHAIN_DOMAINS = [ 4 | "langchain.com", 5 | "langchain.dev", 6 | "langchain-ai.github.io", 7 | ]; 8 | 9 | export function shouldExcludeGeneralContent(url: string): boolean { 10 | // Do not exclude any content if USE_LANGCHAIN_PROMPTS is not set to true. 11 | if (!useLangChainPrompts()) { 12 | return false; 13 | } 14 | 15 | // We don't want to generate posts on LangChain website content. 16 | if (LANGCHAIN_DOMAINS.some((lcUrl) => url.includes(lcUrl))) { 17 | return true; 18 | } 19 | 20 | return false; 21 | } 22 | 23 | export function shouldExcludeGitHubContent(link: string): boolean { 24 | // Do not exclude any content if USE_LANGCHAIN_PROMPTS is not set to true. 25 | if (!useLangChainPrompts()) { 26 | return false; 27 | } 28 | 29 | const langChainGitHubOrg = "github.com/langchain-ai/"; 30 | // Do not generate posts on LangChain repos. 31 | return link.includes(langChainGitHubOrg); 32 | } 33 | 34 | export function shouldExcludeYouTubeContent(channelName: string): boolean { 35 | // Do not exclude any content if USE_LANGCHAIN_PROMPTS is not set to true. 
36 | if (!useLangChainPrompts()) { 37 | return false; 38 | } 39 | 40 | return channelName.toLowerCase() === "langchain"; 41 | } 42 | 43 | export function shouldExcludeTweetContent(externalUrls: string[]): boolean { 44 | // Do not exclude any content if USE_LANGCHAIN_PROMPTS is not set to true. 45 | if (!useLangChainPrompts()) { 46 | return false; 47 | } 48 | 49 | // If there are no external URLs, then we should exclude the tweet. Return true. 50 | return externalUrls.length === 0; 51 | } 52 | -------------------------------------------------------------------------------- /src/agents/supervisor/nodes/generate-posts.ts: -------------------------------------------------------------------------------- 1 | import { Client } from "@langchain/langgraph-sdk"; 2 | import { SupervisorState } from "../supervisor-state.js"; 3 | import { extractUrls } from "../../utils.js"; 4 | import { SlackClient } from "../../../clients/slack/client.js"; 5 | 6 | export async function generatePosts( 7 | state: SupervisorState, 8 | ): Promise> { 9 | const client = new Client({ 10 | apiUrl: `http://localhost:${process.env.PORT}`, 11 | }); 12 | 13 | const idsAndTypes: Array<{ 14 | type: "thread" | "post"; 15 | thread_id: string; 16 | run_id: string; 17 | }> = []; 18 | 19 | for await (const reportAndPostType of state.reportAndPostType) { 20 | const { thread_id } = await client.threads.create(); 21 | const reportsMapped = reportAndPostType.reports.map((report, index) => { 22 | if (!reportAndPostType.keyDetails[index]) { 23 | return report; 24 | } 25 | 26 | return `# Report Key Details:\n${reportAndPostType.keyDetails[index]}\n\n${report}`; 27 | }); 28 | if (reportAndPostType.type === "thread") { 29 | const run = await client.runs.create(thread_id, "generate_thread", { 30 | input: { 31 | reports: reportsMapped, 32 | }, 33 | }); 34 | 35 | idsAndTypes.push({ 36 | type: "thread", 37 | thread_id, 38 | run_id: run.run_id, 39 | }); 40 | } else { 41 | const reportString = reportsMapped.join("\n"); 42 | 
const linksInReport = extractUrls(reportString); 43 | const run = await client.runs.create(thread_id, "generate_post", { 44 | input: {}, 45 | command: { 46 | goto: "generatePost", 47 | update: { 48 | report: reportString, 49 | links: [linksInReport?.[0] || ""], 50 | relevantLinks: [linksInReport?.[0] || ""], 51 | }, 52 | }, 53 | }); 54 | 55 | idsAndTypes.push({ 56 | type: "post", 57 | thread_id, 58 | run_id: run.run_id, 59 | }); 60 | } 61 | } 62 | 63 | if (!process.env.SLACK_CHANNEL_ID || !process.env.SLACK_BOT_OAUTH_TOKEN) { 64 | return {}; 65 | } 66 | 67 | const slackClient = new SlackClient(); 68 | 69 | const messageText = `*Ingested data successfully processed* 70 | 71 | Number of threads: *${idsAndTypes.filter((x) => x.type === "thread").length}* 72 | Number of individual posts: *${idsAndTypes.filter((x) => x.type === "post").length}* 73 | 74 | Thread post IDs: 75 | ${idsAndTypes 76 | .filter((x) => x.type === "thread") 77 | .map((x) => `- *${x.thread_id}* : *${x.run_id}*`) 78 | .join("\n")} 79 | 80 | Single post IDs: 81 | ${idsAndTypes 82 | .filter((x) => x.type === "post") 83 | .map((x) => `- *${x.thread_id}* : *${x.run_id}*`) 84 | .join("\n")}`; 85 | 86 | await slackClient.sendMessage(process.env.SLACK_CHANNEL_ID, messageText); 87 | 88 | return { 89 | idsAndTypes, 90 | }; 91 | } 92 | -------------------------------------------------------------------------------- /src/agents/supervisor/supervisor-graph.ts: -------------------------------------------------------------------------------- 1 | import { Annotation, END, Send, START, StateGraph } from "@langchain/langgraph"; 2 | import { 3 | SupervisorAnnotation, 4 | SupervisorConfigurableAnnotation, 5 | SupervisorState, 6 | } from "./supervisor-state.js"; 7 | import { curateDataGraph } from "../curate-data/index.js"; 8 | import { convertPostToString } from "../verify-reddit-post/utils.js"; 9 | import { generateReportGraph } from "../generate-report/index.js"; 10 | import { groupReports } from 
"./nodes/group-reports.js"; 11 | import { determinePostType } from "./nodes/determine-post-type.js"; 12 | import { generatePosts } from "./nodes/generate-posts.js"; 13 | 14 | function startGenerateReportRuns(state: SupervisorState): Send[] { 15 | const { 16 | tweetsGroupedByContent, 17 | githubTrendingData, 18 | generalContents, 19 | redditPosts, 20 | } = state.curatedData; 21 | const tweetSends = 22 | tweetsGroupedByContent?.map((tweetGroup) => { 23 | return new Send("generateReport", { 24 | tweetGroup, 25 | }); 26 | }) || []; 27 | const githubSends = 28 | githubTrendingData?.map((ghTrendingItem) => { 29 | return new Send("generateReport", { 30 | pageContent: [ghTrendingItem.pageContent], 31 | relevantLinks: [ghTrendingItem.repoURL], 32 | }); 33 | }) || []; 34 | const generalSends = 35 | generalContents?.map((gc) => { 36 | return new Send("generateReport", { 37 | pageContent: [gc.pageContent], 38 | relevantLinks: gc.relevantLinks, 39 | }); 40 | }) || []; 41 | const redditSends = 42 | redditPosts?.map((rp) => { 43 | return new Send("generateReport", { 44 | pageContent: [convertPostToString(rp)], 45 | relevantLinks: [rp.post.url], 46 | }); 47 | }) || []; 48 | 49 | return [...tweetSends, ...githubSends, ...generalSends, ...redditSends]; 50 | } 51 | 52 | const supervisorWorkflow = new StateGraph( 53 | { stateSchema: SupervisorAnnotation, input: Annotation.Root({}) }, 54 | SupervisorConfigurableAnnotation, 55 | ) 56 | // Calls the curate-data agent to fetch data from different sources. 57 | // This also means grouping the data into related groups, and expanding 58 | // the external URLs found in the tweets. 
59 | .addNode("ingestData", curateDataGraph) 60 | .addNode("generateReport", generateReportGraph) 61 | .addNode("groupReports", groupReports) 62 | .addNode("determinePostType", determinePostType) 63 | .addNode("generatePosts", generatePosts) 64 | 65 | .addEdge(START, "ingestData") 66 | .addConditionalEdges("ingestData", startGenerateReportRuns, [ 67 | "generateReport", 68 | ]) 69 | .addEdge("generateReport", "groupReports") 70 | .addEdge("groupReports", "determinePostType") 71 | .addEdge("determinePostType", "generatePosts") 72 | .addEdge("generatePosts", END); 73 | 74 | export const supervisorGraph = supervisorWorkflow.compile(); 75 | supervisorGraph.name = "Supervisor Graph"; 76 | -------------------------------------------------------------------------------- /src/agents/supervisor/supervisor-state.ts: -------------------------------------------------------------------------------- 1 | import { Annotation } from "@langchain/langgraph"; 2 | import { Source } from "./types.js"; 3 | import { CuratedData } from "../curate-data/types.js"; 4 | 5 | export const SupervisorAnnotation = Annotation.Root({ 6 | /** 7 | * The final data object from ingesting all sources. 8 | */ 9 | curatedData: Annotation, 10 | /** 11 | * A list of reports, each containing a report & key details on a given data source/data group. 12 | * The report is used for generating a post, and key details are used for identifying reports on the same topic. 13 | */ 14 | reports: Annotation< 15 | Array<{ 16 | report: string; 17 | keyDetails: string; 18 | }> 19 | >({ 20 | reducer: (state, update) => state.concat(update), 21 | default: () => [], 22 | }), 23 | /** 24 | * The list of reports after they have been grouped. 25 | */ 26 | groupedReports: Annotation< 27 | Array<{ 28 | reports: string[]; 29 | keyDetails: string[]; 30 | }> 31 | >, 32 | /** 33 | * The report and type of post to generate. 
34 | */ 35 | reportAndPostType: Annotation< 36 | Array<{ 37 | reports: string[]; 38 | keyDetails: string[]; 39 | reason: string; 40 | type: "thread" | "post"; 41 | }> 42 | >, 43 | /** 44 | * Thread and run IDs, along with the type of post to generate. 45 | */ 46 | idsAndTypes: Annotation< 47 | Array<{ 48 | type: "thread" | "post"; 49 | thread_id: string; 50 | run_id: string; 51 | }> 52 | >({ 53 | reducer: (state, update) => state.concat(update), 54 | default: () => [], 55 | }), 56 | }); 57 | 58 | export const SupervisorConfigurableAnnotation = Annotation.Root({ 59 | /** 60 | * The sources to ingest from. 61 | */ 62 | sources: Annotation, 63 | }); 64 | 65 | export type SupervisorState = typeof SupervisorAnnotation.State; 66 | -------------------------------------------------------------------------------- /src/agents/supervisor/tests/e2e.int.test.ts: -------------------------------------------------------------------------------- 1 | import * as ls from "langsmith/jest"; 2 | import { SimpleEvaluator } from "langsmith/jest"; 3 | import { Client } from "@langchain/langgraph-sdk"; 4 | import { Client as LSClient } from "langsmith"; 5 | import { SupervisorState } from "../supervisor-state.js"; 6 | 7 | const e2eEvaluator: SimpleEvaluator = () => { 8 | return { 9 | key: "successful_run", 10 | score: 1, 11 | }; 12 | }; 13 | 14 | ls.describe("SMA - Supervisor - E2E", () => { 15 | ls.test( 16 | "Can run end-to-end for Twitter posts", 17 | { 18 | inputs: {}, 19 | expected: {}, 20 | }, 21 | async () => { 22 | const lsClient = new LSClient(); 23 | const ingestDataExamples = lsClient.listExamples({ 24 | datasetId: "4ebe89f0-f008-4d97-b8dd-86b70221ab0f", 25 | exampleIds: ["a112b870-6826-472e-8f67-317068d5a8bb"], 26 | }); 27 | let inputs: Record = {}; 28 | for await (const ex of ingestDataExamples) { 29 | inputs = { 30 | ...ex.inputs, 31 | }; 32 | } 33 | const client = new Client({ 34 | // apiUrl: `http://localhost:${process.env.PORT || "2024"}`, 35 | apiUrl: 
"http://localhost:54367", 36 | }); 37 | 38 | console.log("Before invoking supervisor graph", inputs); 39 | const { thread_id } = await client.threads.create(); 40 | const result = await client.runs.wait(thread_id, "supervisor", { 41 | input: inputs, 42 | config: { 43 | configurable: { 44 | sources: ["twitter"], 45 | }, 46 | }, 47 | }); 48 | console.log("After invoking supervisor graph"); 49 | 50 | const { idsAndTypes } = result as SupervisorState; 51 | 52 | console.log("Waiting for all generate post results"); 53 | const allGeneratePostResults = await Promise.allSettled( 54 | idsAndTypes.map(async ({ thread_id, run_id, type }) => { 55 | const result = await client.runs.join(thread_id, run_id); 56 | console.log(`Got generate ${type} result:\n`, result); 57 | return result; 58 | }), 59 | ); 60 | 61 | await ls.expect(allGeneratePostResults).evaluatedBy(e2eEvaluator).toBe(1); 62 | return result; 63 | }, 64 | 480000, // 8 minutes 65 | ); 66 | }); 67 | -------------------------------------------------------------------------------- /src/agents/supervisor/types.ts: -------------------------------------------------------------------------------- 1 | export type Source = 2 | | "github" 3 | | "twitter" 4 | | "latent_space" 5 | | "ai_news" 6 | | "reddit"; 7 | -------------------------------------------------------------------------------- /src/agents/types.ts: -------------------------------------------------------------------------------- 1 | export type DateType = Date | "p1" | "p2" | "p3" | "r1" | "r2" | "r3"; 2 | 3 | export type Image = { imageUrl: string; mimeType: string }; 4 | 5 | export type AdditionalContext = { 6 | /** 7 | * The string content from the link. 8 | */ 9 | content: string; 10 | /** 11 | * The link from which the content was extracted. 12 | */ 13 | link: string; 14 | }; 15 | 16 | export type RepurposedPost = { 17 | /** 18 | * The content of the specific post. 19 | */ 20 | content: string; 21 | /** 22 | * The index of the post in the series. 
23 | */ 24 | index: number; 25 | }; 26 | -------------------------------------------------------------------------------- /src/agents/verify-links/verify-links-graph.ts: -------------------------------------------------------------------------------- 1 | import { END, Send, START, StateGraph } from "@langchain/langgraph"; 2 | import { VerifyContentAnnotation } from "../shared/shared-state.js"; 3 | import { verifyYouTubeContent } from "../shared/nodes/verify-youtube.js"; 4 | import { verifyGeneralContent } from "../shared/nodes/verify-general.js"; 5 | import { verifyGitHubContent } from "../shared/nodes/verify-github.js"; 6 | import { verifyTweetGraph } from "../verify-tweet/verify-tweet-graph.js"; 7 | import { 8 | VerifyLinksGraphAnnotation, 9 | VerifyLinksGraphConfigurableAnnotation, 10 | } from "./verify-links-state.js"; 11 | import { getUrlType } from "../utils.js"; 12 | import { verifyRedditPostGraph } from "../verify-reddit-post/verify-reddit-post-graph.js"; 13 | import { VerifyRedditPostAnnotation } from "../verify-reddit-post/verify-reddit-post-state.js"; 14 | import { verifyLumaEvent } from "../shared/nodes/verify-luma.js"; 15 | 16 | function routeLinkTypes(state: typeof VerifyLinksGraphAnnotation.State) { 17 | return state.links.map((link) => { 18 | const type = getUrlType(link); 19 | if (type === "twitter") { 20 | return new Send("verifyTweetSubGraph", { 21 | link, 22 | }); 23 | } 24 | if (type === "youtube") { 25 | return new Send("verifyYouTubeContent", { 26 | link, 27 | }); 28 | } 29 | if (type === "github") { 30 | return new Send("verifyGitHubContent", { 31 | link, 32 | }); 33 | } 34 | if (type === "reddit") { 35 | return new Send("verifyRedditContent", { 36 | link, 37 | }); 38 | } 39 | if (type === "luma") { 40 | return new Send("verifyLumaEvent", { 41 | link, 42 | }); 43 | } 44 | return new Send("verifyGeneralContent", { 45 | link, 46 | }); 47 | }); 48 | } 49 | 50 | const verifyLinksWorkflow = new StateGraph( 51 | VerifyLinksGraphAnnotation, 52 | 
VerifyLinksGraphConfigurableAnnotation, 53 | ) 54 | .addNode("verifyYouTubeContent", verifyYouTubeContent, { 55 | input: VerifyContentAnnotation, 56 | }) 57 | .addNode("verifyGeneralContent", verifyGeneralContent, { 58 | input: VerifyContentAnnotation, 59 | }) 60 | .addNode("verifyGitHubContent", verifyGitHubContent, { 61 | input: VerifyContentAnnotation, 62 | }) 63 | .addNode("verifyTweetSubGraph", verifyTweetGraph, { 64 | input: VerifyContentAnnotation, 65 | }) 66 | .addNode("verifyRedditContent", verifyRedditPostGraph, { 67 | input: VerifyRedditPostAnnotation, 68 | }) 69 | .addNode("verifyLumaEvent", verifyLumaEvent, { 70 | input: VerifyContentAnnotation, 71 | }) 72 | // Start node 73 | .addConditionalEdges(START, routeLinkTypes, [ 74 | "verifyYouTubeContent", 75 | "verifyGeneralContent", 76 | "verifyGitHubContent", 77 | "verifyTweetSubGraph", 78 | "verifyRedditContent", 79 | "verifyLumaEvent", 80 | ]) 81 | .addEdge("verifyRedditContent", END) 82 | .addEdge("verifyYouTubeContent", END) 83 | .addEdge("verifyGeneralContent", END) 84 | .addEdge("verifyGitHubContent", END) 85 | .addEdge("verifyTweetSubGraph", END) 86 | .addEdge("verifyLumaEvent", END); 87 | 88 | export const verifyLinksGraph = verifyLinksWorkflow.compile(); 89 | verifyLinksGraph.name = "Verify Links Subgraph"; 90 | -------------------------------------------------------------------------------- /src/agents/verify-links/verify-links-state.ts: -------------------------------------------------------------------------------- 1 | import { Annotation } from "@langchain/langgraph"; 2 | import { filterUnwantedImageUrls } from "../utils.js"; 3 | import { SKIP_CONTENT_RELEVANCY_CHECK } from "../generate-post/constants.js"; 4 | 5 | export const VerifyLinksGraphSharedAnnotation = Annotation.Root({ 6 | /** 7 | * The links to verify. 
8 | */ 9 | links: Annotation, 10 | }); 11 | 12 | const sharedLinksReducer = ( 13 | state: string[] | undefined, 14 | update: string[] | undefined, 15 | ) => { 16 | if (update === undefined) return undefined; 17 | // Use a set to ensure no duplicate links are added. 18 | const stateSet = new Set(state || []); 19 | update.filter((u): u is string => !!u).forEach((link) => stateSet.add(link)); 20 | return filterUnwantedImageUrls(Array.from(stateSet)); 21 | }; 22 | 23 | export const VerifyLinksResultAnnotation = Annotation.Root({ 24 | /** 25 | * Page content used in the verification nodes. Will be used in the report 26 | * generation node. 27 | */ 28 | pageContents: Annotation({ 29 | reducer: (state, update) => { 30 | if (update === undefined) return undefined; 31 | return (state || []).concat(update); 32 | }, 33 | default: () => [], 34 | }), 35 | /** 36 | * Relevant links found in the message. 37 | */ 38 | relevantLinks: Annotation({ 39 | reducer: sharedLinksReducer, 40 | default: () => [], 41 | }), 42 | /** 43 | * Image options to provide to the user. 44 | */ 45 | imageOptions: Annotation({ 46 | reducer: sharedLinksReducer, 47 | default: () => [], 48 | }), 49 | }); 50 | 51 | export const VerifyLinksGraphAnnotation = Annotation.Root({ 52 | /** 53 | * The links to verify. 54 | */ 55 | links: VerifyLinksGraphSharedAnnotation.spec.links, 56 | ...VerifyLinksResultAnnotation.spec, 57 | }); 58 | 59 | export const VerifyLinksGraphConfigurableAnnotation = Annotation.Root({ 60 | /** 61 | * Whether or not to skip the content relevancy check. 
62 | */ 63 | [SKIP_CONTENT_RELEVANCY_CHECK]: Annotation(), 64 | }); 65 | -------------------------------------------------------------------------------- /src/agents/verify-reddit-post/nodes/get-external-urls.ts: -------------------------------------------------------------------------------- 1 | import { extractUrls, getUrlType } from "../../utils.js"; 2 | import { VerifyRedditGraphState } from "../types.js"; 3 | 4 | export async function getExternalUrls( 5 | state: VerifyRedditGraphState, 6 | ): Promise> { 7 | if (!state.redditPost) { 8 | throw new Error("No reddit post found"); 9 | } 10 | const urls = extractUrls(state.redditPost.post.selftext); 11 | const filteredUrls = urls.filter((url) => getUrlType(url) !== "reddit"); 12 | 13 | const postUrl = state.redditPost.post.url; 14 | if ( 15 | postUrl && 16 | getUrlType(postUrl) !== "reddit" && 17 | !filteredUrls.includes(postUrl) 18 | ) { 19 | filteredUrls.push(postUrl); 20 | } 21 | 22 | return { 23 | externalURLs: filteredUrls, 24 | }; 25 | } 26 | -------------------------------------------------------------------------------- /src/agents/verify-reddit-post/nodes/get-post.ts: -------------------------------------------------------------------------------- 1 | import { RedditClient } from "../../../clients/reddit/client.js"; 2 | import { VerifyRedditGraphState } from "../types.js"; 3 | 4 | export async function getPost( 5 | state: VerifyRedditGraphState, 6 | ): Promise> { 7 | if (state.redditPost) { 8 | // Post already exists, don't do anything 9 | return {}; 10 | } 11 | 12 | const client = await RedditClient.fromUserless(); 13 | const redditPost = await client.getSimplePostAndComments( 14 | state.postID || state.link || "", 15 | ); 16 | 17 | return { 18 | redditPost, 19 | }; 20 | } 21 | -------------------------------------------------------------------------------- /src/agents/verify-reddit-post/tests/e2e.int.test.ts: -------------------------------------------------------------------------------- 1 | import { v4 
as uuidv4 } from "uuid";
import * as ls from "langsmith/jest";
import { type SimpleEvaluator } from "langsmith/jest";
import { InMemoryStore, MemorySaver } from "@langchain/langgraph";
import { INPUTS } from "./data/inputs-outputs.js";
import { verifyRedditPostGraph } from "../verify-reddit-post-graph.js";
import { VerifyRedditGraphState } from "../types.js";
import { BASE_VERIFY_REDDIT_CONFIG } from "../verify-reddit-post-state.js";

// Scores 1 when the presence/absence of page contents matches the expected
// relevancy verdict for the post, 0 otherwise.
const checkVerifyPostResult: SimpleEvaluator = ({ expected, actual }) => {
  const { pageContents } = actual as VerifyRedditGraphState;
  const { relevant } = expected as { relevant: boolean };

  const hasPageContentsAndLinks = pageContents && pageContents?.length > 0;

  return {
    key: "validation_result_expected",
    score: relevant
      ? Number(hasPageContentsAndLinks)
      : Number(!hasPageContentsAndLinks),
  };
};

ls.describe("SMA - Verify Reddit Post - E2E", () => {
  ls.test.each(INPUTS)(
    "Evaluates the verify reddit post agent",
    async ({ inputs }) => {
      verifyRedditPostGraph.checkpointer = new MemorySaver();
      verifyRedditPostGraph.store = new InMemoryStore();

      const threadId = uuidv4();
      const config = {
        configurable: {
          ...BASE_VERIFY_REDDIT_CONFIG,
          thread_id: threadId,
        },
      };

      const results = await verifyRedditPostGraph.invoke(inputs, config);
      console.log("Finished invoking graph with URL", inputs.link);
      await ls
        .expect(results)
        .evaluatedBy(checkVerifyPostResult)
        // Expect this to be 1, if it's 0 that means there's a discrepancy between the expected, and whether or not page contents and links were found
        .toBe(1);
      return results;
    },
  );
});
--------------------------------------------------------------------------------
/src/agents/verify-reddit-post/utils.ts:
--------------------------------------------------------------------------------
import {
  SimpleRedditComment,
  SimpleRedditPostWithComments,
} from "../../clients/reddit/types.js";

/**
 * Renders a comment tree as plain text, one "author: body" entry per comment,
 * with nested replies recursively rendered under a "Reply:" marker.
 */
export function formatComments(comments: SimpleRedditComment[]): string {
  const rendered: string[] = [];
  for (const comment of comments) {
    let entry = `${comment.author}: ${comment.body}`;
    if (comment.replies) {
      entry += "\nReply:\n" + formatComments(comment.replies);
    }
    rendered.push(entry);
  }
  return rendered.join("\n");
}

/**
 * Flattens a Reddit post plus its comment tree into a single string suitable
 * for prompting: title, selftext, and URL, followed by formatted comments.
 */
export function convertPostToString(
  redditPostWithComments: SimpleRedditPostWithComments,
): string {
  const { post, comments } = redditPostWithComments;
  const mainPost = `${post.title}
${post.selftext}
${post.url || ""}`;
  const commentText = comments ? formatComments(comments) : "";
  return commentText ? `${mainPost}\n\nComments:\n${commentText}` : mainPost;
}
--------------------------------------------------------------------------------
/src/agents/verify-reddit-post/verify-reddit-post-state.ts:
--------------------------------------------------------------------------------
import { Annotation } from "@langchain/langgraph";
import { SimpleRedditPostWithComments } from "../../clients/reddit/types.js";
import {
  VerifyLinksGraphConfigurableAnnotation,
  VerifyLinksResultAnnotation,
} from "../verify-links/verify-links-state.js";
import { SKIP_CONTENT_RELEVANCY_CHECK } from "../generate-post/constants.js";

export const VerifyRedditPostAnnotation = Annotation.Root({
  /**
   * The reddit post to verify. Optional, if not provided then a `link`, or `postID` must be provided.
   */
  redditPost: Annotation<SimpleRedditPostWithComments | undefined>,
  /**
   * A link to a Reddit post. Optional, if not provided then a `redditPost` or `postID` must be provided.
   */
  link: Annotation<string | undefined>,
  /**
   * The ID of a Reddit post.
Optional, if not provided then a `redditPost` or `link` must be provided. 20 | */ 21 | postID: Annotation, 22 | /** 23 | * The external URLs found in the body of the Reddit post. 24 | */ 25 | externalURLs: Annotation({ 26 | reducer: (state, update) => state.concat(update), 27 | default: () => [], 28 | }), 29 | // REQUIRED DUE TO USING SHARED NODES 30 | ...VerifyLinksResultAnnotation.spec, 31 | }); 32 | 33 | export const VerifyRedditPostConfigurableAnnotation = Annotation.Root({ 34 | ...VerifyLinksGraphConfigurableAnnotation.spec, 35 | }); 36 | 37 | export type VerifyRedditPostConfigurable = 38 | typeof VerifyRedditPostConfigurableAnnotation.State; 39 | 40 | export const BASE_VERIFY_REDDIT_CONFIG: VerifyRedditPostConfigurable = { 41 | [SKIP_CONTENT_RELEVANCY_CHECK]: undefined, 42 | }; 43 | -------------------------------------------------------------------------------- /src/agents/verify-tweet/nodes/get-tweet-content.ts: -------------------------------------------------------------------------------- 1 | import { VerifyTweetAnnotation } from "../verify-tweet-state.js"; 2 | import { extractTweetId } from "../../utils.js"; 3 | import { 4 | getFullThreadText, 5 | getMediaUrls, 6 | resolveAndReplaceTweetTextLinks, 7 | } from "../../../clients/twitter/utils.js"; 8 | import { TweetV2, TweetV2SingleResult } from "twitter-api-v2"; 9 | import { shouldExcludeTweetContent } from "../../should-exclude.js"; 10 | import { getTwitterClient } from "../../../clients/twitter/client.js"; 11 | 12 | export async function getTweetContent( 13 | state: typeof VerifyTweetAnnotation.State, 14 | ) { 15 | const tweetId = extractTweetId(state.link); 16 | if (!tweetId) { 17 | console.error("Failed to extract tweet ID from link:", state.link); 18 | return {}; 19 | } 20 | 21 | const twitterClient = await getTwitterClient(); 22 | 23 | let tweetContent: TweetV2SingleResult | undefined; 24 | try { 25 | tweetContent = await twitterClient.getTweet(tweetId); 26 | if (!tweetContent) { 27 | throw new 
Error("No tweet content returned from Twitter API."); 28 | } 29 | } catch (e: any) { 30 | console.error("Failed to get tweet content", e); 31 | return {}; 32 | } 33 | 34 | const threadReplies: TweetV2[] = []; 35 | try { 36 | if (tweetContent.data.author_id) { 37 | threadReplies.push( 38 | ...(await twitterClient.getThreadReplies( 39 | tweetId, 40 | tweetContent.data.author_id, 41 | )), 42 | ); 43 | } 44 | } catch (e) { 45 | console.error("Failed to get thread replies", e); 46 | } 47 | 48 | const mediaUrls = await getMediaUrls(tweetContent, threadReplies); 49 | const tweetContentText = getFullThreadText(tweetContent, threadReplies); 50 | 51 | const { content, externalUrls } = 52 | await resolveAndReplaceTweetTextLinks(tweetContentText); 53 | 54 | const shouldExclude = shouldExcludeTweetContent(externalUrls); 55 | if (shouldExclude) { 56 | return {}; 57 | } 58 | 59 | if (!externalUrls.length) { 60 | return { 61 | tweetContent: content, 62 | imageOptions: mediaUrls, 63 | }; 64 | } 65 | 66 | return { 67 | tweetContent: content, 68 | tweetContentUrls: externalUrls, 69 | imageOptions: mediaUrls, 70 | }; 71 | } 72 | -------------------------------------------------------------------------------- /src/agents/verify-tweet/verify-tweet-state.ts: -------------------------------------------------------------------------------- 1 | import { Annotation } from "@langchain/langgraph"; 2 | import { VerifyContentAnnotation } from "../shared/shared-state.js"; 3 | import { 4 | VerifyLinksGraphConfigurableAnnotation, 5 | VerifyLinksResultAnnotation, 6 | } from "../verify-links/verify-links-state.js"; 7 | 8 | export const VerifyTweetAnnotation = Annotation.Root({ 9 | /** 10 | * The link to the content to verify. 
11 | */ 12 | link: VerifyContentAnnotation.spec.link, 13 | /** 14 | * The raw content of the Tweet 15 | */ 16 | tweetContent: Annotation, 17 | /** 18 | * URLs which were found in the Tweet 19 | */ 20 | tweetContentUrls: Annotation({ 21 | reducer: (state, update) => state.concat(update), 22 | default: () => [], 23 | }), 24 | ...VerifyLinksResultAnnotation.spec, 25 | /** 26 | * Page content used in the verification nodes. Will be used in the report 27 | * generation node. 28 | * 29 | * pageContents is defined in the VerifyLinksResultAnnotation spec, so 30 | * we spread it above this to ensure it uses this custom reducer. 31 | */ 32 | pageContents: Annotation({ 33 | reducer: (state, update) => { 34 | if (update === undefined) return undefined; 35 | 36 | if (update[0]?.startsWith("The following is the content of the Tweet:")) { 37 | // This means the update is from validateTweetContent so we can remove 38 | // all other state fields. 39 | return update; 40 | } 41 | 42 | return (state || []).concat(update); 43 | }, 44 | default: () => [], 45 | }), 46 | }); 47 | 48 | export const VerifyTweetConfigurableAnnotation = Annotation.Root({ 49 | ...VerifyLinksGraphConfigurableAnnotation.spec, 50 | }); 51 | -------------------------------------------------------------------------------- /src/clients/reddit/get-user-less-token.ts: -------------------------------------------------------------------------------- 1 | interface RedditTokenResponse { 2 | access_token: string; 3 | token_type: string; 4 | expires_in: number; 5 | scope: string; 6 | } 7 | 8 | export async function getRedditUserlessToken(): Promise { 9 | const redditClientId = process.env.REDDIT_CLIENT_ID; 10 | const redditClientSecret = process.env.REDDIT_CLIENT_SECRET; 11 | if (!redditClientId || !redditClientSecret) { 12 | throw new Error("Missing Reddit client ID or secret"); 13 | } 14 | 15 | const tokenUrl = "https://www.reddit.com/api/v1/access_token"; 16 | 17 | // Create Basic Auth header 18 | const authHeader = 
Buffer.from( 19 | `${redditClientId}:${redditClientSecret}`, 20 | ).toString("base64"); 21 | 22 | // Prepare the form data based on grant type 23 | const formData = new URLSearchParams({ grant_type: "client_credentials" }); 24 | 25 | try { 26 | const response = await fetch(tokenUrl, { 27 | method: "POST", 28 | headers: { 29 | Authorization: `Basic ${authHeader}`, 30 | "Content-Type": "application/x-www-form-urlencoded", 31 | }, 32 | body: formData, 33 | }); 34 | 35 | if (!response.ok) { 36 | throw new Error(`HTTP error! status: ${response.status}`); 37 | } 38 | 39 | const data: RedditTokenResponse = await response.json(); 40 | return data; 41 | } catch (error) { 42 | throw new Error(`Failed to get Reddit token: ${error}`); 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /src/clients/reddit/tests/reddit.int.test.ts: -------------------------------------------------------------------------------- 1 | import { test, expect } from "@jest/globals"; 2 | import { RedditClient } from "../client.js"; 3 | 4 | test("Reddit client can fetch posts from subreddit", async () => { 5 | const subredditName = "LocalLLaMA"; 6 | 7 | const client = await RedditClient.fromUserless(); 8 | const posts = await client.getTopPosts(subredditName, { limit: 10 }); 9 | console.log("Posts:\n"); 10 | console.dir(posts.map(client.simplifyPost), { depth: null }); 11 | expect(posts.length).toBe(10); 12 | }); 13 | 14 | test("Reddit client can fetch comments from post", async () => { 15 | const subredditName = "LocalLLaMA"; 16 | const client = await RedditClient.fromUserless(); 17 | const posts = await client.getTopPosts(subredditName, { limit: 1 }); 18 | 19 | const postId = posts[0].id; 20 | const comments = await client.getPostComments(postId); 21 | console.log("Comments:\n"); 22 | console.dir(comments.map(client.simplifyComment), { depth: null }); 23 | expect(comments.length).toBeGreaterThan(0); 24 | }); 25 | 26 | test("Reddit client can fetch post 
by URL", async () => { 27 | const client = await RedditClient.fromUserless(); 28 | const url = 29 | "https://www.reddit.com/r/LocalLLaMA/comments/1i31ji5/what_is_elevenlabs_doing_how_is_it_so_good/"; 30 | const post = await client.getPostByURL(url); 31 | console.log("Post:\n"); 32 | console.dir(post, { depth: null }); 33 | expect(post).toBeDefined(); 34 | }); 35 | 36 | test("Can get posts and comments from URL", async () => { 37 | const client = await RedditClient.fromUserless(); 38 | const url = 39 | "https://www.reddit.com/r/LocalLLaMA/comments/1i31ji5/what_is_elevenlabs_doing_how_is_it_so_good/"; 40 | const post = await client.getPostByURL(url); 41 | expect(post).toBeDefined(); 42 | const comments = await client.getPostComments(post.id); 43 | expect(comments.length).toBeGreaterThan(0); 44 | }); 45 | 46 | test("Can get posts and comments from URL, then simplify", async () => { 47 | const client = await RedditClient.fromUserless(); 48 | const url = 49 | "https://www.reddit.com/r/LocalLLaMA/comments/1i31ji5/what_is_elevenlabs_doing_how_is_it_so_good/"; 50 | const post = await client.getPostByURL(url); 51 | expect(post).toBeDefined(); 52 | const comments = await client.getPostComments(post.id); 53 | expect(comments.length).toBeGreaterThan(0); 54 | 55 | const simplePost = client.simplifyPost(post); 56 | const simpleComments = comments.map(client.simplifyComment); 57 | 58 | expect(simplePost).toBeDefined(); 59 | expect(simpleComments.length).toBe(comments.length); 60 | }); 61 | -------------------------------------------------------------------------------- /src/clients/slack/utils.ts: -------------------------------------------------------------------------------- 1 | import { traceable } from "langsmith/traceable"; 2 | import { SlackClient } from "./client.js"; 3 | import { File } from "./types.js"; 4 | 5 | export function getUrlForPublicFile(file: File) { 6 | if (!file.permalink_public || !file.url_private) { 7 | return undefined; 8 | } 9 | 10 | const pubSecret = 
file.permalink_public.split("-").pop(); 11 | return `${file.url_private}?pub_secret=${pubSecret}`; 12 | } 13 | 14 | async function getPublicFileUrlsFunc( 15 | fileIds: string[] | undefined, 16 | ): Promise { 17 | if (!fileIds) return undefined; 18 | 19 | const slackClient = new SlackClient(); 20 | 21 | try { 22 | const publicUrlPromises = fileIds.map(async (fileId) => { 23 | try { 24 | const publicFile = await slackClient.makeFilePublic(fileId); 25 | if (!publicFile.file) { 26 | return undefined; 27 | } 28 | 29 | return getUrlForPublicFile(publicFile.file as File); 30 | } catch (e: any) { 31 | const isAlreadyPublic = e?.message?.includes("already_public"); 32 | if (!isAlreadyPublic) { 33 | console.error("Failed to make public URL for file ID:", fileId, e); 34 | return undefined; 35 | } 36 | 37 | // File has already been made public. 38 | // Attempt to get the URL from the file info endpoint 39 | try { 40 | const fileInfo = await slackClient.getPublicFile(fileId); 41 | if (!fileInfo.file) { 42 | return undefined; 43 | } 44 | 45 | return getUrlForPublicFile(fileInfo.file as File); 46 | } catch (e) { 47 | console.error("Failed to get public file:", fileId, e); 48 | return undefined; 49 | } 50 | } 51 | }); 52 | 53 | return (await Promise.all(publicUrlPromises)) 54 | .filter((u) => u !== undefined) 55 | .flat(); 56 | } catch (e) { 57 | console.error(`Failed to make public URLs for file IDs:`, fileIds, e); 58 | return undefined; 59 | } 60 | } 61 | 62 | export const getPublicFileUrls = traceable(getPublicFileUrlsFunc, { 63 | name: "get_public_file_urls", 64 | }); 65 | -------------------------------------------------------------------------------- /src/clients/twitter/SETUP.md: -------------------------------------------------------------------------------- 1 | # Setup Twitter API Instructions 2 | 3 | - Create a Twitter developer account 4 | - Create a new app and give it a name. 
5 | - Copy the `API Key` and `API Key Secret` and `Bearer Token` and set them as `TWITTER_API_KEY`, `TWITTER_API_KEY_SECRET`, and `TWITTER_BEARER_TOKEN` in your `.env` file. 6 | - After saving, visit the App Dashboard. Find the `User authentication settings` section, and click the `Set up` button. This is how you will authorize users to use the Twitter API on their behalf. 7 | - Set the following fields: 8 | - `App permissions`: `Read and write` 9 | - `Type of App`: `Web App, Automated App or Bot` 10 | - `App info`: 11 | - `Callback URI/Redirect URL`: `http://localhost:3000/auth/twitter/callback` 12 | - `Website URL`: Your website URL 13 | - Save. You'll then be given a `Client ID` and `Client Secret`. Set these as `TWITTER_CLIENT_ID` and `TWITTER_CLIENT_SECRET` in your `.env` file. 14 | 15 | Once done, run the `yarn start:auth` command to run the Twitter OAuth server. Open [http://localhost:3000](http://localhost:3000) in your browser, and click `Login with Twitter`. 16 | 17 | After authorizing your account with the app, navigate to your terminal where you'll see a JSON object logged. Copy the `token` and `tokenSecret` values and set them as `TWITTER_USER_TOKEN` and `TWITTER_USER_TOKEN_SECRET` in your `.env` file. 18 | -------------------------------------------------------------------------------- /src/clients/twitter/types.ts: -------------------------------------------------------------------------------- 1 | import { EUploadMimeType, TwitterApi } from "twitter-api-v2"; 2 | 3 | /** 4 | * Interface for creating a new Tweet request 5 | */ 6 | export interface CreateTweetRequest { 7 | /** 8 | * Text of the Tweet being created. 9 | */ 10 | text: string; 11 | 12 | /** 13 | * Media to be attached to the Tweet. 
14 | */ 15 | media?: CreateMediaRequest; 16 | 17 | /** 18 | * Link to the Tweet being quoted 19 | */ 20 | quoteTweetId?: string; 21 | } 22 | 23 | /** 24 | * Interface for creating a new media request 25 | */ 26 | export interface CreateMediaRequest { 27 | /** 28 | * The base64-encoded file content being uploaded. 29 | */ 30 | media: Buffer; 31 | /** 32 | * The type of media being uploaded. 33 | */ 34 | mimeType: EUploadMimeType | string; 35 | /** 36 | * A list of user IDs to set as additional owners allowed to use the returned mediaId in Tweets or Cards. 37 | * Maximum of 100 additional owners may be specified 38 | */ 39 | additionalOwners?: string[]; 40 | } 41 | 42 | export interface TwitterClientArgs { 43 | twitterClient: TwitterApi; 44 | twitterClientOauth2?: TwitterApi; 45 | twitterToken?: string; 46 | twitterTokenSecret?: string; 47 | useArcade?: boolean; 48 | textOnlyMode?: boolean; 49 | } 50 | -------------------------------------------------------------------------------- /src/clients/types.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Interface for authorizing a user to use the Twitter API 3 | */ 4 | export type AuthorizeUserResponse = 5 | | { 6 | /** 7 | * The Bearer token used to authenticate requests with the Twitter API 8 | */ 9 | token: string; 10 | /** 11 | * The URL to visit to authorize the user 12 | */ 13 | authorizationUrl?: never; 14 | } 15 | | { 16 | /** 17 | * The Bearer token used to authenticate requests with the Twitter API 18 | */ 19 | token?: never; 20 | /** 21 | * The URL to visit to authorize the user 22 | */ 23 | authorizationUrl: string; 24 | }; 25 | -------------------------------------------------------------------------------- /src/evals/general/index.ts: -------------------------------------------------------------------------------- 1 | import { type Example, Run } from "langsmith"; 2 | import { evaluate, EvaluationResult } from "langsmith/evaluation"; 3 | // 
eslint-disable-next-line import/no-extraneous-dependencies 4 | import "dotenv/config"; 5 | import { generatePostGraph } from "../../agents/generate-post/generate-post-graph.js"; 6 | 7 | const runGraph = async ( 8 | input: Record, 9 | ): Promise> => { 10 | return await generatePostGraph.invoke(input); 11 | }; 12 | 13 | const evaluatePost = (run: Run, example?: Example): EvaluationResult => { 14 | if (!example) { 15 | throw new Error("No example provided"); 16 | } 17 | if (!example.outputs) { 18 | throw new Error("No example outputs provided"); 19 | } 20 | if (!run.outputs) { 21 | throw new Error("No run outputs provided"); 22 | } 23 | 24 | // TODO: Implement evaluation logic 25 | throw new Error("Evaluation logic not implemented"); 26 | }; 27 | 28 | async function runEval() { 29 | const datasetName = "sma:generate-post:general"; 30 | await evaluate(runGraph, { 31 | data: datasetName, 32 | evaluators: [evaluatePost], 33 | experimentPrefix: "Post Generation-General", 34 | }); 35 | } 36 | 37 | runEval().catch(console.error); 38 | 39 | // https://x.com/LangChainAI/status/1858311912091476455 40 | // https://x.com/LangChainAI/status/1857811436984217835 41 | // https://x.com/LangChainAI/status/1856026604180242636 42 | // https://x.com/LangChainAI/status/1855437724536504482 43 | -------------------------------------------------------------------------------- /src/evals/github/index.ts: -------------------------------------------------------------------------------- 1 | import { type Example, Run } from "langsmith"; 2 | import { evaluate, EvaluationResult } from "langsmith/evaluation"; 3 | // eslint-disable-next-line import/no-extraneous-dependencies 4 | import "dotenv/config"; 5 | import { generatePostGraph } from "../../agents/generate-post/generate-post-graph.js"; 6 | 7 | const runGraph = async ( 8 | input: Record, 9 | ): Promise> => { 10 | return await generatePostGraph.invoke(input); 11 | }; 12 | 13 | const evaluatePost = (run: Run, example?: Example): EvaluationResult 
=> { 14 | if (!example) { 15 | throw new Error("No example provided"); 16 | } 17 | if (!example.outputs) { 18 | throw new Error("No example outputs provided"); 19 | } 20 | if (!run.outputs) { 21 | throw new Error("No run outputs provided"); 22 | } 23 | console.log("\n\nGENERATED POST:\n", run.outputs.post.join("\n---\n")); 24 | console.log("\nEXAMPLE POST:\n", example.outputs.post); 25 | 26 | return { 27 | key: "correct_generation", 28 | score: true, 29 | }; 30 | }; 31 | 32 | async function runEval() { 33 | const datasetName = "sma:generate-post:github"; 34 | await evaluate(runGraph, { 35 | data: datasetName, 36 | evaluators: [evaluatePost], 37 | experimentPrefix: "Post Generation-Github", 38 | }); 39 | } 40 | 41 | runEval().catch(console.error); 42 | 43 | // Should be approved and posts generated 44 | // https://x.com/LangChainAI/status/1861108590792036799 45 | // https://x.com/LangChainAI/status/1860760295188185246 46 | // https://x.com/LangChainAI/status/1860745200668201148 47 | // https://x.com/LangChainAI/status/1860714493661106562 48 | // https://x.com/LangChainAI/status/1860485484683911584 49 | // https://x.com/LangChainAI/status/1860397908451033240 50 | 51 | // Would need review: 52 | // https://x.com/LangChainAI/status/1858175010612916272 53 | -------------------------------------------------------------------------------- /src/evals/twitter/index.ts: -------------------------------------------------------------------------------- 1 | import { type Example, Run } from "langsmith"; 2 | import { evaluate, EvaluationResult } from "langsmith/evaluation"; 3 | // eslint-disable-next-line import/no-extraneous-dependencies 4 | import "dotenv/config"; 5 | import { generatePostGraph } from "../../agents/generate-post/generate-post-graph.js"; 6 | 7 | const runGraph = async ( 8 | input: Record, 9 | ): Promise> => { 10 | return await generatePostGraph.invoke(input); 11 | }; 12 | 13 | const evaluatePost = (run: Run, example?: Example): EvaluationResult => { 14 | if 
(!example) { 15 | throw new Error("No example provided"); 16 | } 17 | if (!example.outputs) { 18 | throw new Error("No example outputs provided"); 19 | } 20 | if (!run.outputs) { 21 | throw new Error("No run outputs provided"); 22 | } 23 | 24 | // TODO: Implement evaluation logic 25 | throw new Error("Evaluation logic not implemented"); 26 | }; 27 | 28 | async function runEval() { 29 | const datasetName = "sma:generate-post:twitter"; 30 | await evaluate(runGraph, { 31 | data: datasetName, 32 | evaluators: [evaluatePost], 33 | experimentPrefix: "Post Generation-Twitter", 34 | }); 35 | } 36 | 37 | runEval().catch(console.error); 38 | -------------------------------------------------------------------------------- /src/evals/validate-images/validate-images.int.test.ts: -------------------------------------------------------------------------------- 1 | import * as ls from "langsmith/jest"; 2 | import { type SimpleEvaluator } from "langsmith/jest"; 3 | import { GeneratePostAnnotation } from "../../agents/generate-post/generate-post-state.js"; 4 | import { TEST_EACH_INPUTS_OUTPUTS } from "./inputs.js"; 5 | import { validateImages } from "../../agents/find-images/nodes/validate-images.js"; 6 | 7 | const checkCorrectImages: SimpleEvaluator = ({ expected, actual }) => { 8 | const expectedImageOptions = expected.imageOptions as string[]; 9 | const actualImageOptions = actual.imageOptions as string[]; 10 | let numCorrect = 0; 11 | for (const expectedUrl of expectedImageOptions) { 12 | if (actualImageOptions.find((actualUrl) => actualUrl === expectedUrl)) { 13 | numCorrect += 1; 14 | } 15 | } 16 | const score = numCorrect / expectedImageOptions.length; 17 | 18 | return { 19 | key: "correct_images", 20 | score, 21 | }; 22 | }; 23 | 24 | ls.describe("SMA - Validate Images", () => { 25 | ls.test.each(TEST_EACH_INPUTS_OUTPUTS)( 26 | "Should validate images", 27 | async ({ inputs }) => { 28 | // Import and run your app, or some part of it here 29 | const result = await 
validateImages( 30 | inputs as typeof GeneratePostAnnotation.State, 31 | ); 32 | console.log("result!", result); 33 | const evalResult = ls.expect(result).evaluatedBy(checkCorrectImages); 34 | // Ensure the result is greater than 0.8 and less than or equal to 1 35 | // CHECK IF THIS RUNS THE EVALUATOR TWICE 36 | await evalResult.toBeGreaterThanOrEqual(0.8); 37 | await evalResult.toBeLessThanOrEqual(1); 38 | return result; 39 | }, 40 | ); 41 | }); 42 | -------------------------------------------------------------------------------- /src/evals/youtube/index.ts: -------------------------------------------------------------------------------- 1 | import { type Example, Run } from "langsmith"; 2 | import { evaluate, EvaluationResult } from "langsmith/evaluation"; 3 | // eslint-disable-next-line import/no-extraneous-dependencies 4 | import "dotenv/config"; 5 | import { generatePostGraph } from "../../agents/generate-post/generate-post-graph.js"; 6 | 7 | const runGraph = async ( 8 | input: Record, 9 | ): Promise> => { 10 | return await generatePostGraph.invoke(input); 11 | }; 12 | 13 | const evaluatePost = (run: Run, example?: Example): EvaluationResult => { 14 | if (!example) { 15 | throw new Error("No example provided"); 16 | } 17 | if (!example.outputs) { 18 | throw new Error("No example outputs provided"); 19 | } 20 | if (!run.outputs) { 21 | throw new Error("No run outputs provided"); 22 | } 23 | 24 | // TODO: Implement evaluation logic 25 | throw new Error("Evaluation logic not implemented"); 26 | }; 27 | 28 | async function runEval() { 29 | const datasetName = "sma:generate-post:youtube"; 30 | await evaluate(runGraph, { 31 | data: datasetName, 32 | evaluators: [evaluatePost], 33 | experimentPrefix: "Post Generation-YouTube", 34 | }); 35 | } 36 | 37 | runEval().catch(console.error); 38 | 39 | // https://x.com/LangChainAI/status/1860438927892709871 40 | // https://x.com/LangChainAI/status/1860352611834069056 41 | // 
https://x.com/LangChainAI/status/1855629502690349326 42 | // https://x.com/LangChainAI/status/1855362227420967092 43 | // https://x.com/LangChainAI/status/1854925528031195148 44 | -------------------------------------------------------------------------------- /src/tests/data/langchain_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langchain-ai/social-media-agent/5bc28b136b1d28ef16d4a63f6b6dde1b719754d1/src/tests/data/langchain_logo.png -------------------------------------------------------------------------------- /src/tests/data/langchain_logo_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/langchain-ai/social-media-agent/5bc28b136b1d28ef16d4a63f6b6dde1b719754d1/src/tests/data/langchain_logo_2.png -------------------------------------------------------------------------------- /src/tests/expected.ts: -------------------------------------------------------------------------------- 1 | export const EXPECTED_README = `# LangGraph.js Examples 2 | 3 | This repository contains a series of example TypeScript projects which implement LangGraph.js agents. 4 | Each directory focuses on a different problem which LangGraph.js aims to solve/enable solutions for. 5 | 6 | ## Prerequisites 7 | 8 | The following projects all use [LangSmith](https://smith.langchain.com/), LangGraph [Studio](https://github.com/langchain-ai/langgraph-studio) and [Cloud](https://langchain-ai.github.io/langgraph/cloud/), as well as the [LangGraph.js](https://langchain-ai.github.io/langgraphjs/) and [LangChain.js](https://js.langchain.com/v0.2/docs/introduction/) libraries. 9 | 10 | Before jumping into any of the projects, you should create a LangSmith account [here](https://smith.langchain.com/), and download the latest LangGraph Studio version [here](https://github.com/langchain-ai/langgraph-studio/releases/latest). 
11 | 12 | Running LangGraph Studio locally requires [Docker](https://www.docker.com/), so ensure you have it installed _and_ running before starting the Studio (I personally use [OrbStack](https://orbstack.dev/) to manage my Docker containers, which is free to use for personal use). 13 | 14 | ## Projects 15 | 16 | - [Intro](./intro/README.md) - Introduction to LangGraph.js, Studio, and Cloud. 17 | - [Human in the Loop](./human_in_the_loop/README.md) - Introduction to Human in the Loop (HITL) concepts. 18 | - [Stockbroker](./stockbroker/README.md) - A full stack stockbroker & financial analyst app, with HITL for purchasing stocks. 19 | - Streaming Messages ([Examples](./streaming_messages/README.md), [Frontend](./streaming_messages_frontend/README.md)) - Next.js web app connected to a LangGraph Cloud deployment to show off different message streaming types. 20 | `; 21 | -------------------------------------------------------------------------------- /src/tests/github.int.test.ts: -------------------------------------------------------------------------------- 1 | import { describe, it, expect } from "@jest/globals"; 2 | import { getFileContents } from "../utils/github-repo-contents.js"; 3 | 4 | describe("GitHub get file contents", () => { 5 | it("Can get the download_url of a Gif from a public GitHub repo", async () => { 6 | const repoUrl = "https://github.com/Integuru-AI/Integuru"; 7 | const gifFileName = "integuru_demo.gif"; 8 | const contents = await getFileContents(repoUrl, gifFileName); 9 | expect(contents.download_url).toBeDefined(); 10 | expect(contents.type).toBe("file"); 11 | }); 12 | 13 | it("Can get the download_url of an image from a public GitHub repo", async () => { 14 | const repoUrl = "https://github.com/glance-io/steer-backend"; 15 | const imgFileName = "logo_banner.png"; 16 | const contents = await getFileContents(repoUrl, imgFileName); 17 | expect(contents.download_url).toBeDefined(); 18 | expect(contents.type).toBe("file"); 19 | }); 20 | }); 21 | 
-------------------------------------------------------------------------------- /src/tests/linkedin.int.test.ts: -------------------------------------------------------------------------------- 1 | import { describe, it, expect } from "@jest/globals"; 2 | import { LinkedInClient } from "../clients/linkedin.js"; 3 | 4 | describe("LinkedIN API wrapper", () => { 5 | it("Can make a text post", async () => { 6 | const linkedInClient = new LinkedInClient(); 7 | const textPostResponse = await linkedInClient.createTextPost( 8 | "Hello, this is a test post from LinkedIn API!", 9 | ); 10 | console.log("Text post created:", textPostResponse); 11 | expect(textPostResponse).toBeDefined(); 12 | }); 13 | 14 | it("Can make an image post", async () => { 15 | const linkedInClient = new LinkedInClient(); 16 | const textPostResponse = await linkedInClient.createImagePost({ 17 | text: "Hello, this is a test post from LinkedIn API!", 18 | imageUrl: 19 | "https://verdyqfuvvtxtygqekei.supabase.co/storage/v1/object/public/images/screenshot-github.com-1734639569875.jpeg", 20 | imageDescription: "A screenshot of the Open Canvas Readme", 21 | imageTitle: "Open Canvas", 22 | }); 23 | console.log("Image post created:", textPostResponse); 24 | expect(textPostResponse).toBeDefined(); 25 | }); 26 | 27 | it("Can make a text post to an organization", async () => { 28 | const linkedInClient = new LinkedInClient(); 29 | const textPostResponse = await linkedInClient.createTextPost( 30 | "Hello, this is a test post from LinkedIn API!", 31 | { 32 | postToOrganization: true, 33 | }, 34 | ); 35 | console.log("Text post created:", textPostResponse); 36 | expect(textPostResponse).toBeDefined(); 37 | }); 38 | 39 | it.only("Can make an image post to an organization", async () => { 40 | const linkedInClient = new LinkedInClient(); 41 | const textPostResponse = await linkedInClient.createImagePost( 42 | { 43 | text: "Hello, this is a test post from LinkedIn API!", 44 | imageUrl: 45 | 
"https://verdyqfuvvtxtygqekei.supabase.co/storage/v1/object/public/images/screenshot-github.com-1734639569875.jpeg", 46 | imageDescription: "A screenshot of the Open Canvas Readme", 47 | imageTitle: "Open Canvas", 48 | }, 49 | { 50 | postToOrganization: true, 51 | }, 52 | ); 53 | console.log("Image post created:", textPostResponse); 54 | expect(textPostResponse).toBeDefined(); 55 | }); 56 | }); 57 | -------------------------------------------------------------------------------- /src/tests/scrape-general-content.int.test.ts: -------------------------------------------------------------------------------- 1 | import { describe, expect, it } from "@jest/globals"; 2 | import { 3 | extractAllImageUrlsFromMarkdown, 4 | getPageText, 5 | } from "../agents/utils.js"; 6 | import { getUrlContents } from "../agents/shared/nodes/verify-general.js"; 7 | 8 | describe("Get page contents", () => { 9 | it("Can return markdown from a blog URL", async () => { 10 | const url = 11 | "https://diamantai.substack.com/p/atlas-when-artificial-intelligence?r=336pe4&%3Butm_campaign=post&%3Butm_medium=web&%3BshowWelcomeOnShare=false&triedRedirect=true"; 12 | const contents = await getPageText(url); 13 | expect(contents).toBeDefined(); 14 | 15 | // Verify it can extract images from the text 16 | const allImageUrls = extractAllImageUrlsFromMarkdown(contents || ""); 17 | expect(allImageUrls).toBeDefined(); 18 | expect(allImageUrls.length).toBeGreaterThan(0); 19 | }); 20 | 21 | it("Can use firecrawl to extract markdown and images from a page", async () => { 22 | const url = "https://qdrant.tech/documentation/data-ingestion-beginners/#"; 23 | const contents = await getUrlContents(url); 24 | expect(contents).toBeDefined(); 25 | expect(contents.content).toBeGreaterThan(10); 26 | expect(contents.imageUrls).toBeDefined(); 27 | expect(contents.imageUrls?.length).toBeGreaterThan(0); 28 | }); 29 | }); 30 | -------------------------------------------------------------------------------- 
/src/tests/slack.int.test.ts: -------------------------------------------------------------------------------- 1 | import { test, expect } from "@jest/globals"; 2 | import { SlackClient } from "../clients/slack/client.js"; 3 | 4 | const TEST_CHANNEL_ID = "C06BU7XF5S7"; 5 | 6 | test("Slack client can fetch messages from channel name", async () => { 7 | const client = new SlackClient(); 8 | 9 | const messages = await client.getChannelMessages(TEST_CHANNEL_ID, { 10 | maxMessages: 5, 11 | }); 12 | console.log(messages); 13 | expect(messages).toBeDefined(); 14 | expect(messages.length).toBeGreaterThan(0); 15 | }); 16 | -------------------------------------------------------------------------------- /src/tests/youtube.int.test.ts: -------------------------------------------------------------------------------- 1 | import { describe, it, expect } from "@jest/globals"; 2 | import { getVideoThumbnailUrl } from "../agents/shared/nodes/youtube.utils.js"; 3 | 4 | describe("YouTube utils", () => { 5 | it("Can get the thumbnails of YouTube videos", async () => { 6 | const youTubeUrls = [ 7 | "https://www.youtube.com/watch?v=gwE3Wv4MNLw", 8 | "https://www.youtube.com/watch?v=VyyJFrPlHfk", 9 | "https://www.youtube.com/watch?v=BGvqeRB4Jpk", 10 | "https://www.youtube.com/watch?v=u_Xm3vgBQ9Y", 11 | "https://www.youtube.com/watch?v=02IDU8eCX8o", 12 | ]; 13 | 14 | for await (const url of youTubeUrls) { 15 | const thumbnail = await getVideoThumbnailUrl(url); 16 | console.log(`url & thumbnail:\nURL: ${url}\nTHUMBNAIL: ${thumbnail}`); 17 | expect(thumbnail).toBeDefined(); 18 | } 19 | }); 20 | }); 21 | -------------------------------------------------------------------------------- /src/utils/create-dir.ts: -------------------------------------------------------------------------------- 1 | import fs from "fs"; 2 | 3 | export function createDirIfNotExists(dir: string) { 4 | if (!fs.existsSync(dir)) { 5 | fs.mkdirSync(dir, { recursive: true }); 6 | } 7 | } 8 | 
--------------------------------------------------------------------------------
/src/utils/delay-run.ts:
--------------------------------------------------------------------------------
import { Client, Run } from "@langchain/langgraph-sdk";

interface DelayRunInputs {
  /**
   * The number of seconds to delay the run by.
   */
  seconds: number;
  /**
   * The node to resume on.
   */
  resumeNode: string;
  /**
   * The ID of the thread to resume in.
   */
  threadId: string;
  /**
   * The assistant ID to resume the run in.
   */
  assistantId: string;
  /**
   * The run ID of the current run to cancel.
   */
  runId: string;
  /**
   * The value of the state to resume the run with.
   * NOTE(review): restored the type arguments — a bare `Record` is not
   * valid TypeScript (the generic requires two arguments) and they appear
   * to have been stripped from this copy.
   */
  state: Record<string, any>;
  /**
   * Configurable values to pass to the run.
   */
  configurable?: Record<string, any>;
}

/**
 * Delay the execution of a run by a specified number of seconds.
 * This function will cancel the current run, and create a new run
 * for the same thread, with the specified state and configurable
 * fields to be executed after a delay.
 * @param param0 - The inputs to the function.
 * @returns The new run.
 */
export async function delayRun({
  seconds,
  resumeNode,
  threadId,
  assistantId,
  runId,
  state,
  configurable,
}: DelayRunInputs): Promise<Run> {
  const client = new Client({
    apiUrl:
      process.env.LANGGRAPH_API_URL || `http://localhost:${process.env.PORT}`,
  });

  // Schedule the delayed replacement run first, then cancel the current
  // one, so the thread is never left without a pending run.
  const newRun = await client.runs.create(threadId, assistantId, {
    input: {},
    config: {
      configurable: {
        ...(configurable || {}),
      },
    },
    command: {
      update: state,
      goto: resumeNode,
    },
    afterSeconds: seconds,
  });

  await client.runs.cancel(threadId, runId);
  return newRun;
}

--------------------------------------------------------------------------------
/src/utils/firecrawl.ts:
--------------------------------------------------------------------------------
/**
 * Extracts image URLs from FireCrawl metadata by combining both regular image and OpenGraph image fields.
 * @param {any} metadata - The metadata object from FireCrawl containing potential image information
 * @param {string[]} [metadata.image] - Optional array of regular image URLs
 * @param {string} [metadata.ogImage] - Optional OpenGraph image URL
 * @returns {string[] | undefined} An array of image URLs if any images are found, undefined otherwise
 */
export function getImagesFromFireCrawlMetadata(
  metadata: any,
): string[] | undefined {
  const image = metadata.image || [];
  const ogImage = metadata.ogImage ? [metadata.ogImage] : [];
  if (image?.length || ogImage?.length) {
    // OpenGraph image first, so downstream consumers prefer it.
    return [...ogImage, ...image];
  }
  return undefined;
}

--------------------------------------------------------------------------------
/src/utils/image-message.ts:
--------------------------------------------------------------------------------
import {
  removeQueryParams,
  getMimeTypeFromUrl,
  imageUrlToBuffer,
  BLACKLISTED_MIME_TYPES,
} from "../agents/utils.js";

/**
 * Builds multimodal message parts (a text label plus a media entry) for
 * each usable image URL in `imageChunk`. Images whose mime type cannot be
 * determined, or whose mime type is blacklisted, are skipped.
 * @param imageChunk - Image URLs to convert into message contents.
 * @param baseIndex - Offset added to each image's index in its text label.
 * @returns Flattened array of `{type: "text"}` / `{type: "media"}` parts.
 */
export async function getImageMessageContents(
  imageChunk: string[],
  baseIndex: number,
) {
  // `map`, not `flatMap`: an async callback returns promises, which
  // flatMap cannot flatten anyway — flattening happens after Promise.all.
  const imageMessagesPromises = imageChunk.map(
    async (fileUri, chunkIndex) => {
      const cleanedFileUri = removeQueryParams(fileUri);
      let mimeType = getMimeTypeFromUrl(fileUri);

      // Fall back to fetching the image to read its content type when the
      // URL alone doesn't reveal one. Best-effort: a failed fetch only
      // logs a warning and the image is dropped below.
      if (!mimeType) {
        try {
          const { contentType } = await imageUrlToBuffer(fileUri);
          if (!contentType) {
            throw new Error("Failed to fetch content type");
          }
          mimeType = contentType;
        } catch (e) {
          console.warn(
            "No mime type found, and failed to fetch content type. File URI:\n",
            fileUri,
            "\nError:\n",
            e,
          );
        }
      }
      // Skip images with unknown or blacklisted mime types.
      if (
        !mimeType ||
        BLACKLISTED_MIME_TYPES.some((mt) => mimeType.startsWith(mt))
      ) {
        return [];
      }

      return [
        {
          type: "text",
          text: `The below image is index ${baseIndex + chunkIndex}`,
        },
        {
          type: "media",
          mimeType,
          fileUri: cleanedFileUri,
        },
      ];
    },
  );
  const imageMessages = (await Promise.all(imageMessagesPromises)).flat();
  return imageMessages;
}

--------------------------------------------------------------------------------
/src/utils/schedule-date/helpers.ts:
--------------------------------------------------------------------------------
import { addDays, isSaturday, isFriday, isMonday, isSunday } from "date-fns";

/**
 * Advances `date` one day at a time until `matches` returns true, then
 * returns that day normalized to midnight UTC. Always moves at least one
 * day forward, so a date that already matches yields the following match.
 */
function nextMatchingDay(date: Date, matches: (d: Date) => boolean): Date {
  let current = date;
  do {
    current = addDays(current, 1);
  } while (!matches(current));
  return new Date(current.setUTCHours(0, 0, 0, 0));
}

/** Returns the next Saturday strictly after `date`, at midnight UTC. */
export function getNextSaturday(date: Date): Date {
  return nextMatchingDay(date, isSaturday);
}

/** Returns the next Friday strictly after `date`, at midnight UTC. */
export function getNextFriday(date: Date): Date {
  return nextMatchingDay(date, isFriday);
}

/** Returns the next Monday strictly after `date`, at midnight UTC. */
export function getNextMonday(date: Date): Date {
  return nextMatchingDay(date, isMonday);
}

/** True when `date` falls on a Saturday or Sunday. */
export function isWeekend(date: Date): boolean {
  return isSaturday(date) || isSunday(date);
}

/** True when `date` falls on a Monday or Friday. */
export function isMondayOrFriday(date: Date): boolean {
  return isMonday(date) || isFriday(date);
}

--------------------------------------------------------------------------------
/src/utils/schedule-date/types.ts:
--------------------------------------------------------------------------------
// Maps priority buckets (p1-p3) and "r" buckets (r1-r3) to the schedule
// dates already taken in each bucket.
export type TakenScheduleDates = {
  p1: Date[];
  p2: Date[];
  p3: Date[];
  r1: Date[];
  r2: Date[];
  r3: Date[];
};

--------------------------------------------------------------------------------
/src/utils/supabase.ts:
--------------------------------------------------------------------------------
import { createClient } from "@supabase/supabase-js";

/**
 * Creates a Supabase client from the SUPABASE_URL and
 * SUPABASE_SERVICE_ROLE_KEY environment variables.
 * @throws {Error} when either variable is unset, reporting which one.
 */
export function createSupabaseClient() {
  if (!process.env.SUPABASE_URL || !process.env.SUPABASE_SERVICE_ROLE_KEY) {
    // Report which variable is absent. The flags were previously inverted
    // (`!!`), so the message claimed a variable was missing when it was
    // actually set.
    const errMsg = `Missing environment variables for supabase.
SUPABASE_URL missing: ${!process.env.SUPABASE_URL}
SUPABASE_SERVICE_ROLE_KEY missing: ${!process.env.SUPABASE_SERVICE_ROLE_KEY}`;
    throw new Error(errMsg);
  }

  return createClient(
    process.env.SUPABASE_URL,
    process.env.SUPABASE_SERVICE_ROLE_KEY,
  );
}

--------------------------------------------------------------------------------
/static/agent_flow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/langchain-ai/social-media-agent/5bc28b136b1d28ef16d4a63f6b6dde1b719754d1/static/agent_flow.png
--------------------------------------------------------------------------------
/static/graph_screenshot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/langchain-ai/social-media-agent/5bc28b136b1d28ef16d4a63f6b6dde1b719754d1/static/graph_screenshot.png
--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------
{
  "extends": "@tsconfig/recommended",
  "compilerOptions": {
    "target": "ES2021",
    "lib": ["ES2021", "ES2022.Object", "DOM"],
    "module": "NodeNext",
    "moduleResolution": "nodenext",
    "esModuleInterop": true,
    "noImplicitReturns": true,
    "declaration": true,
    "noFallthroughCasesInSwitch": true,
    "noUnusedLocals": true,
    "noUnusedParameters": true,
    "useDefineForClassFields": true,
    "strictPropertyInitialization": false,
    "allowJs": true,
    "strict": true,
    "strictFunctionTypes": false,
    "outDir": "dist",
    "types": ["jest", "node"],
    "resolveJsonModule": true
  },
  "include": ["**/*.ts", "**/*.js", "jest.setup.cjs"],
  "exclude": ["node_modules", "dist"]
}
--------------------------------------------------------------------------------