├── .cursor
│   └── rules
│       ├── core_abstraction
│       │   ├── async.mdc
│       │   ├── batch.mdc
│       │   ├── communication.mdc
│       │   ├── flow.mdc
│       │   ├── node.mdc
│       │   └── parallel.mdc
│       ├── design_pattern
│       │   ├── agent.mdc
│       │   ├── mapreduce.mdc
│       │   ├── multi_agent.mdc
│       │   ├── rag.mdc
│       │   ├── structure.mdc
│       │   └── workflow.mdc
│       ├── guide_for_pocketflow.mdc
│       └── utility_function
│           ├── chunking.mdc
│           ├── embedding.mdc
│           ├── llm.mdc
│           ├── text_to_speech.mdc
│           ├── vector.mdc
│           ├── viz.mdc
│           └── websearch.mdc
├── .cursorrules
├── .gitignore
├── LICENSE
├── README.md
├── cookbook
│   ├── README.md
│   ├── data
│   │   └── PaulGrahamEssaysLarge
│   │       ├── addiction.txt
│   │       ├── aord.txt
│   │       ├── apple.txt
│   │       ├── avg.txt
│   │       └── before.txt
│   ├── pocketflow-a2a
│   │   ├── README.md
│   │   ├── a2a_client.py
│   │   ├── a2a_server.py
│   │   ├── common
│   │   │   ├── __init__.py
│   │   │   ├── client
│   │   │   │   ├── __init__.py
│   │   │   │   ├── card_resolver.py
│   │   │   │   └── client.py
│   │   │   ├── server
│   │   │   │   ├── __init__.py
│   │   │   │   ├── server.py
│   │   │   │   ├── task_manager.py
│   │   │   │   └── utils.py
│   │   │   ├── types.py
│   │   │   └── utils
│   │   │       ├── in_memory_cache.py
│   │   │       └── push_notification_auth.py
│   │   ├── flow.py
│   │   ├── main.py
│   │   ├── nodes.py
│   │   ├── requirements.txt
│   │   ├── task_manager.py
│   │   └── utils.py
│   ├── pocketflow-agent
│   │   ├── README.md
│   │   ├── demo.ipynb
│   │   ├── flow.py
│   │   ├── main.py
│   │   ├── nodes.py
│   │   ├── requirements.txt
│   │   └── utils.py
│   ├── pocketflow-async-basic
│   │   ├── README.md
│   │   ├── flow.py
│   │   ├── main.py
│   │   ├── nodes.py
│   │   ├── requirements.txt
│   │   └── utils.py
│   ├── pocketflow-batch-flow
│   │   ├── README.md
│   │   ├── flow.py
│   │   ├── images
│   │   │   ├── bird.jpg
│   │   │   ├── cat.jpg
│   │   │   └── dog.jpg
│   │   ├── main.py
│   │   ├── nodes.py
│   │   ├── output
│   │   │   ├── bird_blur.jpg
│   │   │   ├── bird_grayscale.jpg
│   │   │   ├── bird_sepia.jpg
│   │   │   ├── cat_blur.jpg
│   │   │   ├── cat_grayscale.jpg
│   │   │   ├── cat_sepia.jpg
│   │   │   ├── dog_blur.jpg
│   │   │   ├── dog_grayscale.jpg
│   │   │   └── dog_sepia.jpg
│   │   └── requirements.txt
│   ├── pocketflow-batch-node
│   │   ├── README.md
│   │   ├── data
│   │   │   └── sales.csv
│   │   ├── flow.py
│   │   ├── main.py
│   │   ├── nodes.py
│   │   └── requirements.txt
│   ├── pocketflow-batch
│   │   ├── README.md
│   │   ├── main.py
│   │   ├── requirements.txt
│   │   ├── translations
│   │   │   ├── README_CHINESE.md
│   │   │   ├── README_FRENCH.md
│   │   │   ├── README_GERMAN.md
│   │   │   ├── README_JAPANESE.md
│   │   │   ├── README_KOREAN.md
│   │   │   ├── README_PORTUGUESE.md
│   │   │   ├── README_RUSSIAN.md
│   │   │   └── README_SPANISH.md
│   │   └── utils.py
│   ├── pocketflow-chat-guardrail
│   │   ├── README.md
│   │   ├── main.py
│   │   ├── requirements.txt
│   │   └── utils.py
│   ├── pocketflow-chat-memory
│   │   ├── README.md
│   │   ├── flow.py
│   │   ├── main.py
│   │   ├── nodes.py
│   │   ├── requirements.txt
│   │   └── utils
│   │       ├── __init__.py
│   │       ├── call_llm.py
│   │       ├── get_embedding.py
│   │       └── vector_index.py
│   ├── pocketflow-chat
│   │   ├── README.md
│   │   ├── main.py
│   │   ├── requirements.txt
│   │   └── utils.py
│   ├── pocketflow-cli-hitl
│   │   ├── README.md
│   │   ├── docs
│   │   │   └── design.md
│   │   ├── flow.py
│   │   ├── main.py
│   │   ├── nodes.py
│   │   ├── requirements.txt
│   │   └── utils
│   │       ├── __init__.py
│   │       └── call_llm.py
│   ├── pocketflow-code-generator
│   │   ├── README.md
│   │   ├── doc
│   │   │   └── design.md
│   │   ├── flow.py
│   │   ├── main.py
│   │   ├── nodes.py
│   │   ├── requirements.txt
│   │   └── utils
│   │       ├── __init__.py
│   │       ├── call_llm.py
│   │       └── code_executor.py
│   ├── pocketflow-communication
│   │   ├── README.md
│   │   ├── flow.py
│   │   ├── main.py
│   │   ├── nodes.py
│   │   └── requirements.txt
│   ├── pocketflow-fastapi-background
│   │   ├── README.md
│   │   ├── assets
│   │   │   └── banner.png
│   │   ├── docs
│   │   │   └── design.md
│   │   ├── flow.py
│   │   ├── main.py
│   │   ├── nodes.py
│   │   ├── requirements.txt
│   │   ├── static
│   │   │   ├── index.html
│   │   │   └── progress.html
│   │   └── utils
│   │       ├── __init__.py
│   │       └── call_llm.py
│   ├── pocketflow-fastapi-hitl
│   │   ├── README.md
│   │   ├── assets
│   │   │   └── banner.png
│   │   ├── docs
│   │   │   └── design.md
│   │   ├── flow.py
│   │   ├── main.py
│   │   ├── nodes.py
│   │   ├── requirements.txt
│   │   ├── server.py
│   │   ├── static
│   │   │   └── style.css
│   │   ├── templates
│   │   │   └── index.html
│   │   └── utils
│   │       ├── __init__.py
│   │       └── process_task.py
│   ├── pocketflow-fastapi-websocket
│   │   ├── README.md
│   │   ├── assets
│   │   │   └── banner.png
│   │   ├── docs
│   │   │   └── design.md
│   │   ├── flow.py
│   │   ├── main.py
│   │   ├── nodes.py
│   │   ├── requirements.txt
│   │   ├── static
│   │   │   └── index.html
│   │   └── utils
│   │       ├── __init__.py
│   │       └── stream_llm.py
│   ├── pocketflow-flow
│   │   ├── README.md
│   │   ├── flow.py
│   │   ├── main.py
│   │   └── requirements.txt
│   ├── pocketflow-google-calendar
│   │   ├── .env.exemplo
│   │   ├── .gitignore
│   │   ├── Pipfile
│   │   ├── README.md
│   │   ├── main.py
│   │   ├── nodes.py
│   │   └── utils
│   │       ├── __init__.py
│   │       └── google_calendar.py
│   ├── pocketflow-gradio-hitl
│   │   ├── README.md
│   │   ├── assets
│   │   │   ├── book_hotel.png
│   │   │   ├── change_intention.png
│   │   │   └── flow_visualization.png
│   │   ├── flow.py
│   │   ├── main.py
│   │   ├── nodes.py
│   │   ├── requirements.txt
│   │   └── utils
│   │       ├── call_llm.py
│   │       ├── call_mock_api.py
│   │       ├── conversation.py
│   │       └── format_chat_history.py
│   ├── pocketflow-hello-world
│   │   ├── README.md
│   │   ├── docs
│   │   │   └── design.md
│   │   ├── flow.py
│   │   ├── main.py
│   │   └── utils
│   │       ├── __init__.py
│   │       └── call_llm.py
│   ├── pocketflow-llm-streaming
│   │   ├── README.md
│   │   ├── main.py
│   │   └── utils.py
│   ├── pocketflow-majority-vote
│   │   ├── README.md
│   │   ├── main.py
│   │   ├── requirements.txt
│   │   └── utils.py
│   ├── pocketflow-map-reduce
│   │   ├── README.md
│   │   ├── data
│   │   │   ├── resume1.txt
│   │   │   ├── resume2.txt
│   │   │   ├── resume3.txt
│   │   │   ├── resume4.txt
│   │   │   └── resume5.txt
│   │   ├── flow.py
│   │   ├── main.py
│   │   ├── nodes.py
│   │   ├── requirements.txt
│   │   └── utils.py
│   ├── pocketflow-mcp
│   │   ├── README.md
│   │   ├── main.py
│   │   ├── requirements.txt
│   │   ├── simple_server.py
│   │   └── utils.py
│   ├── pocketflow-multi-agent
│   │   ├── README.md
│   │   ├── main.py
│   │   ├── requirements.txt
│   │   └── utils.py
│   ├── pocketflow-nested-batch
│   │   ├── README.md
│   │   ├── flow.py
│   │   ├── main.py
│   │   ├── nodes.py
│   │   ├── requirements.txt
│   │   └── school
│   │       ├── class_a
│   │       │   ├── student1.txt
│   │       │   └── student2.txt
│   │       └── class_b
│   │           ├── student3.txt
│   │           └── student4.txt
│   ├── pocketflow-node
│   │   ├── README.md
│   │   ├── flow.py
│   │   ├── main.py
│   │   ├── requirements.txt
│   │   └── utils
│   │       └── call_llm.py
│   ├── pocketflow-parallel-batch-flow
│   │   ├── README.md
│   │   ├── flow.py
│   │   ├── images
│   │   │   ├── bird.jpg
│   │   │   ├── cat.jpg
│   │   │   └── dog.jpg
│   │   ├── main.py
│   │   ├── nodes.py
│   │   ├── output
│   │   │   ├── bird_blur.jpg
│   │   │   ├── bird_grayscale.jpg
│   │   │   ├── bird_sepia.jpg
│   │   │   ├── cat_blur.jpg
│   │   │   ├── cat_grayscale.jpg
│   │   │   ├── cat_sepia.jpg
│   │   │   ├── dog_blur.jpg
│   │   │   ├── dog_grayscale.jpg
│   │   │   └── dog_sepia.jpg
│   │   └── requirements.txt
│   ├── pocketflow-parallel-batch
│   │   ├── README.md
│   │   ├── main.py
│   │   ├── requirements.txt
│   │   ├── translations
│   │   │   ├── README_CHINESE.md
│   │   │   ├── README_FRENCH.md
│   │   │   ├── README_GERMAN.md
│   │   │   ├── README_JAPANESE.md
│   │   │   ├── README_KOREAN.md
│   │   │   ├── README_PORTUGUESE.md
│   │   │   ├── README_RUSSIAN.md
│   │   │   └── README_SPANISH.md
│   │   └── utils.py
│   ├── pocketflow-rag
│   │   ├── README.md
│   │   ├── flow.py
│   │   ├── main.py
│   │   ├── nodes.py
│   │   ├── requirements.txt
│   │   └── utils.py
│   ├── pocketflow-streamlit-fsm
│   │   ├── README.md
│   │   ├── app.py
│   │   ├── assets
│   │   │   └── banner.png
│   │   ├── docs
│   │   │   └── design.md
│   │   ├── flow.py
│   │   ├── nodes.py
│   │   ├── requirements.txt
│   │   └── utils
│   │       ├── __init__.py
│   │       ├── generate_image.py
│   │       └── test_generated_image.png
│   ├── pocketflow-structured-output
│   │   ├── README.md
│   │   ├── data.txt
│   │   ├── main.py
│   │   ├── requirements.txt
│   │   └── utils.py
│   ├── pocketflow-supervisor
│   │   ├── README.md
│   │   ├── flow.py
│   │   ├── main.py
│   │   ├── nodes.py
│   │   ├── requirements.txt
│   │   └── utils.py
│   ├── pocketflow-text2sql
│   │   ├── README.md
│   │   ├── docs
│   │   │   └── design.md
│   │   ├── ecommerce.db
│   │   ├── flow.py
│   │   ├── main.py
│   │   ├── nodes.py
│   │   ├── populate_db.py
│   │   ├── requirements.txt
│   │   └── utils
│   │       └── call_llm.py
│   ├── pocketflow-thinking
│   │   ├── README.md
│   │   ├── design.md
│   │   ├── flow.py
│   │   ├── main.py
│   │   ├── nodes.py
│   │   ├── requirements.txt
│   │   └── utils.py
│   ├── pocketflow-tool-crawler
│   │   ├── README.md
│   │   ├── flow.py
│   │   ├── main.py
│   │   ├── nodes.py
│   │   ├── requirements.txt
│   │   ├── tools
│   │   │   ├── crawler.py
│   │   │   └── parser.py
│   │   └── utils
│   │       ├── __init__.py
│   │       └── call_llm.py
│   ├── pocketflow-tool-database
│   │   ├── README.md
│   │   ├── example.db
│   │   ├── flow.py
│   │   ├── main.py
│   │   ├── nodes.py
│   │   ├── requirements.txt
│   │   ├── tools
│   │   │   └── database.py
│   │   └── utils
│   │       └── __init__.py
│   ├── pocketflow-tool-embeddings
│   │   ├── README.md
│   │   ├── flow.py
│   │   ├── main.py
│   │   ├── nodes.py
│   │   ├── requirements.txt
│   │   ├── tools
│   │   │   └── embeddings.py
│   │   └── utils
│   │       ├── __init__.py
│   │       └── call_llm.py
│   ├── pocketflow-tool-pdf-vision
│   │   ├── README.md
│   │   ├── flow.py
│   │   ├── main.py
│   │   ├── nodes.py
│   │   ├── pdfs
│   │   │   └── pocket-flow.pdf
│   │   ├── requirements.txt
│   │   ├── tools
│   │   │   ├── pdf.py
│   │   │   └── vision.py
│   │   └── utils
│   │       ├── __init__.py
│   │       └── call_llm.py
│   ├── pocketflow-tool-search
│   │   ├── README.md
│   │   ├── flow.py
│   │   ├── main.py
│   │   ├── nodes.py
│   │   ├── requirements.txt
│   │   ├── tools
│   │   │   ├── parser.py
│   │   │   └── search.py
│   │   └── utils
│   │       ├── __init__.py
│   │       └── call_llm.py
│   ├── pocketflow-visualization
│   │   ├── README.md
│   │   ├── async_flow.py
│   │   ├── visualize.py
│   │   └── viz
│   │       ├── flow_visualization.html
│   │       └── flow_visualization.json
│   ├── pocketflow-voice-chat
│   │   ├── README.md
│   │   ├── docs
│   │   │   └── design.md
│   │   ├── flow.py
│   │   ├── main.py
│   │   ├── nodes.py
│   │   ├── requirements.txt
│   │   └── utils
│   │       ├── __init__.py
│   │       ├── audio_utils.py
│   │       ├── call_llm.py
│   │       ├── speech_to_text.py
│   │       ├── text_to_speech.py
│   │       └── tts_output.mp3
│   ├── pocketflow-workflow
│   │   ├── README.md
│   │   ├── flow.py
│   │   ├── main.py
│   │   ├── nodes.py
│   │   ├── requirements.txt
│   │   └── utils
│   │       └── call_llm.py
│   └── pocketflow_demo.ipynb
├── docs
│   ├── _config.yml
│   ├── core_abstraction
│   │   ├── async.md
│   │   ├── batch.md
│   │   ├── communication.md
│   │   ├── flow.md
│   │   ├── index.md
│   │   ├── node.md
│   │   └── parallel.md
│   ├── design_pattern
│   │   ├── agent.md
│   │   ├── index.md
│   │   ├── mapreduce.md
│   │   ├── multi_agent.md
│   │   ├── rag.md
│   │   ├── structure.md
│   │   └── workflow.md
│   ├── guide.md
│   ├── index.md
│   └── utility_function
│       ├── chunking.md
│       ├── embedding.md
│       ├── index.md
│       ├── llm.md
│       ├── text_to_speech.md
│       ├── vector.md
│       ├── viz.md
│       └── websearch.md
├── pocketflow
│   └── __init__.py
├── setup.py
├── tests
│   ├── test_async_batch_flow.py
│   ├── test_async_batch_node.py
│   ├── test_async_flow.py
│   ├── test_async_parallel_batch_flow.py
│   ├── test_async_parallel_batch_node.py
│   ├── test_batch_flow.py
│   ├── test_batch_node.py
│   ├── test_fall_back.py
│   ├── test_flow_basic.py
│   └── test_flow_composition.py
└── utils
    └── update_pocketflow_mdc.py
/.cursor/rules/core_abstraction/async.mdc:
--------------------------------------------------------------------------------
1 | ---
2 | description: Guidelines for using PocketFlow, Core Abstraction, (Advanced) Async
3 | globs:
4 | alwaysApply: false
5 | ---
6 | # (Advanced) Async
7 |
8 | **Async** Nodes implement `prep_async()`, `exec_async()`, `exec_fallback_async()`, and/or `post_async()`. This is useful for:
9 |
10 | 1. **prep_async()**: For *fetching/reading data (files, APIs, DB)* in an I/O-friendly way.
11 | 2. **exec_async()**: Typically used for async LLM calls.
12 | 3. **post_async()**: For *awaiting user feedback*, *coordinating across multi-agents* or any additional async steps after `exec_async()`.
13 |
14 | **Note**: `AsyncNode` must be wrapped in `AsyncFlow`. `AsyncFlow` can also include regular (sync) nodes.
15 |
16 | ### Example
17 |
18 | ```python
19 | class SummarizeThenVerify(AsyncNode):
20 | async def prep_async(self, shared):
21 | # Example: read a file asynchronously
22 | doc_text = await read_file_async(shared["doc_path"])
23 | return doc_text
24 |
25 | async def exec_async(self, prep_res):
26 | # Example: async LLM call
27 | summary = await call_llm_async(f"Summarize: {prep_res}")
28 | return summary
29 |
30 | async def post_async(self, shared, prep_res, exec_res):
31 | # Example: wait for user feedback
32 | decision = await gather_user_feedback(exec_res)
33 | if decision == "approve":
34 | shared["summary"] = exec_res
35 | return "approve"
36 | return "deny"
37 |
38 | summarize_node = SummarizeThenVerify()
39 | final_node = Finalize()
40 |
41 | # Define transitions
42 | summarize_node - "approve" >> final_node
43 | summarize_node - "deny" >> summarize_node # retry
44 |
45 | flow = AsyncFlow(start=summarize_node)
46 |
47 | async def main():
48 | shared = {"doc_path": "document.txt"}
49 | await flow.run_async(shared)
50 | print("Final Summary:", shared.get("summary"))
51 |
52 | asyncio.run(main())
53 | ```
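54 | 
55 | `Finalize` above can be a plain synchronous `Node`, since an `AsyncFlow` may mix sync and async nodes. A minimal sketch (reusing the `shared["summary"]` key from the example above):
56 | 
57 | ```python
58 | class Finalize(Node):
59 |     def prep(self, shared):
60 |         return shared.get("summary")
61 | 
62 |     def post(self, shared, prep_res, exec_res):
63 |         print("Approved summary:", prep_res)
64 | ```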
--------------------------------------------------------------------------------
/.cursor/rules/design_pattern/workflow.mdc:
--------------------------------------------------------------------------------
1 | ---
2 | description: Guidelines for using PocketFlow, Design Pattern, Workflow
3 | globs:
4 | alwaysApply: false
5 | ---
6 | # Workflow
7 |
8 | Many real-world tasks are too complex for one LLM call. The solution is **Task Decomposition**: decompose them into a [chain](../core_abstraction/flow.md) of multiple Nodes.
9 |
10 |
11 |
12 | > - You don't want to make each task **too coarse**, because it may be *too complex for one LLM call*.
13 | > - You don't want to make each task **too granular**, because then *the LLM call doesn't have enough context* and results are *not consistent across nodes*.
14 | >
15 | > You usually need multiple *iterations* to find the *sweet spot*. If the task has too many *edge cases*, consider using [Agents](mdc:./agent.md).
16 | {: .best-practice }
17 |
18 | ### Example: Article Writing
19 |
20 | ```python
21 | class GenerateOutline(Node):
22 | def prep(self, shared): return shared["topic"]
23 | def exec(self, topic): return call_llm(f"Create a detailed outline for an article about {topic}")
24 | def post(self, shared, prep_res, exec_res): shared["outline"] = exec_res
25 |
26 | class WriteSection(Node):
27 | def prep(self, shared): return shared["outline"]
28 | def exec(self, outline): return call_llm(f"Write content based on this outline: {outline}")
29 | def post(self, shared, prep_res, exec_res): shared["draft"] = exec_res
30 |
31 | class ReviewAndRefine(Node):
32 | def prep(self, shared): return shared["draft"]
33 | def exec(self, draft): return call_llm(f"Review and improve this draft: {draft}")
34 | def post(self, shared, prep_res, exec_res): shared["final_article"] = exec_res
35 |
36 | # Connect nodes
37 | outline = GenerateOutline()
38 | write = WriteSection()
39 | review = ReviewAndRefine()
40 |
41 | outline >> write >> review
42 |
43 | # Create and run flow
44 | writing_flow = Flow(start=outline)
45 | shared = {"topic": "AI Safety"}
46 | writing_flow.run(shared)
47 | ```
48 |
49 | For *dynamic cases*, consider using [Agents](mdc:./agent.md).
--------------------------------------------------------------------------------
/.cursor/rules/utility_function/chunking.mdc:
--------------------------------------------------------------------------------
1 | ---
2 | description: Guidelines for using PocketFlow, Utility Function, Text Chunking
3 | globs:
4 | alwaysApply: false
5 | ---
6 | # Text Chunking
7 |
8 | Below are sample implementations of commonly used text chunking approaches.
9 |
10 |
11 | > Text chunking is more of a micro-optimization compared to the overall Flow design.
12 | >
13 | > It's recommended to start with naive chunking and optimize later.
14 | {: .best-practice }
15 |
16 | ---
17 |
18 | ## Example Python Code Samples
19 |
20 | ### 1. Naive (Fixed-Size) Chunking
21 | Splits text into fixed-size chunks (here by character count), ignoring sentence or semantic boundaries.
22 |
23 | ```python
24 | def fixed_size_chunk(text, chunk_size=100):
25 | chunks = []
26 | for i in range(0, len(text), chunk_size):
27 | chunks.append(text[i : i + chunk_size])
28 | return chunks
29 | ```
30 |
31 | However, sentences are often cut awkwardly, losing coherence.
32 |
33 | ### 2. Sentence-Based Chunking
34 |
35 | ```python
36 | import nltk  # requires the 'punkt' tokenizer data: nltk.download('punkt')
37 |
38 | def sentence_based_chunk(text, max_sentences=2):
39 | sentences = nltk.sent_tokenize(text)
40 | chunks = []
41 | for i in range(0, len(sentences), max_sentences):
42 | chunks.append(" ".join(sentences[i : i + max_sentences]))
43 | return chunks
44 | ```
45 |
46 | However, this might not handle very long sentences or paragraphs well.
47 |
48 | ### 3. Other Chunking
49 |
50 | - **Paragraph-Based**: Split text by paragraphs (e.g., on blank lines). Large paragraphs can create big chunks (see the sketch below).
51 | - **Semantic**: Use embeddings or topic modeling to chunk by semantic boundaries.
52 | - **Agentic**: Use an LLM to decide chunk boundaries based on context or meaning.
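53 | 
54 | For example, a minimal paragraph-based chunker (assuming paragraphs are separated by blank lines) could look like:
55 | 
56 | ```python
57 | def paragraph_based_chunk(text, max_paragraphs=1):
58 |     # Split on blank lines and drop empty segments
59 |     paragraphs = [p.strip() for p in text.split("\n\n") if p.strip()]
60 |     chunks = []
61 |     for i in range(0, len(paragraphs), max_paragraphs):
62 |         chunks.append("\n\n".join(paragraphs[i : i + max_paragraphs]))
63 |     return chunks
64 | ```
65 | 
66 | As with the other approaches, tune `max_paragraphs` to your downstream context window and retrieval needs.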
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # OS generated files
2 | .DS_Store
3 | .DS_Store?
4 | ._*
5 | .Spotlight-V100
6 | .Trashes
7 | ehthumbs.db
8 | Thumbs.db
9 |
10 |
11 | # IDE specific files
12 | .idea/
13 | .vscode/
14 | *.swp
15 | *.swo
16 | *~
17 |
18 | # Node
19 | node_modules/
20 | npm-debug.log
21 | yarn-debug.log
22 | yarn-error.log
23 | .env
24 | .env.local
25 | .env.development.local
26 | .env.test.local
27 | .env.production.local
28 |
29 | # Python
30 | __pycache__/
31 | *.py[cod]
32 | *$py.class
33 | *.so
34 | .Python
35 | build/
36 | develop-eggs/
37 | dist/
38 | downloads/
39 | eggs/
40 | .eggs/
41 | lib/
42 | lib64/
43 | parts/
44 | sdist/
45 | var/
46 | wheels/
47 | *.egg-info/
48 | .installed.cfg
49 | *.egg
50 | venv/
51 | ENV/
52 |
53 | # Logs and databases
54 | *.log
55 | *.sql
56 | *.sqlite
57 |
58 | # Build output
59 | dist/
60 | build/
61 | out/
62 |
63 | # Coverage reports
64 | coverage/
65 | .coverage
66 | .coverage.*
67 | htmlcov/
68 |
69 | # Misc
70 | *.bak
71 | *.tmp
72 | *.temp
73 |
74 |
75 | test.ipynb
76 | .pytest_cache/
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 Zachary Huang
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-a2a/common/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-a2a/common/__init__.py
--------------------------------------------------------------------------------
/cookbook/pocketflow-a2a/common/client/__init__.py:
--------------------------------------------------------------------------------
1 | from .client import A2AClient
2 | from .card_resolver import A2ACardResolver
3 |
4 | __all__ = ["A2AClient", "A2ACardResolver"]
5 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-a2a/common/client/card_resolver.py:
--------------------------------------------------------------------------------
1 | import httpx
2 | from common.types import (
3 | AgentCard,
4 | A2AClientJSONError,
5 | )
6 | import json
7 |
8 |
9 | class A2ACardResolver:
10 | def __init__(self, base_url, agent_card_path="/.well-known/agent.json"):
11 | self.base_url = base_url.rstrip("/")
12 | self.agent_card_path = agent_card_path.lstrip("/")
13 |
14 | def get_agent_card(self) -> AgentCard:
15 | with httpx.Client() as client:
16 | response = client.get(self.base_url + "/" + self.agent_card_path)
17 | response.raise_for_status()
18 | try:
19 | return AgentCard(**response.json())
20 | except json.JSONDecodeError as e:
21 | raise A2AClientJSONError(str(e)) from e
22 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-a2a/common/server/__init__.py:
--------------------------------------------------------------------------------
1 | from .server import A2AServer
2 | from .task_manager import TaskManager, InMemoryTaskManager
3 |
4 | __all__ = ["A2AServer", "TaskManager", "InMemoryTaskManager"]
5 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-a2a/common/server/utils.py:
--------------------------------------------------------------------------------
1 | from common.types import (
2 | JSONRPCResponse,
3 | ContentTypeNotSupportedError,
4 | UnsupportedOperationError,
5 | )
6 | from typing import List
7 |
8 |
9 | def are_modalities_compatible(
10 | server_output_modes: List[str], client_output_modes: List[str]
11 | ):
12 |     """Modalities are compatible if either list is empty or None,
13 |     or if the two lists share at least one common element."""
14 | if client_output_modes is None or len(client_output_modes) == 0:
15 | return True
16 |
17 | if server_output_modes is None or len(server_output_modes) == 0:
18 | return True
19 |
20 | return any(x in server_output_modes for x in client_output_modes)
21 |
22 |
23 | def new_incompatible_types_error(request_id):
24 | return JSONRPCResponse(id=request_id, error=ContentTypeNotSupportedError())
25 |
26 |
27 | def new_not_implemented_error(request_id):
28 | return JSONRPCResponse(id=request_id, error=UnsupportedOperationError())
29 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-a2a/flow.py:
--------------------------------------------------------------------------------
1 | from pocketflow import Flow
2 | from nodes import DecideAction, SearchWeb, AnswerQuestion
3 |
4 | def create_agent_flow():
5 | """
6 | Create and connect the nodes to form a complete agent flow.
7 |
8 | The flow works like this:
9 | 1. DecideAction node decides whether to search or answer
10 | 2. If search, go to SearchWeb node
11 | 3. If answer, go to AnswerQuestion node
12 | 4. After SearchWeb completes, go back to DecideAction
13 |
14 | Returns:
15 | Flow: A complete research agent flow
16 | """
17 | # Create instances of each node
18 | decide = DecideAction()
19 | search = SearchWeb()
20 | answer = AnswerQuestion()
21 |
22 | # Connect the nodes
23 | # If DecideAction returns "search", go to SearchWeb
24 | decide - "search" >> search
25 |
26 | # If DecideAction returns "answer", go to AnswerQuestion
27 | decide - "answer" >> answer
28 |
29 | # After SearchWeb completes and returns "decide", go back to DecideAction
30 | search - "decide" >> decide
31 |
32 | # Create and return the flow, starting with the DecideAction node
33 | return Flow(start=decide)
--------------------------------------------------------------------------------
/cookbook/pocketflow-a2a/main.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from flow import create_agent_flow
3 |
4 | def main():
5 | """Simple function to process a question."""
6 | # Default question
7 | default_question = "Who won the Nobel Prize in Physics 2024?"
8 |
9 | # Get question from command line if provided with --
10 | question = default_question
11 | for arg in sys.argv[1:]:
12 | if arg.startswith("--"):
13 | question = arg[2:]
14 | break
15 |
16 | # Create the agent flow
17 | agent_flow = create_agent_flow()
18 |
19 | # Process the question
20 | shared = {"question": question}
21 | print(f"🤔 Processing question: {question}")
22 | agent_flow.run(shared)
23 | print("\n🎯 Final Answer:")
24 | print(shared.get("answer", "No answer found"))
25 |
26 | if __name__ == "__main__":
27 | main()
--------------------------------------------------------------------------------
/cookbook/pocketflow-a2a/requirements.txt:
--------------------------------------------------------------------------------
1 | # For PocketFlow Agent Logic
2 | pocketflow>=0.0.1
3 | openai>=1.0.0
4 | duckduckgo-search>=7.5.2
5 | pyyaml>=5.1
6 |
7 | # For A2A Server Infrastructure (from common)
8 | starlette>=0.37.2,<0.38.0
9 | uvicorn[standard]>=0.29.0,<0.30.0
10 | sse-starlette>=1.8.2,<2.0.0
11 | pydantic>=2.0.0,<3.0.0
12 | httpx>=0.27.0,<0.28.0
13 | anyio>=3.0.0,<5.0.0 # Dependency of starlette/httpx
14 |
15 | # For running __main__.py
16 | click>=8.0.0,<9.0.0
17 |
18 | # For A2A Client
19 | httpx>=0.27.0,<0.28.0
20 | httpx-sse>=0.4.0
21 | asyncclick>=8.1.8 # Or just 'click' if you prefer asyncio.run
22 | pydantic>=2.0.0,<3.0.0 # For common.types
--------------------------------------------------------------------------------
/cookbook/pocketflow-a2a/utils.py:
--------------------------------------------------------------------------------
1 | from openai import OpenAI
2 | import os
3 | from duckduckgo_search import DDGS
4 |
5 | def call_llm(prompt):
6 | client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY", "your-api-key"))
7 | r = client.chat.completions.create(
8 | model="gpt-4o",
9 | messages=[{"role": "user", "content": prompt}]
10 | )
11 | return r.choices[0].message.content
12 |
13 | def search_web(query):
14 | results = DDGS().text(query, max_results=5)
15 | # Convert results to a string
16 | results_str = "\n\n".join([f"Title: {r['title']}\nURL: {r['href']}\nSnippet: {r['body']}" for r in results])
17 | return results_str
18 |
19 | if __name__ == "__main__":
20 | print("## Testing call_llm")
21 | prompt = "In a few words, what is the meaning of life?"
22 | print(f"## Prompt: {prompt}")
23 | response = call_llm(prompt)
24 | print(f"## Response: {response}")
25 |
26 | print("## Testing search_web")
27 | query = "Who won the Nobel Prize in Physics 2024?"
28 | print(f"## Query: {query}")
29 | results = search_web(query)
30 | print(f"## Results: {results}")
--------------------------------------------------------------------------------
/cookbook/pocketflow-agent/README.md:
--------------------------------------------------------------------------------
1 | # Research Agent
2 |
3 | This project demonstrates a simple yet powerful LLM-powered research agent. This implementation is based directly on the tutorial: [LLM Agents are simply Graph — Tutorial For Dummies](https://zacharyhuang.substack.com/p/llm-agent-internal-as-a-graph-tutorial).
4 |
5 | 👉 Run the tutorial in your browser: [Try Google Colab Notebook](
6 | https://colab.research.google.com/github/The-Pocket/PocketFlow/blob/main/cookbook/pocketflow-agent/demo.ipynb)
7 |
8 | ## Features
9 |
10 | - Performs web searches to gather information
11 | - Makes decisions about when to search vs. when to answer
12 | - Generates comprehensive answers based on research findings
13 |
14 | ## Getting Started
15 |
16 | 1. Install the packages you need with this simple command:
17 | ```bash
18 | pip install -r requirements.txt
19 | ```
20 |
21 | 2. Let's get your OpenAI API key ready:
22 |
23 | ```bash
24 | export OPENAI_API_KEY="your-api-key-here"
25 | ```
26 |
27 | 3. Let's do a quick check to make sure your API key is working properly:
28 |
29 | ```bash
30 | python utils.py
31 | ```
32 |
33 | This will test both the LLM call and web search features. If you see responses, you're good to go!
34 |
35 | 4. Try out the agent with the default question (about Nobel Prize winners):
36 |
37 | ```bash
38 | python main.py
39 | ```
40 |
41 | 5. Got a burning question? Ask anything you want by using the `--` prefix:
42 |
43 | ```bash
44 | python main.py --"What is quantum computing?"
45 | ```
46 |
47 | ## How Does It Work?
48 |
49 | The magic happens through a simple but powerful graph structure with three main parts:
50 |
51 | ```mermaid
52 | graph TD
53 | A[DecideAction] -->|"search"| B[SearchWeb]
54 | A -->|"answer"| C[AnswerQuestion]
55 | B -->|"decide"| A
56 | ```
57 |
58 | Here's what each part does:
59 | 1. **DecideAction**: The brain that figures out whether to search or answer (a simplified sketch appears at the end of this README)
60 | 2. **SearchWeb**: The researcher that goes out and finds information
61 | 3. **AnswerQuestion**: The writer that crafts the final answer
62 |
63 | Here's what's in each file:
64 | - [`main.py`](./main.py): The starting point - runs the whole show!
65 | - [`flow.py`](./flow.py): Connects everything together into a smart agent
66 | - [`nodes.py`](./nodes.py): The building blocks that make decisions and take actions
67 | - [`utils.py`](./utils.py): Helper functions for talking to the LLM and searching the web
68 |
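69 | For a feel of the node logic, here is a minimal sketch of what a decide-style node could look like (the real implementation lives in [`nodes.py`](./nodes.py); the prompt and shared-store keys below are simplified assumptions):
70 | 
71 | ```python
72 | from pocketflow import Node
73 | from utils import call_llm
74 | 
75 | class DecideAction(Node):
76 |     def prep(self, shared):
77 |         # The question plus whatever research has been gathered so far
78 |         return shared["question"], shared.get("context", "")
79 | 
80 |     def exec(self, inputs):
81 |         question, context = inputs
82 |         return call_llm(
83 |             f"Question: {question}\nContext: {context}\n"
84 |             "Reply with 'search: <query>' or 'answer'."
85 |         )
86 | 
87 |     def post(self, shared, prep_res, exec_res):
88 |         if exec_res.startswith("search"):
89 |             shared["search_query"] = exec_res.split(":", 1)[-1].strip()
90 |             return "search"  # routes to SearchWeb (see flow.py)
91 |         return "answer"      # routes to AnswerQuestion
92 | ```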
--------------------------------------------------------------------------------
/cookbook/pocketflow-agent/flow.py:
--------------------------------------------------------------------------------
1 | from pocketflow import Flow
2 | from nodes import DecideAction, SearchWeb, AnswerQuestion
3 |
4 | def create_agent_flow():
5 | """
6 | Create and connect the nodes to form a complete agent flow.
7 |
8 | The flow works like this:
9 | 1. DecideAction node decides whether to search or answer
10 | 2. If search, go to SearchWeb node
11 | 3. If answer, go to AnswerQuestion node
12 | 4. After SearchWeb completes, go back to DecideAction
13 |
14 | Returns:
15 | Flow: A complete research agent flow
16 | """
17 | # Create instances of each node
18 | decide = DecideAction()
19 | search = SearchWeb()
20 | answer = AnswerQuestion()
21 |
22 | # Connect the nodes
23 | # If DecideAction returns "search", go to SearchWeb
24 | decide - "search" >> search
25 |
26 | # If DecideAction returns "answer", go to AnswerQuestion
27 | decide - "answer" >> answer
28 |
29 | # After SearchWeb completes and returns "decide", go back to DecideAction
30 | search - "decide" >> decide
31 |
32 | # Create and return the flow, starting with the DecideAction node
33 | return Flow(start=decide)
--------------------------------------------------------------------------------
/cookbook/pocketflow-agent/main.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from flow import create_agent_flow
3 |
4 | def main():
5 | """Simple function to process a question."""
6 | # Default question
7 | default_question = "Who won the Nobel Prize in Physics 2024?"
8 |
9 | # Get question from command line if provided with --
10 | question = default_question
11 | for arg in sys.argv[1:]:
12 | if arg.startswith("--"):
13 | question = arg[2:]
14 | break
15 |
16 | # Create the agent flow
17 | agent_flow = create_agent_flow()
18 |
19 | # Process the question
20 | shared = {"question": question}
21 | print(f"🤔 Processing question: {question}")
22 | agent_flow.run(shared)
23 | print("\n🎯 Final Answer:")
24 | print(shared.get("answer", "No answer found"))
25 |
26 | if __name__ == "__main__":
27 | main()
--------------------------------------------------------------------------------
/cookbook/pocketflow-agent/requirements.txt:
--------------------------------------------------------------------------------
1 | pocketflow>=0.0.1
2 | aiohttp>=3.8.0 # For HTTP requests
3 | openai>=1.0.0 # For LLM calls
4 | duckduckgo-search>=7.5.2 # For web search
5 | requests>=2.25.1 # For HTTP requests
--------------------------------------------------------------------------------
/cookbook/pocketflow-agent/utils.py:
--------------------------------------------------------------------------------
1 | from openai import OpenAI
2 | import os
3 | from duckduckgo_search import DDGS
4 | import requests
5 |
6 | def call_llm(prompt):
7 | client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY", "your-api-key"))
8 | r = client.chat.completions.create(
9 | model="gpt-4o",
10 | messages=[{"role": "user", "content": prompt}]
11 | )
12 | return r.choices[0].message.content
13 |
14 | def search_web_duckduckgo(query):
15 | results = DDGS().text(query, max_results=5)
16 | # Convert results to a string
17 | results_str = "\n\n".join([f"Title: {r['title']}\nURL: {r['href']}\nSnippet: {r['body']}" for r in results])
18 | return results_str
19 |
20 | def search_web_brave(query):
21 |
22 | url = f"https://api.search.brave.com/res/v1/web/search?q={query}"
23 | api_key = "your brave search api key"
24 |
25 | headers = {
26 | "accept": "application/json",
27 | "Accept-Encoding": "gzip",
28 | "x-subscription-token": api_key
29 | }
30 |
31 | response = requests.get(url, headers=headers)
32 |     results_str = ""  # default so a failed request still returns a string
33 | if response.status_code == 200:
34 | data = response.json()
35 | results = data['web']['results']
36 | results_str = "\n\n".join([f"Title: {r['title']}\nURL: {r['url']}\nDescription: {r['description']}" for r in results])
37 | else:
38 | print(f"Request failed with status code: {response.status_code}")
39 | return results_str
40 |
41 | if __name__ == "__main__":
42 | print("## Testing call_llm")
43 | prompt = "In a few words, what is the meaning of life?"
44 | print(f"## Prompt: {prompt}")
45 | response = call_llm(prompt)
46 | print(f"## Response: {response}")
47 |
48 | print("## Testing search_web")
49 | query = "Who won the Nobel Prize in Physics 2024?"
50 | print(f"## Query: {query}")
51 |     results = search_web_duckduckgo(query)
52 | print(f"## Results: {results}")
--------------------------------------------------------------------------------
/cookbook/pocketflow-async-basic/flow.py:
--------------------------------------------------------------------------------
1 | """AsyncFlow implementation for recipe finder."""
2 |
3 | from pocketflow import AsyncFlow, Node
4 | from nodes import FetchRecipes, SuggestRecipe, GetApproval
5 |
6 | class NoOp(Node):
7 | """Node that does nothing, used to properly end the flow."""
8 | pass
9 |
10 | def create_flow():
11 | """Create and connect nodes into a flow."""
12 |
13 | # Create nodes
14 | fetch = FetchRecipes()
15 | suggest = SuggestRecipe()
16 | approve = GetApproval()
17 | end = NoOp()
18 |
19 | # Connect nodes
20 | fetch - "suggest" >> suggest
21 | suggest - "approve" >> approve
22 | approve - "retry" >> suggest # Loop back for another suggestion
23 | approve - "accept" >> end # Properly end the flow
24 |
25 | # Create flow starting with fetch
26 | flow = AsyncFlow(start=fetch)
27 | return flow
--------------------------------------------------------------------------------
/cookbook/pocketflow-async-basic/main.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | from flow import create_flow
3 |
4 | async def main():
5 | """Run the recipe finder flow."""
6 | # Create flow
7 | flow = create_flow()
8 |
9 | # Create shared store
10 | shared = {}
11 |
12 | # Run flow
13 | print("\nWelcome to Recipe Finder!")
14 | print("------------------------")
15 | await flow.run_async(shared)
16 | print("\nThanks for using Recipe Finder!")
17 |
18 | if __name__ == "__main__":
19 | # Run the async main function
20 | asyncio.run(main())
--------------------------------------------------------------------------------
/cookbook/pocketflow-async-basic/nodes.py:
--------------------------------------------------------------------------------
1 | from pocketflow import AsyncNode
2 | from utils import fetch_recipes, call_llm_async, get_user_input
3 |
4 | class FetchRecipes(AsyncNode):
5 | """AsyncNode that fetches recipes."""
6 |
7 | async def prep_async(self, shared):
8 | """Get ingredient from user."""
9 | ingredient = await get_user_input("Enter ingredient: ")
10 | return ingredient
11 |
12 | async def exec_async(self, ingredient):
13 | """Fetch recipes asynchronously."""
14 | recipes = await fetch_recipes(ingredient)
15 | return recipes
16 |
17 | async def post_async(self, shared, prep_res, recipes):
18 | """Store recipes and continue."""
19 | shared["recipes"] = recipes
20 | shared["ingredient"] = prep_res
21 | return "suggest"
22 |
23 | class SuggestRecipe(AsyncNode):
24 | """AsyncNode that suggests a recipe using LLM."""
25 |
26 | async def prep_async(self, shared):
27 | """Get recipes from shared store."""
28 | return shared["recipes"]
29 |
30 | async def exec_async(self, recipes):
31 | """Get suggestion from LLM."""
32 | suggestion = await call_llm_async(
33 | f"Choose best recipe from: {', '.join(recipes)}"
34 | )
35 | return suggestion
36 |
37 | async def post_async(self, shared, prep_res, suggestion):
38 | """Store suggestion and continue."""
39 | shared["suggestion"] = suggestion
40 | return "approve"
41 |
42 | class GetApproval(AsyncNode):
43 | """AsyncNode that gets user approval."""
44 |
45 | async def prep_async(self, shared):
46 | """Get current suggestion."""
47 | return shared["suggestion"]
48 |
49 | async def exec_async(self, suggestion):
50 | """Ask for user approval."""
51 | answer = await get_user_input(f"\nAccept this recipe? (y/n): ")
52 | return answer
53 |
54 | async def post_async(self, shared, prep_res, answer):
55 | """Handle user's decision."""
56 | if answer == "y":
57 | print("\nGreat choice! Here's your recipe...")
58 | print(f"Recipe: {shared['suggestion']}")
59 | print(f"Ingredient: {shared['ingredient']}")
60 | return "accept"
61 | else:
62 | print("\nLet's try another recipe...")
63 | return "retry"
--------------------------------------------------------------------------------
/cookbook/pocketflow-async-basic/requirements.txt:
--------------------------------------------------------------------------------
1 | pocketflow
2 | aiohttp>=3.8.0 # For async HTTP requests
3 | openai>=1.0.0 # For async LLM calls
--------------------------------------------------------------------------------
/cookbook/pocketflow-async-basic/utils.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import aiohttp
3 | from openai import AsyncOpenAI
4 |
5 | async def fetch_recipes(ingredient):
6 | """Fetch recipes from an API asynchronously."""
7 | print(f"Fetching recipes for {ingredient}...")
8 |
9 | # Simulate API call with delay
10 | await asyncio.sleep(1)
11 |
12 | # Mock recipes (in real app, would fetch from API)
13 | recipes = [
14 | f"{ingredient} Stir Fry",
15 | f"Grilled {ingredient} with Herbs",
16 | f"Baked {ingredient} with Vegetables"
17 | ]
18 |
19 | print(f"Found {len(recipes)} recipes.")
20 |
21 | return recipes
22 |
23 | async def call_llm_async(prompt):
24 | """Make async LLM call."""
25 | print("\nSuggesting best recipe...")
26 |
27 | # Simulate LLM call with delay
28 | await asyncio.sleep(1)
29 |
30 | # Mock LLM response (in real app, would call OpenAI)
31 | recipes = prompt.split(": ")[1].split(", ")
32 | suggestion = recipes[1] # Always suggest second recipe
33 |
34 | print(f"How about: {suggestion}")
35 | return suggestion
36 |
37 | async def get_user_input(prompt):
38 | """Get user input asynchronously."""
39 | # Create event loop to handle async input
40 | loop = asyncio.get_event_loop()
41 |
42 | # Get input in a non-blocking way
43 | answer = await loop.run_in_executor(None, input, prompt)
44 |
45 | return answer.lower()
--------------------------------------------------------------------------------
/cookbook/pocketflow-batch-flow/README.md:
--------------------------------------------------------------------------------
1 | # PocketFlow BatchFlow Example
2 |
3 | This example demonstrates the BatchFlow concept in PocketFlow by implementing an image processor that applies different filters to multiple images.
4 |
5 | ## What this Example Demonstrates
6 |
7 | - How to use BatchFlow to run a Flow multiple times with different parameters
8 | - Key concepts of BatchFlow:
9 | 1. Creating a base Flow for single-item processing
10 | 2. Using BatchFlow to process multiple items with different parameters
11 | 3. Managing parameters across multiple Flow executions
12 |
13 | ## Project Structure
14 | ```
15 | pocketflow-batch-flow/
16 | ├── README.md
17 | ├── requirements.txt
18 | ├── images/
19 | │ ├── cat.jpg # Sample image 1
20 | │ ├── dog.jpg # Sample image 2
21 | │ └── bird.jpg # Sample image 3
22 | ├── main.py # Entry point
23 | ├── flow.py # Flow and BatchFlow definitions
24 | └── nodes.py # Node implementations for image processing
25 | ```
26 |
27 | ## How it Works
28 |
29 | The example processes multiple images with different filters:
30 |
31 | 1. **Base Flow**: Processes a single image
32 | - Load image
33 | - Apply filter (grayscale, blur, or sepia)
34 | - Save processed image
35 |
36 | 2. **BatchFlow**: Processes multiple image-filter combinations
37 | - Takes a list of parameters (image + filter combinations)
38 | - Runs the base Flow for each parameter set
39 | - Organizes output in a structured way
40 |
41 | ## Installation
42 |
43 | ```bash
44 | pip install -r requirements.txt
45 | ```
46 |
47 | ## Usage
48 |
49 | ```bash
50 | python main.py
51 | ```
52 |
53 | ## Sample Output
54 |
55 | ```
56 | Processing images with filters...
57 |
58 | Processing cat.jpg with grayscale filter...
59 | Processing cat.jpg with blur filter...
60 | Processing dog.jpg with sepia filter...
61 | ...
62 |
63 | All images processed successfully!
64 | Check the 'output' directory for results.
65 | ```
66 |
67 | ## Key Concepts Illustrated
68 |
69 | 1. **Parameter Management**: Shows how BatchFlow manages different parameter sets
70 | 2. **Flow Reuse**: Demonstrates running the same Flow multiple times
71 | 3. **Batch Processing**: Shows how to process multiple items efficiently
72 | 4. **Real-world Application**: Provides a practical example of batch processing
--------------------------------------------------------------------------------
/cookbook/pocketflow-batch-flow/flow.py:
--------------------------------------------------------------------------------
1 | from pocketflow import Flow, BatchFlow
2 | from nodes import LoadImage, ApplyFilter, SaveImage
3 |
4 | def create_base_flow():
5 | """Create the base Flow for processing a single image."""
6 | # Create nodes
7 | load = LoadImage()
8 | filter_node = ApplyFilter()
9 | save = SaveImage()
10 |
11 | # Connect nodes
12 | load - "apply_filter" >> filter_node
13 | filter_node - "save" >> save
14 |
15 | # Create and return flow
16 | return Flow(start=load)
17 |
18 | class ImageBatchFlow(BatchFlow):
19 | """BatchFlow for processing multiple images with different filters."""
20 |
21 | def prep(self, shared):
22 | """Generate parameters for each image-filter combination."""
23 | # List of images to process
24 | images = ["cat.jpg", "dog.jpg", "bird.jpg"]
25 |
26 | # List of filters to apply
27 | filters = ["grayscale", "blur", "sepia"]
28 |
29 | # Generate all combinations
30 | params = []
31 | for img in images:
32 | for f in filters:
33 | params.append({
34 | "input": img,
35 | "filter": f
36 | })
37 |
38 | return params
39 |
40 | def create_flow():
41 | """Create the complete batch processing flow."""
42 | # Create base flow for single image processing
43 | base_flow = create_base_flow()
44 |
45 | # Wrap in BatchFlow for multiple images
46 | batch_flow = ImageBatchFlow(start=base_flow)
47 |
48 | return batch_flow
--------------------------------------------------------------------------------
/cookbook/pocketflow-batch-flow/images/bird.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-batch-flow/images/bird.jpg
--------------------------------------------------------------------------------
/cookbook/pocketflow-batch-flow/images/cat.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-batch-flow/images/cat.jpg
--------------------------------------------------------------------------------
/cookbook/pocketflow-batch-flow/images/dog.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-batch-flow/images/dog.jpg
--------------------------------------------------------------------------------
/cookbook/pocketflow-batch-flow/main.py:
--------------------------------------------------------------------------------
1 | import os
2 | from PIL import Image
3 | import numpy as np
4 | from flow import create_flow
5 |
6 | def main():
7 | # Create and run flow
8 | print("Processing images with filters...")
9 |
10 | flow = create_flow()
11 | flow.run({})
12 |
13 | print("\nAll images processed successfully!")
14 | print("Check the 'output' directory for results.")
15 |
16 | if __name__ == "__main__":
17 | main()
--------------------------------------------------------------------------------
/cookbook/pocketflow-batch-flow/output/bird_blur.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-batch-flow/output/bird_blur.jpg
--------------------------------------------------------------------------------
/cookbook/pocketflow-batch-flow/output/bird_grayscale.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-batch-flow/output/bird_grayscale.jpg
--------------------------------------------------------------------------------
/cookbook/pocketflow-batch-flow/output/bird_sepia.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-batch-flow/output/bird_sepia.jpg
--------------------------------------------------------------------------------
/cookbook/pocketflow-batch-flow/output/cat_blur.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-batch-flow/output/cat_blur.jpg
--------------------------------------------------------------------------------
/cookbook/pocketflow-batch-flow/output/cat_grayscale.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-batch-flow/output/cat_grayscale.jpg
--------------------------------------------------------------------------------
/cookbook/pocketflow-batch-flow/output/cat_sepia.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-batch-flow/output/cat_sepia.jpg
--------------------------------------------------------------------------------
/cookbook/pocketflow-batch-flow/output/dog_blur.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-batch-flow/output/dog_blur.jpg
--------------------------------------------------------------------------------
/cookbook/pocketflow-batch-flow/output/dog_grayscale.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-batch-flow/output/dog_grayscale.jpg
--------------------------------------------------------------------------------
/cookbook/pocketflow-batch-flow/output/dog_sepia.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-batch-flow/output/dog_sepia.jpg
--------------------------------------------------------------------------------
/cookbook/pocketflow-batch-flow/requirements.txt:
--------------------------------------------------------------------------------
1 | pocketflow
2 | Pillow>=10.0.0
--------------------------------------------------------------------------------
/cookbook/pocketflow-batch-node/README.md:
--------------------------------------------------------------------------------
1 | # PocketFlow BatchNode Example
2 |
3 | This example demonstrates the BatchNode concept in PocketFlow by implementing a CSV processor that handles large files by processing them in chunks.
4 |
5 | ## What this Example Demonstrates
6 |
7 | - How to use BatchNode to process large inputs in chunks
8 | - The three key methods of BatchNode:
9 | 1. `prep`: Splits input into chunks
10 | 2. `exec`: Processes each chunk independently
11 | 3. `post`: Combines results from all chunks
12 |
13 | ## Project Structure
14 | ```
15 | pocketflow-batch-node/
16 | ├── README.md
17 | ├── requirements.txt
18 | ├── data/
19 | │ └── sales.csv # Sample large CSV file
20 | ├── main.py # Entry point
21 | ├── flow.py # Flow definition
22 | └── nodes.py # BatchNode implementation
23 | ```
24 |
25 | ## How it Works
26 |
27 | The example processes a large CSV file containing sales data:
28 |
29 | 1. **Chunking (prep)**: The CSV file is read and split into chunks of N rows
30 | 2. **Processing (exec)**: Each chunk is processed to calculate:
31 | - Total sales
32 | - Average sale value
33 | - Number of transactions
34 | 3. **Combining (post)**: Results from all chunks are aggregated into final statistics
35 |
36 | ## Installation
37 |
38 | ```bash
39 | pip install -r requirements.txt
40 | ```
41 |
42 | ## Usage
43 |
44 | ```bash
45 | python main.py
46 | ```
47 |
48 | ## Sample Output
49 |
50 | ```
51 | Processing sales.csv in chunks...
52 |
53 | Final Statistics:
54 | - Total Sales: $1,234,567.89
55 | - Average Sale: $123.45
56 | - Total Transactions: 10,000
57 | ```
58 |
59 | ## Key Concepts Illustrated
60 |
61 | 1. **Chunk-based Processing**: Shows how BatchNode handles large inputs by breaking them into manageable pieces
62 | 2. **Independent Processing**: Demonstrates how each chunk is processed separately
63 | 3. **Result Aggregation**: Shows how individual results are combined into a final output
--------------------------------------------------------------------------------
/cookbook/pocketflow-batch-node/flow.py:
--------------------------------------------------------------------------------
1 | from pocketflow import Flow, Node
2 | from nodes import CSVProcessor
3 |
4 | class ShowStats(Node):
5 | """Node to display the final statistics."""
6 |
7 | def prep(self, shared):
8 | """Get statistics from shared store."""
9 | return shared["statistics"]
10 |
11 | def post(self, shared, prep_res, exec_res):
12 | """Display the statistics."""
13 | stats = prep_res
14 | print("\nFinal Statistics:")
15 | print(f"- Total Sales: ${stats['total_sales']:,.2f}")
16 | print(f"- Average Sale: ${stats['average_sale']:,.2f}")
17 | print(f"- Total Transactions: {stats['total_transactions']:,}\n")
18 | return "end"
19 |
20 | def create_flow():
21 | """Create and return the processing flow."""
22 | # Create nodes
23 | processor = CSVProcessor(chunk_size=1000)
24 | show_stats = ShowStats()
25 |
26 | # Connect nodes
27 | processor - "show_stats" >> show_stats
28 |
29 | # Create and return flow
30 | return Flow(start=processor)
--------------------------------------------------------------------------------
/cookbook/pocketflow-batch-node/main.py:
--------------------------------------------------------------------------------
1 | import os
2 | from flow import create_flow
3 |
4 | def main():
5 | """Run the batch processing example."""
6 | # Create data directory if it doesn't exist
7 | os.makedirs("data", exist_ok=True)
8 |
9 | # Create sample CSV if it doesn't exist
10 | if not os.path.exists("data/sales.csv"):
11 | print("Creating sample sales.csv...")
12 | import pandas as pd
13 | import numpy as np
14 |
15 | # Generate sample data
16 | np.random.seed(42)
17 | n_rows = 10000
18 | df = pd.DataFrame({
19 | "date": pd.date_range("2024-01-01", periods=n_rows),
20 | "amount": np.random.normal(100, 30, n_rows).round(2),
21 | "product": np.random.choice(["A", "B", "C"], n_rows)
22 | })
23 | df.to_csv("data/sales.csv", index=False)
24 |
25 | # Initialize shared store
26 | shared = {
27 | "input_file": "data/sales.csv"
28 | }
29 |
30 | # Create and run flow
31 | print(f"Processing sales.csv in chunks...")
32 | flow = create_flow()
33 | flow.run(shared)
34 |
35 | if __name__ == "__main__":
36 | main()
--------------------------------------------------------------------------------
/cookbook/pocketflow-batch-node/nodes.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | from pocketflow import BatchNode
3 |
4 | class CSVProcessor(BatchNode):
5 | """BatchNode that processes a large CSV file in chunks."""
6 |
7 | def __init__(self, chunk_size=1000):
8 | """Initialize with chunk size."""
9 | super().__init__()
10 | self.chunk_size = chunk_size
11 |
12 | def prep(self, shared):
13 | """Split CSV file into chunks.
14 |
15 | Returns an iterator of DataFrames, each containing chunk_size rows.
16 | """
17 | # Read CSV in chunks
18 | chunks = pd.read_csv(
19 | shared["input_file"],
20 | chunksize=self.chunk_size
21 | )
22 | return chunks
23 |
24 | def exec(self, chunk):
25 | """Process a single chunk of the CSV.
26 |
27 | Args:
28 | chunk: pandas DataFrame containing chunk_size rows
29 |
30 | Returns:
31 | dict: Statistics for this chunk
32 | """
33 | return {
34 | "total_sales": chunk["amount"].sum(),
35 | "num_transactions": len(chunk),
36 | "total_amount": chunk["amount"].sum()
37 | }
38 |
39 | def post(self, shared, prep_res, exec_res_list):
40 | """Combine results from all chunks.
41 |
42 | Args:
43 | prep_res: Original chunks iterator
44 | exec_res_list: List of results from each chunk
45 |
46 | Returns:
47 | str: Action to take next
48 | """
49 | # Combine statistics from all chunks
50 | total_sales = sum(res["total_sales"] for res in exec_res_list)
51 | total_transactions = sum(res["num_transactions"] for res in exec_res_list)
52 | total_amount = sum(res["total_amount"] for res in exec_res_list)
53 |
54 | # Calculate final statistics
55 | shared["statistics"] = {
56 | "total_sales": total_sales,
57 | "average_sale": total_amount / total_transactions,
58 | "total_transactions": total_transactions
59 | }
60 |
61 | return "show_stats"
--------------------------------------------------------------------------------
/cookbook/pocketflow-batch-node/requirements.txt:
--------------------------------------------------------------------------------
1 | pocketflow
2 | pandas>=2.0.0
--------------------------------------------------------------------------------
/cookbook/pocketflow-batch/requirements.txt:
--------------------------------------------------------------------------------
1 | pocketflow>=0.0.1
2 | anthropic>=0.15.0
3 | pyyaml>=6.0
--------------------------------------------------------------------------------
/cookbook/pocketflow-batch/utils.py:
--------------------------------------------------------------------------------
1 | from anthropic import Anthropic
2 | import os
3 |
4 | def call_llm(prompt):
5 | client = Anthropic(api_key=os.environ.get("ANTHROPIC_API_KEY", "your-api-key"))
6 | response = client.messages.create(
7 | model="claude-3-7-sonnet-20250219",
8 | max_tokens=20000,
9 | thinking={
10 | "type": "enabled",
11 | "budget_tokens": 16000
12 | },
13 | messages=[
14 | {"role": "user", "content": prompt}
15 | ]
16 | )
17 | return response.content[1].text
18 |
19 | if __name__ == "__main__":
20 | print("## Testing call_llm")
21 | prompt = "In a few words, what is the meaning of life?"
22 | print(f"## Prompt: {prompt}")
23 | response = call_llm(prompt)
24 | print(f"## Response: {response}")
--------------------------------------------------------------------------------
/cookbook/pocketflow-chat-guardrail/requirements.txt:
--------------------------------------------------------------------------------
1 | pocketflow>=0.0.1
2 | openai>=1.0.0
--------------------------------------------------------------------------------
/cookbook/pocketflow-chat-guardrail/utils.py:
--------------------------------------------------------------------------------
1 | from openai import OpenAI
2 | import os
3 |
4 | def call_llm(messages):
5 | client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY", "your-api-key"))
6 |
7 | response = client.chat.completions.create(
8 | model="gpt-4o",
9 | messages=messages,
10 | temperature=0.7
11 | )
12 |
13 | return response.choices[0].message.content
14 |
15 | if __name__ == "__main__":
16 | # Test the LLM call
17 | messages = [{"role": "user", "content": "In a few words, what's the meaning of life?"}]
18 | response = call_llm(messages)
19 | print(f"Prompt: {messages[0]['content']}")
20 | print(f"Response: {response}")
21 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-chat-memory/flow.py:
--------------------------------------------------------------------------------
1 | from pocketflow import Flow
2 | from nodes import GetUserQuestionNode, RetrieveNode, AnswerNode, EmbedNode
3 |
4 | def create_chat_flow():
5 | # Create the nodes
6 | question_node = GetUserQuestionNode()
7 | retrieve_node = RetrieveNode()
8 | answer_node = AnswerNode()
9 | embed_node = EmbedNode()
10 |
11 | # Connect the flow:
12 | # 1. Start with getting a question
13 | # 2. Retrieve relevant conversations
14 | # 3. Generate an answer
15 | # 4. Optionally embed old conversations
16 | # 5. Loop back to get the next question
17 |
18 | # Main flow path
19 | question_node - "retrieve" >> retrieve_node
20 | retrieve_node - "answer" >> answer_node
21 |
22 | # When we need to embed old conversations
23 | answer_node - "embed" >> embed_node
24 |
25 | # Loop back for next question
26 | answer_node - "question" >> question_node
27 | embed_node - "question" >> question_node
28 |
29 | # Create the flow starting with question node
30 | return Flow(start=question_node)
31 |
32 | # Initialize the flow
33 | chat_flow = create_chat_flow()
--------------------------------------------------------------------------------
/cookbook/pocketflow-chat-memory/main.py:
--------------------------------------------------------------------------------
1 | from flow import chat_flow
2 |
3 | def run_chat_memory_demo():
4 | """
5 | Run an interactive chat interface with memory retrieval.
6 |
7 | Features:
8 | 1. Maintains a window of the 3 most recent conversation pairs
9 | 2. Archives older conversations with embeddings
10 | 3. Retrieves 1 relevant past conversation when needed
11 | 4. Total context to LLM: 3 recent pairs + 1 retrieved pair
12 | """
13 |
14 | print("=" * 50)
15 | print("PocketFlow Chat with Memory")
16 | print("=" * 50)
17 | print("This chat keeps your 3 most recent conversations")
18 | print("and brings back relevant past conversations when helpful")
19 | print("Type 'exit' to end the conversation")
20 | print("=" * 50)
21 |
22 | # Run the chat flow
23 | chat_flow.run({})
24 |
25 | if __name__ == "__main__":
26 | run_chat_memory_demo()
--------------------------------------------------------------------------------
/cookbook/pocketflow-chat-memory/requirements.txt:
--------------------------------------------------------------------------------
1 | pocketflow>=0.0.2
2 | numpy>=1.20.0
3 | faiss-cpu>=1.7.0
4 | openai>=1.0.0
5 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-chat-memory/utils/__init__.py:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-chat-memory/utils/call_llm.py:
--------------------------------------------------------------------------------
1 | import os
2 | from openai import OpenAI
3 |
4 | def call_llm(messages):
5 | client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY", "your-api-key"))
6 |
7 | response = client.chat.completions.create(
8 | model="gpt-4o",
9 | messages=messages,
10 | temperature=0.7
11 | )
12 |
13 | return response.choices[0].message.content
14 |
15 | if __name__ == "__main__":
16 | # Test the LLM call
17 | messages = [{"role": "user", "content": "In a few words, what's the meaning of life?"}]
18 | response = call_llm(messages)
19 | print(f"Prompt: {messages[0]['content']}")
20 | print(f"Response: {response}")
--------------------------------------------------------------------------------
/cookbook/pocketflow-chat-memory/utils/get_embedding.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 | from openai import OpenAI
4 |
5 | def get_embedding(text):
6 | client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY", "YOUR_API_KEY"))
7 |
8 | response = client.embeddings.create(
9 | model="text-embedding-ada-002",
10 | input=text
11 | )
12 |
13 | # Extract the embedding vector from the response
14 | embedding = response.data[0].embedding
15 |
16 | # Convert to numpy array for consistency with other embedding functions
17 | return np.array(embedding, dtype=np.float32)
18 |
19 |
20 | if __name__ == "__main__":
21 | # Test the embedding function
22 | text1 = "The quick brown fox jumps over the lazy dog."
23 | text2 = "Python is a popular programming language for data science."
24 |
25 | emb1 = get_embedding(text1)
26 | emb2 = get_embedding(text2)
27 |
28 | print(f"Embedding 1 shape: {emb1.shape}")
29 | print(f"Embedding 2 shape: {emb2.shape}")
30 |
31 | # Calculate similarity (dot product)
32 | similarity = np.dot(emb1, emb2)
33 | print(f"Similarity between texts: {similarity:.4f}")
--------------------------------------------------------------------------------
/cookbook/pocketflow-chat-memory/utils/vector_index.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import faiss
3 |
4 | def create_index(dimension=1536):
5 | return faiss.IndexFlatL2(dimension)
6 |
7 | def add_vector(index, vector):
8 | # Make sure the vector is a numpy array with the right shape for FAISS
9 | vector = np.array(vector).reshape(1, -1).astype(np.float32)
10 |
11 | # Add the vector to the index
12 | index.add(vector)
13 |
14 | # Return the position (index.ntotal is the total number of vectors in the index)
15 | return index.ntotal - 1
16 |
17 | def search_vectors(index, query_vector, k=1):
18 | """Search for the k most similar vectors to the query vector
19 |
20 | Args:
21 | index: The FAISS index
22 | query_vector: The query vector (numpy array or list)
23 | k: Number of results to return (default: 1)
24 |
25 | Returns:
26 | tuple: (indices, distances) where:
27 | - indices is a list of positions in the index
28 | - distances is a list of the corresponding distances
29 | """
30 | # Make sure we don't try to retrieve more vectors than exist in the index
31 | k = min(k, index.ntotal)
32 | if k == 0:
33 | return [], []
34 |
35 | # Make sure the query is a numpy array with the right shape for FAISS
36 | query_vector = np.array(query_vector).reshape(1, -1).astype(np.float32)
37 |
38 | # Search the index
39 | distances, indices = index.search(query_vector, k)
40 |
41 | return indices[0].tolist(), distances[0].tolist()
42 |
43 | # Example usage
44 | if __name__ == "__main__":
45 | # Create a new index
46 | index = create_index(dimension=3)
47 |
48 | # Add some random vectors and track them separately
49 | items = []
50 | for i in range(5):
51 | vector = np.random.random(3)
52 | position = add_vector(index, vector)
53 | items.append(f"Item {i}")
54 | print(f"Added vector at position {position}")
55 |
56 | print(f"Index contains {index.ntotal} vectors")
57 |
58 | # Search for a similar vector
59 | query = np.random.random(3)
60 | indices, distances = search_vectors(index, query, k=2)
61 |
62 | print("Query:", query)
63 | print("Found indices:", indices)
64 | print("Distances:", distances)
65 | print("Retrieved items:", [items[idx] for idx in indices])
--------------------------------------------------------------------------------
/cookbook/pocketflow-chat/README.md:
--------------------------------------------------------------------------------
1 | # Simple PocketFlow Chat
2 |
3 | A basic chat application using PocketFlow with OpenAI's GPT-4o model.
4 |
5 | ## Features
6 |
7 | - Conversational chat interface in the terminal
8 | - Maintains full conversation history for context
9 | - Simple implementation demonstrating PocketFlow's node and flow concepts
10 |
11 | ## Run It
12 |
13 | 1. Make sure your OpenAI API key is set:
14 | ```bash
15 | export OPENAI_API_KEY="your-api-key-here"
16 | ```
17 | Alternatively, you can edit the `utils.py` file to include your API key directly.
18 |
19 | 2. Install requirements and run the application:
20 | ```bash
21 | pip install -r requirements.txt
22 | python main.py
23 | ```
24 |
25 | ## How It Works
26 |
27 | ```mermaid
28 | flowchart LR
29 | chat[ChatNode] -->|continue| chat
30 | ```
31 |
32 | The chat application uses:
33 | - A single `ChatNode` with a self-loop that:
34 | - Takes user input in the `prep` method
35 | - Sends the complete conversation history to GPT-4o
36 | - Adds responses to the conversation history
37 | - Loops back to continue the chat until the user types 'exit'
38 |
39 |
40 | ## Files
41 |
42 | - [`main.py`](./main.py): Implementation of the ChatNode and chat flow
43 | - [`utils.py`](./utils.py): Simple wrapper for calling the OpenAI API
44 |
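45 | ## Minimal Sketch
46 | 
47 | The self-loop described above boils down to one node whose `post` returns `"continue"`, wired back to itself. Here is a minimal, self-contained sketch (mirroring `main.py`; it assumes `pocketflow` is installed and the local `utils.py` wrapper is available):
48 | 
49 | ```python
50 | from pocketflow import Node, Flow
51 | from utils import call_llm  # local OpenAI wrapper from this example
52 | 
53 | class MiniChatNode(Node):
54 |     def prep(self, shared):
55 |         shared.setdefault("messages", [])
56 |         user_input = input("\nYou: ")
57 |         if user_input.lower() == "exit":
58 |             return None                      # ends the flow
59 |         shared["messages"].append({"role": "user", "content": user_input})
60 |         return shared["messages"]
61 | 
62 |     def exec(self, messages):
63 |         return call_llm(messages) if messages else None
64 | 
65 |     def post(self, shared, prep_res, exec_res):
66 |         if exec_res is None:
67 |             return None                      # no transition -> flow ends
68 |         print(f"\nAssistant: {exec_res}")
69 |         shared["messages"].append({"role": "assistant", "content": exec_res})
70 |         return "continue"                    # follow the self-loop
71 | 
72 | node = MiniChatNode()
73 | node - "continue" >> node                    # self-loop back to the same node
74 | Flow(start=node).run({})
75 | ```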
--------------------------------------------------------------------------------
/cookbook/pocketflow-chat/main.py:
--------------------------------------------------------------------------------
1 | from pocketflow import Node, Flow
2 | from utils import call_llm
3 |
4 | class ChatNode(Node):
5 | def prep(self, shared):
6 | # Initialize messages if this is the first run
7 | if "messages" not in shared:
8 | shared["messages"] = []
9 | print("Welcome to the chat! Type 'exit' to end the conversation.")
10 |
11 | # Get user input
12 | user_input = input("\nYou: ")
13 |
14 | # Check if user wants to exit
15 | if user_input.lower() == 'exit':
16 | return None
17 |
18 | # Add user message to history
19 | shared["messages"].append({"role": "user", "content": user_input})
20 |
21 | # Return all messages for the LLM
22 | return shared["messages"]
23 |
24 | def exec(self, messages):
25 | if messages is None:
26 | return None
27 |
28 | # Call LLM with the entire conversation history
29 | response = call_llm(messages)
30 | return response
31 |
32 | def post(self, shared, prep_res, exec_res):
33 | if prep_res is None or exec_res is None:
34 | print("\nGoodbye!")
35 | return None # End the conversation
36 |
37 | # Print the assistant's response
38 | print(f"\nAssistant: {exec_res}")
39 |
40 | # Add assistant message to history
41 | shared["messages"].append({"role": "assistant", "content": exec_res})
42 |
43 | # Loop back to continue the conversation
44 | return "continue"
45 |
46 | # Create the flow with self-loop
47 | chat_node = ChatNode()
48 | chat_node - "continue" >> chat_node # Loop back to continue conversation
49 |
50 | flow = Flow(start=chat_node)
51 |
52 | # Start the chat
53 | if __name__ == "__main__":
54 | shared = {}
55 | flow.run(shared)
56 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-chat/requirements.txt:
--------------------------------------------------------------------------------
1 | pocketflow>=0.0.1
2 | openai>=1.0.0
--------------------------------------------------------------------------------
/cookbook/pocketflow-chat/utils.py:
--------------------------------------------------------------------------------
1 | from openai import OpenAI
2 | import os
3 |
4 | def call_llm(messages):
5 | client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY", "your-api-key"))
6 |
7 | response = client.chat.completions.create(
8 | model="gpt-4o",
9 | messages=messages,
10 | temperature=0.7
11 | )
12 |
13 | return response.choices[0].message.content
14 |
15 | if __name__ == "__main__":
16 | # Test the LLM call
17 | messages = [{"role": "user", "content": "In a few words, what's the meaning of life?"}]
18 | response = call_llm(messages)
19 | print(f"Prompt: {messages[0]['content']}")
20 | print(f"Response: {response}")
21 |
22 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-cli-hitl/flow.py:
--------------------------------------------------------------------------------
1 | from pocketflow import Flow
2 | from nodes import GetTopicNode, GenerateJokeNode, GetFeedbackNode
3 |
4 | def create_joke_flow() -> Flow:
5 | """Creates and returns the joke generation flow."""
6 | get_topic_node = GetTopicNode()
7 | generate_joke_node = GenerateJokeNode()
8 | get_feedback_node = GetFeedbackNode()
9 |
10 | get_topic_node >> generate_joke_node
11 | generate_joke_node >> get_feedback_node
12 | get_feedback_node - "Disapprove" >> generate_joke_node
13 |
14 | joke_flow = Flow(start=get_topic_node)
15 | return joke_flow
--------------------------------------------------------------------------------
/cookbook/pocketflow-cli-hitl/main.py:
--------------------------------------------------------------------------------
1 | from flow import create_joke_flow
2 |
3 | def main():
4 | """Main function to run the joke generator application."""
5 | print("Welcome to the Command-Line Joke Generator!")
6 |
7 | shared = {
8 | "topic": None,
9 | "current_joke": None,
10 | "disliked_jokes": [],
11 | "user_feedback": None
12 | }
13 |
14 | joke_flow = create_joke_flow()
15 | joke_flow.run(shared)
16 |
17 | print("\nThanks for using the Joke Generator!")
18 |
19 | if __name__ == "__main__":
20 | main()
--------------------------------------------------------------------------------
/cookbook/pocketflow-cli-hitl/nodes.py:
--------------------------------------------------------------------------------
1 | from pocketflow import Node
2 | from utils.call_llm import call_llm
3 |
4 | class GetTopicNode(Node):
5 | """Prompts the user to enter the topic for the joke."""
6 |     def exec(self, _prep_res):
7 | return input("What topic would you like a joke about? ")
8 |
9 | def post(self, shared, _prep_res, exec_res):
10 | shared["topic"] = exec_res
11 |
12 | class GenerateJokeNode(Node):
13 | """Generates a joke based on the topic and any previous feedback."""
14 | def prep(self, shared):
15 | topic = shared.get("topic", "anything")
16 | disliked_jokes = shared.get("disliked_jokes", [])
17 |
18 |         prompt = f"Please generate a one-liner joke about: {topic}. Make it short and funny."
19 | if disliked_jokes:
20 | disliked_str = "; ".join(disliked_jokes)
21 | prompt = f"The user did not like the following jokes: [{disliked_str}]. Please generate a new, different joke about {topic}."
22 | return prompt
23 |
24 | def exec(self, prep_res):
25 | return call_llm(prep_res)
26 |
27 | def post(self, shared, _prep_res, exec_res):
28 | shared["current_joke"] = exec_res
29 | print(f"\nJoke: {exec_res}")
30 |
31 | class GetFeedbackNode(Node):
32 | """Presents the joke to the user and asks for approval."""
33 | def exec(self, _prep_res):
34 | while True:
35 | feedback = input("Did you like this joke? (yes/no): ").strip().lower()
36 | if feedback in ["yes", "y", "no", "n"]:
37 | return feedback
38 | print("Invalid input. Please type 'yes' or 'no'.")
39 |
40 | def post(self, shared, _prep_res, exec_res):
41 | if exec_res in ["yes", "y"]:
42 | shared["user_feedback"] = "approve"
43 | print("Great! Glad you liked it.")
44 | return "Approve"
45 | else:
46 | shared["user_feedback"] = "disapprove"
47 | current_joke = shared.get("current_joke")
48 | if current_joke:
49 | if "disliked_jokes" not in shared:
50 | shared["disliked_jokes"] = []
51 | shared["disliked_jokes"].append(current_joke)
52 | print("Okay, let me try another one.")
53 | return "Disapprove"
--------------------------------------------------------------------------------
/cookbook/pocketflow-cli-hitl/requirements.txt:
--------------------------------------------------------------------------------
1 | pocketflow>=0.0.1
2 | anthropic>=0.20.0 # Or a recent version
--------------------------------------------------------------------------------
/cookbook/pocketflow-cli-hitl/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-cli-hitl/utils/__init__.py
--------------------------------------------------------------------------------
/cookbook/pocketflow-cli-hitl/utils/call_llm.py:
--------------------------------------------------------------------------------
1 | from anthropic import Anthropic
2 | import os
3 |
4 | def call_llm(prompt: str) -> str:
5 | client = Anthropic(api_key=os.environ.get("ANTHROPIC_API_KEY", "your-anthropic-api-key")) # Default if key not found
6 | response = client.messages.create(
7 | model="claude-3-haiku-20240307", # Using a smaller model for jokes
8 | max_tokens=150, # Jokes don't need to be very long
9 | messages=[
10 | {"role": "user", "content": prompt}
11 | ]
12 | )
13 | return response.content[0].text
14 |
15 | if __name__ == "__main__":
16 | print("Testing Anthropic LLM call for jokes:")
17 | joke_prompt = "Tell me a one-liner joke about a cat."
18 | print(f"Prompt: {joke_prompt}")
19 | try:
20 | response = call_llm(joke_prompt)
21 | print(f"Response: {response}")
22 | except Exception as e:
23 | print(f"Error calling LLM: {e}")
24 | print("Please ensure your ANTHROPIC_API_KEY environment variable is set correctly.")
--------------------------------------------------------------------------------
/cookbook/pocketflow-code-generator/flow.py:
--------------------------------------------------------------------------------
1 | from pocketflow import Flow
2 | from nodes import GenerateTestCases, ImplementFunction, RunTests, Revise
3 |
4 | def create_code_generator_flow():
5 | """Creates and returns the code generator flow."""
6 | # Create nodes
7 | generate_tests = GenerateTestCases()
8 | implement_function = ImplementFunction()
9 | run_tests = RunTests()
10 | revise = Revise()
11 |
12 | # Define transitions
13 | generate_tests >> implement_function
14 | implement_function >> run_tests
15 | run_tests - "failure" >> revise
16 | revise >> run_tests
17 |
18 | # Create flow starting with test generation
19 | flow = Flow(start=generate_tests)
20 | return flow
--------------------------------------------------------------------------------
/cookbook/pocketflow-code-generator/main.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from flow import create_code_generator_flow
3 |
4 | def main():
5 | """Runs the PocketFlow Code Generator application."""
6 | print("Starting PocketFlow Code Generator...")
7 |
8 | # Check if problem is provided as argument
9 | if len(sys.argv) > 1:
10 | problem = " ".join(sys.argv[1:])
11 | else:
12 | # Default Two Sum problem
13 | problem = """Two Sum
14 |
15 | Given an array of integers nums and an integer target, return indices of the two numbers such that they add up to target.
16 |
17 | You may assume that each input would have exactly one solution, and you may not use the same element twice.
18 |
19 | Example 1:
20 | Input: nums = [2,7,11,15], target = 9
21 | Output: [0,1]
22 |
23 | Example 2:
24 | Input: nums = [3,2,4], target = 6
25 | Output: [1,2]
26 |
27 | Example 3:
28 | Input: nums = [3,3], target = 6
29 | Output: [0,1]"""
30 |
31 | shared = {
32 | "problem": problem,
33 | "test_cases": [], # Will be populated with [{name, input, expected}, ...]
34 | "function_code": "",
35 | "test_results": [],
36 | "iteration_count": 0,
37 | "max_iterations": 5
38 | }
39 |
40 | # Create and run the flow
41 | flow = create_code_generator_flow()
42 | flow.run(shared)
43 |
44 | print("\n=== Final Results ===")
45 | print(f"Problem: {shared['problem'][:50]}...")
46 | print(f"Iterations: {shared['iteration_count']}")
47 | print(f"Function:\n{shared['function_code']}")
48 | print(f"Test Results: {len([r for r in shared['test_results'] if r['passed']])}/{len(shared['test_results'])} passed")
49 |
50 | if __name__ == "__main__":
51 | main()
--------------------------------------------------------------------------------
/cookbook/pocketflow-code-generator/requirements.txt:
--------------------------------------------------------------------------------
1 | anthropic
2 | pocketflow
3 | pyyaml
--------------------------------------------------------------------------------
/cookbook/pocketflow-code-generator/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-code-generator/utils/__init__.py
--------------------------------------------------------------------------------
/cookbook/pocketflow-code-generator/utils/call_llm.py:
--------------------------------------------------------------------------------
1 | from anthropic import Anthropic
2 | import os
3 |
4 | def call_llm(prompt):
5 | client = Anthropic(api_key=os.environ.get("ANTHROPIC_API_KEY", "your-api-key"))
6 | response = client.messages.create(
7 | model="claude-sonnet-4-20250514",
8 | max_tokens=6000,
9 | messages=[
10 | {"role": "user", "content": prompt}
11 | ]
12 | )
13 | return response.content[0].text
14 |
15 | if __name__ == "__main__":
16 | print("## Testing call_llm")
17 | prompt = "In a few words, what is the meaning of life?"
18 | print(f"## Prompt: {prompt}")
19 | response = call_llm(prompt)
20 | print(f"## Response: {response}")
--------------------------------------------------------------------------------
/cookbook/pocketflow-code-generator/utils/code_executor.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import io
3 | import traceback
4 | from contextlib import redirect_stdout, redirect_stderr
5 |
6 | def execute_python(function_code, input):
7 | try:
8 | namespace = {"__builtins__": __builtins__}
9 | stdout_capture = io.StringIO()
10 | stderr_capture = io.StringIO()
11 |
12 | with redirect_stdout(stdout_capture), redirect_stderr(stderr_capture):
13 | exec(function_code, namespace)
14 |
15 | if "run_code" not in namespace:
16 | return None, "Function 'run_code' not found"
17 |
18 | run_code = namespace["run_code"]
19 |
20 | if isinstance(input, dict):
21 | result = run_code(**input)
22 | elif isinstance(input, (list, tuple)):
23 | result = run_code(*input)
24 | else:
25 | result = run_code(input)
26 |
27 | return result, None
28 |
29 | except Exception as e:
30 | return None, f"{type(e).__name__}: {str(e)}"
31 |
32 | if __name__ == "__main__":
33 | # Test 1: Working function
34 | function_code = """
35 | def run_code(nums, target):
36 | for i in range(len(nums)):
37 | for j in range(i + 1, len(nums)):
38 | if nums[i] + nums[j] == target:
39 | return [i, j]
40 | return []
41 | """
42 |
43 | input = {"nums": [2, 7, 11, 15], "target": 9}
44 | output, error = execute_python(function_code, input)
45 | print(f"Output: {output}")
46 | print(f"Error: {error}")
47 |
48 | # Test 2: Function with error
49 | broken_function_code = """
50 | def run_code(nums, target):
51 | return nums[100] # Index error
52 | """
53 |
54 | output2, error2 = execute_python(broken_function_code, input)
55 | print(f"Output: {output2}")
56 | print(f"Error: {error2}")
--------------------------------------------------------------------------------
/cookbook/pocketflow-communication/README.md:
--------------------------------------------------------------------------------
1 | # PocketFlow Communication Example
2 |
3 | This example demonstrates the [Communication](https://the-pocket.github.io/PocketFlow/communication.html) concept in PocketFlow, specifically focusing on the Shared Store pattern.
4 |
5 | ## Overview
6 |
7 | The example implements a simple word counter that shows how nodes can communicate using a shared store. It demonstrates:
8 |
9 | - How to initialize and structure a shared store
10 | - How nodes can read from and write to the shared store
11 | - How to maintain state across multiple node executions
12 | - Best practices for shared store usage
13 |
14 | ## Project Structure
15 |
16 | ```
17 | pocketflow-communication/
18 | ├── README.md
19 | ├── requirements.txt
20 | ├── main.py
21 | ├── flow.py
22 | └── nodes.py
23 | ```
24 |
25 | ## Installation
26 |
27 | ```bash
28 | pip install -r requirements.txt
29 | ```
30 |
31 | ## Usage
32 |
33 | ```bash
34 | python main.py
35 | ```
36 |
37 | Enter text when prompted. The program will:
38 | 1. Count words in the text
39 | 2. Store statistics in the shared store
40 | 3. Display running statistics (total texts, total words, average)
41 |
42 | Enter 'q' to quit.
43 |
44 | ## How it Works
45 |
46 | The example uses three nodes:
47 |
48 | 1. `TextInput`: Reads user input and initializes the shared store
49 | 2. `WordCounter`: Counts words and updates statistics in the shared store
50 | 3. `ShowStats`: Displays statistics from the shared store
51 |
52 | This demonstrates how nodes can share and maintain state using the shared store pattern.
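53 | 
54 | ## Shared Store Sketch
55 | 
56 | The pattern is simply: read from the shared store in `prep`, compute in `exec`, and write back (and pick the next action) in `post`. A minimal sketch of the `WordCounter` node (mirroring `nodes.py`):
57 | 
58 | ```python
59 | from pocketflow import Node
60 | 
61 | class WordCounter(Node):
62 |     def prep(self, shared):
63 |         # Read the text placed in the shared store by TextInput
64 |         return shared["text"]
65 | 
66 |     def exec(self, text):
67 |         # Pure computation; exec never touches the shared store
68 |         return len(text.split())
69 | 
70 |     def post(self, shared, prep_res, exec_res):
71 |         # Write the result back for ShowStats to read
72 |         shared["stats"]["total_words"] += exec_res
73 |         return "show"  # action string selects the next transition
74 | ```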
--------------------------------------------------------------------------------
/cookbook/pocketflow-communication/flow.py:
--------------------------------------------------------------------------------
1 | """Flow configuration for the communication example."""
2 |
3 | from pocketflow import Flow
4 | from nodes import TextInput, WordCounter, ShowStats, EndNode
5 |
6 | def create_flow():
7 | """Create and configure the flow with all nodes."""
8 | # Create nodes
9 | text_input = TextInput()
10 | word_counter = WordCounter()
11 | show_stats = ShowStats()
12 | end_node = EndNode()
13 |
14 | # Configure transitions
15 | text_input - "count" >> word_counter
16 | word_counter - "show" >> show_stats
17 | show_stats - "continue" >> text_input
18 | text_input - "exit" >> end_node
19 |
20 | # Create and return flow
21 | return Flow(start=text_input)
--------------------------------------------------------------------------------
/cookbook/pocketflow-communication/main.py:
--------------------------------------------------------------------------------
1 | from flow import create_flow
2 |
3 | def main():
4 | """Run the communication example."""
5 | flow = create_flow()
6 | shared = {}
7 | flow.run(shared)
8 |
9 | if __name__ == "__main__":
10 | main()
--------------------------------------------------------------------------------
/cookbook/pocketflow-communication/nodes.py:
--------------------------------------------------------------------------------
1 | """Node implementations for the communication example."""
2 |
3 | from pocketflow import Node
4 |
5 | class EndNode(Node):
6 | """Node that handles flow termination."""
7 | pass
8 |
9 | class TextInput(Node):
10 | """Node that reads text input and initializes the shared store."""
11 |
12 | def prep(self, shared):
13 | """Get user input and ensure shared store is initialized."""
14 | return input("Enter text (or 'q' to quit): ")
15 |
16 | def post(self, shared, prep_res, exec_res):
17 | """Store text and initialize/update statistics."""
18 | if prep_res == 'q':
19 | return "exit"
20 |
21 | # Store the text
22 | shared["text"] = prep_res
23 |
24 | # Initialize statistics if they don't exist
25 | if "stats" not in shared:
26 | shared["stats"] = {
27 | "total_texts": 0,
28 | "total_words": 0
29 | }
30 | shared["stats"]["total_texts"] += 1
31 |
32 | return "count"
33 |
34 | class WordCounter(Node):
35 | """Node that counts words in the text."""
36 |
37 | def prep(self, shared):
38 | """Get text from shared store."""
39 | return shared["text"]
40 |
41 | def exec(self, text):
42 | """Count words in the text."""
43 | return len(text.split())
44 |
45 | def post(self, shared, prep_res, exec_res):
46 | """Update word count statistics."""
47 | shared["stats"]["total_words"] += exec_res
48 | return "show"
49 |
50 | class ShowStats(Node):
51 | """Node that displays statistics from the shared store."""
52 |
53 | def prep(self, shared):
54 | """Get statistics from shared store."""
55 | return shared["stats"]
56 |
57 | def post(self, shared, prep_res, exec_res):
58 | """Display statistics and continue the flow."""
59 | stats = prep_res
60 | print(f"\nStatistics:")
61 | print(f"- Texts processed: {stats['total_texts']}")
62 | print(f"- Total words: {stats['total_words']}")
63 | print(f"- Average words per text: {stats['total_words'] / stats['total_texts']:.1f}\n")
64 | return "continue"
--------------------------------------------------------------------------------
/cookbook/pocketflow-communication/requirements.txt:
--------------------------------------------------------------------------------
1 | pocketflow==0.1.0
--------------------------------------------------------------------------------
/cookbook/pocketflow-fastapi-background/assets/banner.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-fastapi-background/assets/banner.png
--------------------------------------------------------------------------------
/cookbook/pocketflow-fastapi-background/flow.py:
--------------------------------------------------------------------------------
1 | from pocketflow import Flow
2 | from nodes import GenerateOutline, WriteContent, ApplyStyle
3 |
4 | def create_article_flow():
5 | """
6 | Create and configure the article writing workflow
7 | """
8 | # Create node instances
9 | outline_node = GenerateOutline()
10 | content_node = WriteContent()
11 | style_node = ApplyStyle()
12 |
13 | # Connect nodes in sequence
14 | outline_node >> content_node >> style_node
15 |
16 | # Create flow starting with outline node
17 | article_flow = Flow(start=outline_node)
18 |
19 | return article_flow
--------------------------------------------------------------------------------
/cookbook/pocketflow-fastapi-background/requirements.txt:
--------------------------------------------------------------------------------
1 | fastapi
2 | uvicorn
3 | openai
4 | pyyaml
5 | python-multipart
6 | pocketflow
--------------------------------------------------------------------------------
/cookbook/pocketflow-fastapi-background/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-fastapi-background/utils/__init__.py
--------------------------------------------------------------------------------
/cookbook/pocketflow-fastapi-background/utils/call_llm.py:
--------------------------------------------------------------------------------
1 | import os
2 | from openai import OpenAI
3 |
4 | def call_llm(prompt):
5 | client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY", "your-api-key"))
6 | r = client.chat.completions.create(
7 | model="gpt-4o",
8 | messages=[{"role": "user", "content": prompt}]
9 | )
10 | return r.choices[0].message.content
11 |
12 | if __name__ == "__main__":
13 | print(call_llm("Tell me a short joke"))
--------------------------------------------------------------------------------
/cookbook/pocketflow-fastapi-hitl/assets/banner.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-fastapi-hitl/assets/banner.png
--------------------------------------------------------------------------------
/cookbook/pocketflow-fastapi-hitl/flow.py:
--------------------------------------------------------------------------------
1 | from pocketflow import AsyncFlow
2 | from nodes import ProcessNode, ReviewNode, ResultNode
3 |
4 | def create_feedback_flow():
5 | """Creates the minimal feedback workflow."""
6 | process_node = ProcessNode()
7 | review_node = ReviewNode()
8 | result_node = ResultNode()
9 |
10 | # Define transitions
11 | process_node >> review_node
12 | review_node - "approved" >> result_node
13 | review_node - "rejected" >> process_node # Loop back
14 |
15 | # Create the AsyncFlow
16 | flow = AsyncFlow(start=process_node)
17 | print("Minimal feedback flow created.")
18 | return flow
--------------------------------------------------------------------------------
/cookbook/pocketflow-fastapi-hitl/main.py:
--------------------------------------------------------------------------------
1 | from flow import qa_flow
2 |
3 | # Example main function
4 | # Please replace this with your own main function
5 | def main():
6 | shared = {
7 | "question": "In one sentence, what's the end of universe?",
8 | "answer": None
9 | }
10 |
11 | qa_flow.run(shared)
12 | print("Question:", shared["question"])
13 | print("Answer:", shared["answer"])
14 |
15 | if __name__ == "__main__":
16 | main()
--------------------------------------------------------------------------------
/cookbook/pocketflow-fastapi-hitl/requirements.txt:
--------------------------------------------------------------------------------
1 | pocketflow>=0.0.1
2 | fastapi
3 | uvicorn[standard] # ASGI server for FastAPI
4 | jinja2 # For HTML templating
--------------------------------------------------------------------------------
/cookbook/pocketflow-fastapi-hitl/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-fastapi-hitl/utils/__init__.py
--------------------------------------------------------------------------------
/cookbook/pocketflow-fastapi-hitl/utils/process_task.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | def process_task(input_data):
4 | """Minimal simulation of processing the input data."""
5 | print(f"Processing: '{input_data[:50]}...'")
6 |
7 | # Simulate work
8 | time.sleep(2)
9 |
10 | processed_result = f"Processed: {input_data}"
11 | print(f"Finished processing.")
12 | return processed_result
13 |
14 | # We don't need a separate utils/call_llm.py for this minimal example,
15 | # but you would add it here if ProcessNode used an LLM.
16 |
17 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-fastapi-websocket/README.md:
--------------------------------------------------------------------------------
1 | # PocketFlow FastAPI WebSocket Chat
2 |
3 | Real-time chat interface with streaming LLM responses using PocketFlow, FastAPI, and WebSocket.
4 |
5 |
6 |
9 |
10 |
11 | ## Features
12 |
13 | - **Real-time Streaming**: See AI responses typed out in real-time as the LLM generates them
14 | - **Conversation Memory**: Maintains chat history across messages
15 | - **Modern UI**: Clean, responsive chat interface with gradient design
16 | - **WebSocket Connection**: Persistent connection for instant communication
17 | - **PocketFlow Integration**: Uses PocketFlow `AsyncNode` and `AsyncFlow` for streaming
18 |
19 | ## How to Run
20 |
21 | 1. **Set OpenAI API Key:**
22 | ```bash
23 | export OPENAI_API_KEY="your-openai-api-key"
24 | ```
25 |
26 | 2. **Install Dependencies:**
27 | ```bash
28 | pip install -r requirements.txt
29 | ```
30 |
31 | 3. **Run the Application:**
32 | ```bash
33 | python main.py
34 | ```
35 |
36 | 4. **Access the Web UI:**
37 | Open `http://localhost:8000` in your browser.
38 |
39 | ## Usage
40 |
41 | 1. **Type Message**: Enter your message in the input field
42 | 2. **Send**: Press Enter or click Send button
43 | 3. **Watch Streaming**: See the AI response appear in real-time
44 | 4. **Continue Chat**: Conversation history is maintained automatically
45 |
46 | ## Files
47 |
48 | - [`main.py`](./main.py): FastAPI application with WebSocket endpoint
49 | - [`nodes.py`](./nodes.py): PocketFlow `StreamingChatNode` definition
50 | - [`flow.py`](./flow.py): PocketFlow `AsyncFlow` for chat processing
51 | - [`utils/stream_llm.py`](./utils/stream_llm.py): OpenAI streaming utility
52 | - [`static/index.html`](./static/index.html): Modern chat interface
53 | - [`requirements.txt`](./requirements.txt): Project dependencies
54 | - [`docs/design.md`](./docs/design.md): System design documentation
55 | - [`README.md`](./README.md): This file
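56 | 
57 | ## Streaming Sketch
58 | 
59 | The streaming itself happens inside a PocketFlow `AsyncNode`: its `exec_async` iterates over the OpenAI stream and forwards each chunk over the WebSocket. A minimal sketch (mirroring `nodes.py` and `utils/stream_llm.py`; `prep_async` supplies the message history and the WebSocket):
60 | 
61 | ```python
62 | import json
63 | from pocketflow import AsyncNode
64 | from utils.stream_llm import stream_llm  # async generator over OpenAI chunks
65 | 
66 | class StreamingChatNode(AsyncNode):
67 |     async def exec_async(self, prep_res):
68 |         messages, websocket = prep_res  # prepared by prep_async
69 | 
70 |         await websocket.send_text(json.dumps({"type": "start", "content": ""}))
71 | 
72 |         full_response = ""
73 |         async for chunk in stream_llm(messages):
74 |             full_response += chunk
75 |             # Each chunk is pushed to the browser as soon as it arrives
76 |             await websocket.send_text(json.dumps({"type": "chunk", "content": chunk}))
77 | 
78 |         await websocket.send_text(json.dumps({"type": "end", "content": ""}))
79 |         return full_response, websocket
80 | ```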
--------------------------------------------------------------------------------
/cookbook/pocketflow-fastapi-websocket/assets/banner.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-fastapi-websocket/assets/banner.png
--------------------------------------------------------------------------------
/cookbook/pocketflow-fastapi-websocket/flow.py:
--------------------------------------------------------------------------------
1 | from pocketflow import AsyncFlow
2 | from nodes import StreamingChatNode
3 |
4 | def create_streaming_chat_flow():
5 | chat_node = StreamingChatNode()
6 | return AsyncFlow(start=chat_node)
--------------------------------------------------------------------------------
/cookbook/pocketflow-fastapi-websocket/main.py:
--------------------------------------------------------------------------------
1 | import json
2 | from fastapi import FastAPI, WebSocket, WebSocketDisconnect
3 | from fastapi.staticfiles import StaticFiles
4 | from fastapi.responses import FileResponse
5 | from flow import create_streaming_chat_flow
6 |
7 | app = FastAPI()
8 | app.mount("/static", StaticFiles(directory="static"), name="static")
9 |
10 | @app.get("/")
11 | async def get_chat_interface():
12 | return FileResponse("static/index.html")
13 |
14 | @app.websocket("/ws")
15 | async def websocket_endpoint(websocket: WebSocket):
16 | await websocket.accept()
17 |
18 | # Initialize conversation history for this connection
19 | shared_store = {
20 | "websocket": websocket,
21 | "conversation_history": []
22 | }
23 |
24 | try:
25 | while True:
26 | data = await websocket.receive_text()
27 | message = json.loads(data)
28 |
29 | # Update only the current message, keep conversation history
30 | shared_store["user_message"] = message.get("content", "")
31 |
32 | flow = create_streaming_chat_flow()
33 | await flow.run_async(shared_store)
34 |
35 | except WebSocketDisconnect:
36 | pass
37 |
38 | if __name__ == "__main__":
39 | import uvicorn
40 | uvicorn.run(app, host="0.0.0.0", port=8000)
--------------------------------------------------------------------------------
/cookbook/pocketflow-fastapi-websocket/nodes.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import json
3 | from pocketflow import AsyncNode
4 | from utils.stream_llm import stream_llm
5 |
6 | class StreamingChatNode(AsyncNode):
7 | async def prep_async(self, shared):
8 | user_message = shared.get("user_message", "")
9 | websocket = shared.get("websocket")
10 |
11 | conversation_history = shared.get("conversation_history", [])
12 | conversation_history.append({"role": "user", "content": user_message})
13 |
14 | return conversation_history, websocket
15 |
16 | async def exec_async(self, prep_res):
17 | messages, websocket = prep_res
18 |
19 | await websocket.send_text(json.dumps({"type": "start", "content": ""}))
20 |
21 | full_response = ""
22 | async for chunk_content in stream_llm(messages):
23 | full_response += chunk_content
24 | await websocket.send_text(json.dumps({
25 | "type": "chunk",
26 | "content": chunk_content
27 | }))
28 |
29 | await websocket.send_text(json.dumps({"type": "end", "content": ""}))
30 |
31 | return full_response, websocket
32 |
33 | async def post_async(self, shared, prep_res, exec_res):
34 | full_response, websocket = exec_res
35 |
36 | conversation_history = shared.get("conversation_history", [])
37 | conversation_history.append({"role": "assistant", "content": full_response})
38 | shared["conversation_history"] = conversation_history
--------------------------------------------------------------------------------
/cookbook/pocketflow-fastapi-websocket/requirements.txt:
--------------------------------------------------------------------------------
1 | fastapi==0.104.1
2 | uvicorn[standard]==0.24.0
3 | openai==1.3.8
4 | pocketflow
--------------------------------------------------------------------------------
/cookbook/pocketflow-fastapi-websocket/utils/__init__.py:
--------------------------------------------------------------------------------
1 | # Utils package for FastAPI WebSocket Chat Interface
--------------------------------------------------------------------------------
/cookbook/pocketflow-fastapi-websocket/utils/stream_llm.py:
--------------------------------------------------------------------------------
1 | import os
2 | from openai import AsyncOpenAI
3 |
4 | async def stream_llm(messages):
5 | client = AsyncOpenAI(api_key=os.environ.get("OPENAI_API_KEY", "your-api-key"))
6 |
7 | stream = await client.chat.completions.create(
8 | model="gpt-4o-mini",
9 | messages=messages,
10 | stream=True,
11 | temperature=0.7
12 | )
13 |
14 | async for chunk in stream:
15 | if chunk.choices[0].delta.content is not None:
16 | yield chunk.choices[0].delta.content
17 |
18 | if __name__ == "__main__":
19 | import asyncio
20 |
21 | async def test():
22 | messages = [{"role": "user", "content": "Hello!"}]
23 | async for chunk in stream_llm(messages):
24 | print(chunk, end="", flush=True)
25 | print()
26 |
27 | asyncio.run(test())
--------------------------------------------------------------------------------
/cookbook/pocketflow-flow/README.md:
--------------------------------------------------------------------------------
1 | # Text Converter Flow
2 |
3 | This project demonstrates an interactive text transformation tool built with PocketFlow.
4 |
5 | ## Features
6 |
7 | - Convert text to UPPERCASE
8 | - Convert text to lowercase
9 | - Reverse text
10 | - Remove extra spaces
11 | - Interactive command-line interface
12 | - Continuous flow with option to process multiple texts
13 |
14 | ## Getting Started
15 |
16 | 1. Install the required dependencies:
17 |
18 | ```bash
19 | pip install -r requirements.txt
20 | ```
21 |
22 | 2. Run the application:
23 |
24 | ```bash
25 | python main.py
26 | ```
27 |
28 | ## How It Works
29 |
30 | The workflow features an interactive loop with branching paths:
31 |
32 | ```mermaid
33 | graph TD
34 | Input[TextInput Node] -->|transform| Transform[TextTransform Node]
35 | Transform -->|input| Input
36 | Transform -->|exit| End[End]
37 | Input -->|exit| End
38 | ```
39 |
40 | Here's what each part does:
41 | 1. **TextInput Node**: Collects text input and handles menu choices
42 | 2. **TextTransform Node**: Applies the selected transformation to the text
43 |
44 | ## Example Output
45 |
46 | ```
47 | Welcome to Text Converter!
48 | =========================
49 |
50 | Enter text to convert: Pocket Flow is a 100-line LLM framework
51 |
52 | Choose transformation:
53 | 1. Convert to UPPERCASE
54 | 2. Convert to lowercase
55 | 3. Reverse text
56 | 4. Remove extra spaces
57 | 5. Exit
58 |
59 | Your choice (1-5): 1
60 |
61 | Result: POCKET FLOW IS A 100-LINE LLM FRAMEWORK
62 |
63 | Convert another text? (y/n): n
64 |
65 | Thank you for using Text Converter!
66 | ```
67 |
68 | ## Files
69 |
70 | - [`main.py`](./main.py): Main entry point for running the text converter
71 | - [`flow.py`](./flow.py): Defines the nodes and flow for text transformation
72 | - [`requirements.txt`](./requirements.txt): Lists the required dependencies
73 |
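74 | ## Wiring Sketch
75 | 
76 | The branching in the diagram maps one-to-one onto action-labelled transitions. A minimal sketch of the wiring (mirroring `flow.py`, where the node classes are defined):
77 | 
78 | ```python
79 | from pocketflow import Flow
80 | from flow import TextInput, TextTransform, EndNode  # defined in flow.py
81 | 
82 | text_input = TextInput()
83 | text_transform = TextTransform()
84 | end_node = EndNode()
85 | 
86 | # The string returned by a node's post() picks which edge to follow
87 | text_input - "transform" >> text_transform   # user chose a transformation
88 | text_input - "exit" >> end_node              # user chose option 5
89 | text_transform - "input" >> text_input       # convert another text
90 | text_transform - "exit" >> end_node          # done
91 | 
92 | flow = Flow(start=text_input)
93 | flow.run({})
94 | ```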
--------------------------------------------------------------------------------
/cookbook/pocketflow-flow/flow.py:
--------------------------------------------------------------------------------
1 | from pocketflow import Node, Flow
2 |
3 | class TextInput(Node):
4 | def prep(self, shared):
5 | """Get text input from user."""
6 | if "text" not in shared:
7 | text = input("\nEnter text to convert: ")
8 | shared["text"] = text
9 | return shared["text"]
10 |
11 | def post(self, shared, prep_res, exec_res):
12 | print("\nChoose transformation:")
13 | print("1. Convert to UPPERCASE")
14 | print("2. Convert to lowercase")
15 | print("3. Reverse text")
16 | print("4. Remove extra spaces")
17 | print("5. Exit")
18 |
19 | choice = input("\nYour choice (1-5): ")
20 |
21 | if choice == "5":
22 | return "exit"
23 |
24 | shared["choice"] = choice
25 | return "transform"
26 |
27 | class TextTransform(Node):
28 | def prep(self, shared):
29 | return shared["text"], shared["choice"]
30 |
31 | def exec(self, inputs):
32 | text, choice = inputs
33 |
34 | if choice == "1":
35 | return text.upper()
36 | elif choice == "2":
37 | return text.lower()
38 | elif choice == "3":
39 | return text[::-1]
40 | elif choice == "4":
41 | return " ".join(text.split())
42 | else:
43 | return "Invalid option!"
44 |
45 | def post(self, shared, prep_res, exec_res):
46 | print("\nResult:", exec_res)
47 |
48 | if input("\nConvert another text? (y/n): ").lower() == 'y':
49 | shared.pop("text", None) # Remove previous text
50 | return "input"
51 | return "exit"
52 |
53 | class EndNode(Node):
54 | pass
55 |
56 | # Create nodes
57 | text_input = TextInput()
58 | text_transform = TextTransform()
59 | end_node = EndNode()
60 |
61 | # Connect nodes
62 | text_input - "transform" >> text_transform
63 | text_input - "exit" >> end_node
64 | text_transform - "input" >> text_input
65 | text_transform - "exit" >> end_node
66 | 
67 | # Create flow
68 | flow = Flow(start=text_input)
--------------------------------------------------------------------------------
/cookbook/pocketflow-flow/main.py:
--------------------------------------------------------------------------------
1 | from flow import flow
2 |
3 | def main():
4 | print("\nWelcome to Text Converter!")
5 | print("=========================")
6 |
7 | # Initialize shared store
8 | shared = {}
9 |
10 | # Run the flow
11 | flow.run(shared)
12 |
13 | print("\nThank you for using Text Converter!")
14 |
15 | if __name__ == "__main__":
16 | main()
--------------------------------------------------------------------------------
/cookbook/pocketflow-flow/requirements.txt:
--------------------------------------------------------------------------------
1 | pocketflow>=0.1.0
--------------------------------------------------------------------------------
/cookbook/pocketflow-google-calendar/.env.exemplo:
--------------------------------------------------------------------------------
1 | # Google Calendar API Configuration
2 | GOOGLE_CALENDAR_ID=your_calendar_id@group.calendar.google.com
3 | GOOGLE_APPLICATION_CREDENTIALS=credentials.json
4 |
5 | # Application Configuration
6 | TIMEZONE=America/Sao_Paulo # or your preferred timezone
7 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-google-calendar/.gitignore:
--------------------------------------------------------------------------------
1 | .env
2 | Pipfile.lock
3 | credentials.json
4 | token.pickle
--------------------------------------------------------------------------------
/cookbook/pocketflow-google-calendar/Pipfile:
--------------------------------------------------------------------------------
1 | [[source]]
2 | url = "https://pypi.org/simple"
3 | verify_ssl = true
4 | name = "pypi"
5 |
6 | [packages]
7 | python-dotenv = ">=0.19.0"
8 | pocketflow = ">=0.0.2"
9 | google-auth-oauthlib = ">=1.0.0"
10 | google-auth-httplib2 = ">=0.1.0"
11 | google-api-python-client = ">=2.0.0"
12 |
13 | [dev-packages]
14 |
15 | [requires]
16 | python_version = "3.13"
17 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-google-calendar/main.py:
--------------------------------------------------------------------------------
1 | from pocketflow import Flow
2 | from nodes import CreateCalendarEventNode, ListCalendarEventsNode, ListCalendarsNode
3 | from datetime import datetime, timedelta
4 |
5 | def create_calendar_flow():
6 | """Creates a flow to manage calendar events."""
7 | # Create nodes
8 | create_event_node = CreateCalendarEventNode()
9 | list_events_node = ListCalendarEventsNode()
10 |
11 | # Connect nodes
12 | create_event_node - "success" >> list_events_node
13 | create_event_node - "error" >> None
14 |
15 | # Create flow
16 | return Flow(start=create_event_node)
17 |
18 | def list_calendars_flow():
19 | """Creates a flow to list all user calendars."""
20 | list_calendars_node = ListCalendarsNode()
21 | return Flow(start=list_calendars_node)
22 |
23 | def main():
24 | # Example: List all calendars
25 | print("=== Listing your calendars ===")
26 | flow = list_calendars_flow()
27 | shared = {}
28 | flow.run(shared)
29 |
30 | if 'available_calendars' in shared:
31 | for cal in shared['available_calendars']:
32 | print(f"- {cal.get('summary')}")
33 |
34 | # Example: Create a simple event
35 | print("\n=== Creating an example event ===")
36 | flow = create_calendar_flow()
37 |
38 | shared = {
39 | 'event_summary': 'Example Meeting',
40 | 'event_description': 'An example meeting created by PocketFlow',
41 | 'event_start_time': datetime.now() + timedelta(days=1),
42 | 'event_end_time': datetime.now() + timedelta(days=1, hours=1),
43 | 'days_to_list': 7
44 | }
45 |
46 | flow.run(shared)
47 |
48 | if 'last_created_event' in shared:
49 | print("Event created successfully!")
50 | print(f"Event ID: {shared['last_created_event']['id']}")
51 |
52 | if __name__ == "__main__":
53 | main()
--------------------------------------------------------------------------------
/cookbook/pocketflow-google-calendar/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-google-calendar/utils/__init__.py
--------------------------------------------------------------------------------
/cookbook/pocketflow-gradio-hitl/assets/book_hotel.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-gradio-hitl/assets/book_hotel.png
--------------------------------------------------------------------------------
/cookbook/pocketflow-gradio-hitl/assets/change_intention.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-gradio-hitl/assets/change_intention.png
--------------------------------------------------------------------------------
/cookbook/pocketflow-gradio-hitl/assets/flow_visualization.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-gradio-hitl/assets/flow_visualization.png
--------------------------------------------------------------------------------
/cookbook/pocketflow-gradio-hitl/flow.py:
--------------------------------------------------------------------------------
1 | from pocketflow import Flow
2 |
3 | from nodes import (
4 | DecideAction,
5 | CheckWeather,
6 | BookHotel,
7 | FollowUp,
8 | ResultNotification,
9 | )
10 |
11 |
12 | def create_flow():
13 | """
14 | Create and connect the nodes to form a complete agent flow.
15 | """
16 | decide_action = DecideAction()
17 | check_weather = CheckWeather()
18 | book_hotel = BookHotel()
19 | follow_up = FollowUp()
20 | result_notification = ResultNotification()
21 |
22 | decide_action - "check-weather" >> check_weather
23 | check_weather >> decide_action
24 | decide_action - "book-hotel" >> book_hotel
25 | book_hotel >> decide_action
26 | decide_action - "follow-up" >> follow_up
27 | decide_action - "result-notification" >> result_notification
28 |
29 | return Flow(start=decide_action)
30 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-gradio-hitl/requirements.txt:
--------------------------------------------------------------------------------
1 | pocketflow>=0.0.2
2 | gradio>=5.29.1
3 | openai>=1.78.1
--------------------------------------------------------------------------------
/cookbook/pocketflow-gradio-hitl/utils/call_llm.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from openai import OpenAI
4 | from openai.types.chat.chat_completion import ChatCompletion
5 |
6 | api_key = os.getenv("OPENAI_API_KEY")
7 | base_url = "https://api.openai.com/v1"
8 | model = "gpt-4o"
9 |
10 |
11 | def call_llm(message: str):
12 | print(f"Calling LLM with message: \n{message}")
13 | client = OpenAI(api_key=api_key, base_url=base_url)
14 | response: ChatCompletion = client.chat.completions.create(
15 | model=model, messages=[{"role": "user", "content": message}]
16 | )
17 | return response.choices[0].message.content
18 |
19 |
20 | if __name__ == "__main__":
21 | print(call_llm("Hello, how are you?"))
22 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-gradio-hitl/utils/call_mock_api.py:
--------------------------------------------------------------------------------
1 | import random
2 | from datetime import date, datetime
3 |
4 |
5 | def call_check_weather_api(city: str, date: date | None):
6 | if date is None:
7 | date = datetime.now().date()
8 |
9 | current_date = datetime.now().date()
10 |
11 | # calculate date difference
12 | date_diff = (date - current_date).days
13 |
14 | # check if the date is within the allowed range
15 | if abs(date_diff) > 7:
16 | return f"Failed to check weather: Date {date} is more than 7 days away from current date."
17 |
18 | return f"The weather in {city} on {date} is {random.choice(['sunny', 'cloudy', 'rainy', 'snowy'])}, and the temperature is {random.randint(10, 30)}°C."
19 |
20 |
21 | def call_book_hotel_api(hotel: str, checkin_date: date, checkout_date: date):
22 | current_date = datetime.now().date()
23 |
24 | # check if the checkin date is after the current date
25 | if checkin_date <= current_date:
26 | return (
27 | f"Failed to book hotel {hotel}: Check-in date must be after current date."
28 | )
29 |
30 | # check if the checkin date is before the checkout date
31 | if checkin_date >= checkout_date:
32 | return f"Failed to book hotel {hotel}, because the checkin date is after the checkout date."
33 |
34 | # check if the date difference is more than 7 days
35 | date_diff = (checkout_date - checkin_date).days
36 | if date_diff > 7:
37 | return f"Failed to book hotel {hotel}: Stay duration cannot exceed 7 days."
38 |
39 | return f"Booked hotel {hotel} from {checkin_date.strftime('%Y-%m-%d')} to {checkout_date.strftime('%Y-%m-%d')} successfully."
40 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-gradio-hitl/utils/conversation.py:
--------------------------------------------------------------------------------
1 | conversation_cache = {}
2 |
3 |
4 | def load_conversation(conversation_id: str):
5 | print(f"Loading conversation {conversation_id}")
6 | return conversation_cache.get(conversation_id, {})
7 |
8 |
9 | def save_conversation(conversation_id: str, session: dict):
10 | print(f"Saving conversation {session}")
11 | conversation_cache[conversation_id] = session
12 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-gradio-hitl/utils/format_chat_history.py:
--------------------------------------------------------------------------------
1 | def format_chat_history(history):
2 | """
3 | Format the chat history for LLM
4 |
5 | Args:
6 | history (list): The chat history list, each element contains role and content
7 |
8 | Returns:
9 | str: The formatted chat history string
10 | """
11 | if not history:
12 | return "No history"
13 |
14 | formatted_history = []
15 | for message in history:
16 | role = "user" if message["role"] == "user" else "assistant"
17 | content = message["content"]
18 | # filter out the thinking content
19 | if role == "assistant":
20 | if (
21 | content.startswith("- 🤔")
22 | or content.startswith("- ➡️")
23 | or content.startswith("- ⬅️")
24 | ):
25 | continue
26 | formatted_history.append(f"{role}: {content}")
27 |
28 | return "\n".join(formatted_history)
29 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-hello-world/README.md:
--------------------------------------------------------------------------------
1 | # PocketFlow Hello World
2 |
3 | Your first PocketFlow application! This simple example demonstrates how to create a basic PocketFlow app from scratch.
4 |
5 | ## Project Structure
6 |
7 | ```
8 | .
9 | ├── docs/ # Documentation files
10 | ├── utils/ # Utility functions
11 | ├── flow.py # PocketFlow implementation
12 | ├── main.py # Main application entry point
13 | └── README.md # Project documentation
14 | ```
15 |
16 | ## Setup
17 |
18 | 1. Create a virtual environment:
19 | ```bash
20 | python -m venv venv
21 | source venv/bin/activate # On Windows: venv\Scripts\activate
22 | ```
23 |
24 | 2. Install dependencies:
25 | ```bash
26 | pip install -r requirements.txt
27 | ```
28 |
29 | 3. Run the example:
30 | ```bash
31 | python main.py
32 | ```
33 |
34 | ## What This Example Demonstrates
35 |
36 | - How to create your first PocketFlow application
37 | - Basic PocketFlow concepts and usage
38 | - Simple example of PocketFlow's capabilities
39 |
40 | ## Additional Resources
41 |
42 | - [PocketFlow Documentation](https://the-pocket.github.io/PocketFlow/)
--------------------------------------------------------------------------------
/cookbook/pocketflow-hello-world/docs/design.md:
--------------------------------------------------------------------------------
1 | # Your Project Title
2 |
3 | ## Project Requirements
4 | A description of the project requirements.
5 |
6 | ## Utility Functions
7 |
8 | 1. **Call LLM** (`utils/call_llm.py`)
9 |
10 | ## Flow Design
11 |
12 | 1. **First Node**
13 | 2. **Second Node**
14 | 3. **Third Node**
15 |
16 | ### Flow Diagram
17 |
18 | ```mermaid
19 | flowchart TD
20 | firstNode[First Node] --> secondNode[Second Node]
21 | secondNode --> thirdNode[Third Node]
22 | ```
23 |
24 | ## Data Structure
25 |
26 | The shared memory structure will be organized as follows:
27 |
28 | ```python
29 | shared = {
30 | "key": "value"
31 | }
32 | ```
33 |
34 | ## Node Designs
35 |
36 | ### 1. First Node
37 | - **Purpose**: What the node does
38 | - **Design**: Regular Node (no Batch/Async)
39 | - **Data Access**:
40 | - Read: "key" from shared store
41 | - Write: "key" to shared store
42 |
43 | ### 2. Second Node
44 | ...
45 |
46 | ### 3. Third Node
47 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-hello-world/flow.py:
--------------------------------------------------------------------------------
1 | from pocketflow import Node, Flow
2 | from utils.call_llm import call_llm
3 |
4 | # An example node and flow
5 | # Please replace this with your own node and flow
6 | class AnswerNode(Node):
7 | def prep(self, shared):
8 | # Read question from shared
9 | return shared["question"]
10 |
11 | def exec(self, question):
12 | return call_llm(question)
13 |
14 | def post(self, shared, prep_res, exec_res):
15 | # Store the answer in shared
16 | shared["answer"] = exec_res
17 |
18 | answer_node = AnswerNode()
19 | qa_flow = Flow(start=answer_node)
--------------------------------------------------------------------------------
/cookbook/pocketflow-hello-world/main.py:
--------------------------------------------------------------------------------
1 | from flow import qa_flow
2 |
3 | # Example main function
4 | # Please replace this with your own main function
5 | def main():
6 | shared = {
7 |         "question": "In one sentence, what's the end of the universe?",
8 | "answer": None
9 | }
10 |
11 | qa_flow.run(shared)
12 | print("Question:", shared["question"])
13 | print("Answer:", shared["answer"])
14 |
15 | if __name__ == "__main__":
16 | main()
--------------------------------------------------------------------------------
/cookbook/pocketflow-hello-world/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-hello-world/utils/__init__.py
--------------------------------------------------------------------------------
/cookbook/pocketflow-hello-world/utils/call_llm.py:
--------------------------------------------------------------------------------
1 | from openai import OpenAI
2 |
3 | def call_llm(prompt):
4 | client = OpenAI(api_key="YOUR_API_KEY_HERE")
5 | r = client.chat.completions.create(
6 | model="gpt-4o",
7 | messages=[{"role": "user", "content": prompt}]
8 | )
9 | return r.choices[0].message.content
10 |
11 | if __name__ == "__main__":
12 | prompt = "What is the meaning of life?"
13 | print(call_llm(prompt))
--------------------------------------------------------------------------------
/cookbook/pocketflow-llm-streaming/README.md:
--------------------------------------------------------------------------------
1 | # LLM Streaming and Interruption
2 |
3 | Demonstrates real-time LLM response streaming with user interrupt capability.
4 |
5 | - Check out the [Substack Post Tutorial](https://zacharyhuang.substack.com/p/streaming-llm-responses-tutorial) for more!
6 |
7 | ## Features
8 |
9 | - Real-time display of LLM responses as they're generated
10 | - User interrupt with ENTER key at any time
11 |
12 | ## Run It
13 |
14 | ```bash
15 | pip install -r requirements.txt
16 | python main.py
17 | ```
18 |
19 | ## How It Works
20 |
21 | StreamNode:
22 | 1. Creates interrupt listener thread
23 | 2. Fetches content chunks from LLM
24 | 3. Displays chunks in real-time
25 | 4. Handles user interruption
26 |
27 | ## API Key
28 |
29 | By default, the demo uses fake streaming responses. To use real OpenAI streaming:
30 | 
31 | 1. Edit main.py to replace fake_stream_llm with stream_llm:
32 | ```python
33 | # Change this line:
34 | chunks = fake_stream_llm(prompt)
35 | # To this:
36 | chunks = stream_llm(prompt)
37 | ```
38 |
39 | 2. Make sure your OpenAI API key is set:
40 | ```bash
41 | export OPENAI_API_KEY="your-api-key-here"
42 | ```
43 |
44 | ## Files
45 |
46 | - `main.py`: StreamNode implementation
47 | - `utils.py`: Real and fake LLM streaming functions
48 |
49 |
--------------------------------------------------------------------------------
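Note: `utils.py` for this example is not reproduced in this excerpt, but the README and `main.py` assume a real `stream_llm` and a fake `fake_stream_llm` that both yield OpenAI-style chunks. Below is a minimal sketch of what such helpers could look like; the names and chunk shape are assumptions inferred from how `main.py` reads `chunk.choices[0].delta.content`, not the cookbook's actual code.

```python
import os
from types import SimpleNamespace
from openai import OpenAI

def stream_llm(prompt):
    """Real streaming: returns an iterator of OpenAI chat-completion chunks."""
    client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
    return client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": prompt}],
        stream=True,  # each chunk carries choices[0].delta.content
    )

def fake_stream_llm(prompt):
    """Fake streaming: yields objects shaped like OpenAI chunks, no API key needed."""
    canned = "This is a canned streaming response used for the demo."
    for word in canned.split():
        delta = SimpleNamespace(content=word + " ")
        yield SimpleNamespace(choices=[SimpleNamespace(delta=delta)])
```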
/cookbook/pocketflow-llm-streaming/main.py:
--------------------------------------------------------------------------------
1 | import time
2 | import threading
3 | from pocketflow import Node, Flow
4 | from utils import fake_stream_llm, stream_llm
5 |
6 | class StreamNode(Node):
7 | def prep(self, shared):
8 | # Create interrupt event
9 | interrupt_event = threading.Event()
10 |
11 | # Start a thread to listen for user interrupt
12 | def wait_for_interrupt():
13 | input("Press ENTER at any time to interrupt streaming...\n")
14 | interrupt_event.set()
15 | listener_thread = threading.Thread(target=wait_for_interrupt)
16 | listener_thread.start()
17 |
18 | # Get prompt from shared store
19 | prompt = shared["prompt"]
20 | # Get chunks from LLM function
21 | chunks = stream_llm(prompt)
22 | return chunks, interrupt_event, listener_thread
23 |
24 | def exec(self, prep_res):
25 | chunks, interrupt_event, listener_thread = prep_res
26 | for chunk in chunks:
27 | if interrupt_event.is_set():
28 | print("User interrupted streaming.")
29 | break
30 |
31 | if hasattr(chunk.choices[0].delta, 'content') and chunk.choices[0].delta.content is not None:
32 | chunk_content = chunk.choices[0].delta.content
33 | print(chunk_content, end="", flush=True)
34 | time.sleep(0.1) # simulate latency
35 | return interrupt_event, listener_thread
36 |
37 | def post(self, shared, prep_res, exec_res):
38 | interrupt_event, listener_thread = exec_res
39 | # Join the interrupt listener so it doesn't linger
40 | interrupt_event.set()
41 | listener_thread.join()
42 | return "default"
43 |
44 | # Usage:
45 | node = StreamNode()
46 | flow = Flow(start=node)
47 |
48 | shared = {"prompt": "What's the meaning of life?"}
49 | flow.run(shared)
50 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-majority-vote/requirements.txt:
--------------------------------------------------------------------------------
1 | pocketflow>=0.0.1
2 | anthropic>=0.15.0
3 | pyyaml>=6.0
--------------------------------------------------------------------------------
/cookbook/pocketflow-majority-vote/utils.py:
--------------------------------------------------------------------------------
1 | from anthropic import Anthropic
2 | import os
3 |
4 | def call_llm(prompt):
5 | client = Anthropic(api_key=os.environ.get("ANTHROPIC_API_KEY", "your-api-key"))
6 | response = client.messages.create(
7 | model="claude-3-7-sonnet-20250219",
8 | max_tokens=10000,
9 | messages=[
10 | {"role": "user", "content": prompt}
11 | ]
12 | )
13 | return response.content[0].text
14 |
15 | if __name__ == "__main__":
16 | print("## Testing call_llm")
17 | prompt = "In a few words, what is the meaning of life?"
18 | print(f"## Prompt: {prompt}")
19 | response = call_llm(prompt)
20 | print(f"## Response: {response}")
--------------------------------------------------------------------------------
/cookbook/pocketflow-map-reduce/data/resume1.txt:
--------------------------------------------------------------------------------
1 | John Smith
2 | Software Engineer
3 |
4 | Education:
5 | - Master of Computer Science, Stanford University, 2018
6 | - Bachelor of Computer Science, MIT, 2016
7 |
8 | Experience:
9 | - Senior Software Engineer, Google, 2019-present
10 | * Led the development of cloud infrastructure projects
11 | * Implemented scalable solutions using Kubernetes and Docker
12 | * Reduced system latency by 40% through optimization
13 |
14 | - Software Developer, Microsoft, 2016-2019
15 | * Worked on Azure cloud services
16 | * Built RESTful APIs for enterprise solutions
17 |
18 | Skills:
19 | - Programming: Python, Java, C++, JavaScript
20 | - Technologies: Docker, Kubernetes, AWS, Azure
21 | - Tools: Git, Jenkins, Jira
22 |
23 | Projects:
24 | - Developed a recommendation engine that increased user engagement by 25%
25 | - Created a sentiment analysis tool using NLP techniques
--------------------------------------------------------------------------------
/cookbook/pocketflow-map-reduce/data/resume2.txt:
--------------------------------------------------------------------------------
1 | Emily Johnson
2 | Data Scientist
3 |
4 | Education:
5 | - Ph.D. in Statistics, UC Berkeley, 2020
6 | - Master of Science in Mathematics, UCLA, 2016
7 |
8 | Experience:
9 | - Data Scientist, Netflix, 2020-present
10 | * Developed machine learning models for content recommendation
11 | * Implemented A/B testing frameworks to optimize user experience
12 | * Collaborated with product teams to define metrics and KPIs
13 |
14 | - Data Analyst, Amazon, 2016-2020
15 | * Analyzed user behavior patterns to improve conversion rates
16 | * Created dashboards and visualizations for executive decision-making
17 |
18 | Skills:
19 | - Programming: R, Python, SQL
20 | - Machine Learning: TensorFlow, PyTorch, scikit-learn
21 | - Data Visualization: Tableau, PowerBI, matplotlib
22 |
23 | Publications:
24 | - "Advances in Recommendation Systems" - Journal of Machine Learning, 2021
25 | - "Statistical Methods for Big Data" - Conference on Data Science, 2019
--------------------------------------------------------------------------------
/cookbook/pocketflow-map-reduce/data/resume3.txt:
--------------------------------------------------------------------------------
1 | Michael Williams
2 | Marketing Manager
3 |
4 | Education:
5 | - MBA, Harvard Business School, 2015
6 | - Bachelor of Arts in Communications, NYU, 2010
7 |
8 | Experience:
9 | - Marketing Director, Apple, 2018-present
10 | * Managed a team of 15 marketing professionals
11 | * Developed and executed global marketing campaigns
12 | * Increased brand awareness by 30% through digital initiatives
13 |
14 | - Marketing Manager, Coca-Cola, 2015-2018
15 | * Led product launches across North America
16 | * Coordinated with external agencies on advertising campaigns
17 |
18 | Skills:
19 | - Digital Marketing: SEO, SEM, Social Media Marketing
20 | - Analytics: Google Analytics, Adobe Analytics
21 | - Tools: HubSpot, Salesforce, Marketo
22 |
23 | Achievements:
24 | - Marketing Excellence Award, 2020
25 | - Led campaign that won Cannes Lions Award, 2019
--------------------------------------------------------------------------------
/cookbook/pocketflow-map-reduce/data/resume4.txt:
--------------------------------------------------------------------------------
1 | Lisa Chen
2 | Frontend Developer
3 |
4 | Education:
5 | - Bachelor of Fine Arts, Rhode Island School of Design, 2019
6 |
7 | Experience:
8 | - UI/UX Designer, Airbnb, 2020-present
9 | * Designed user interfaces for mobile and web applications
10 | * Created wireframes and prototypes for new features
11 | * Conducted user research and usability testing
12 |
13 | - Junior Designer, Freelance, 2019-2020
14 | * Worked with small businesses on branding and website design
15 | * Developed responsive web designs using HTML, CSS, and JavaScript
16 |
17 | Skills:
18 | - Design: Figma, Sketch, Adobe XD
19 | - Development: HTML, CSS, JavaScript, React
20 | - Tools: Git, Zeplin
21 |
22 | Portfolio Highlights:
23 | - Redesigned checkout flow resulting in 15% conversion increase
24 | - Created custom icon set for mobile application
25 | - Designed responsive email templates
26 |
27 | Certifications:
28 | - UI/UX Design Certificate, Coursera, 2019
--------------------------------------------------------------------------------
/cookbook/pocketflow-map-reduce/data/resume5.txt:
--------------------------------------------------------------------------------
1 | Robert Taylor
2 | Sales Representative
3 |
4 | Education:
5 | - Bachelor of Business Administration, University of Texas, 2017
6 |
7 | Experience:
8 | - Account Executive, Salesforce, 2019-present
9 | * Exceeded sales targets by 25% for three consecutive quarters
10 | * Managed a portfolio of 50+ enterprise clients
11 | * Developed and implemented strategic account plans
12 |
13 | - Sales Associate, Oracle, 2017-2019
14 | * Generated new business opportunities through cold calling
15 | * Assisted senior sales representatives with client presentations
16 |
17 | Skills:
18 | - CRM Systems: Salesforce, HubSpot
19 | - Communication: Negotiation, Public Speaking
20 | - Tools: Microsoft Office Suite, Google Workspace
21 |
22 | Achievements:
23 | - Top Sales Representative Award, Q2 2020
24 | - President's Club, 2021
25 |
26 | Interests:
27 | - Volunteer sales coach for local small businesses
28 | - Member of Toastmasters International
--------------------------------------------------------------------------------
/cookbook/pocketflow-map-reduce/flow.py:
--------------------------------------------------------------------------------
1 | from pocketflow import Flow
2 | from nodes import ReadResumesNode, EvaluateResumesNode, ReduceResultsNode
3 |
4 | def create_resume_processing_flow():
5 | """Create a map-reduce flow for processing resumes."""
6 | # Create nodes
7 | read_resumes_node = ReadResumesNode()
8 | evaluate_resumes_node = EvaluateResumesNode()
9 | reduce_results_node = ReduceResultsNode()
10 |
11 | # Connect nodes
12 | read_resumes_node >> evaluate_resumes_node >> reduce_results_node
13 |
14 | # Create flow
15 | return Flow(start=read_resumes_node)
--------------------------------------------------------------------------------
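The evaluation ("map") step is implemented in `nodes.py`, which is not included in this excerpt. As a hedged sketch of how such a step is typically written with a PocketFlow `BatchNode` (the `resumes`/`evaluations` key names are assumptions taken from how `main.py` reads the results):

```python
from pocketflow import BatchNode
from utils import call_llm

class EvaluateResumesSketch(BatchNode):
    def prep(self, shared):
        # One (filename, resume_text) pair per resume; exec() runs once per pair
        return list(shared.get("resumes", {}).items())

    def exec(self, item):
        filename, text = item
        verdict = call_llm(f"Does this resume qualify for a senior role? Answer yes or no.\n\n{text}")
        return filename, {"qualifies": verdict.strip().lower().startswith("yes")}

    def post(self, shared, prep_res, exec_res_list):
        # Collect per-resume results so the reduce step can summarize them
        shared["evaluations"] = dict(exec_res_list)
```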
/cookbook/pocketflow-map-reduce/main.py:
--------------------------------------------------------------------------------
1 | from flow import create_resume_processing_flow
2 |
3 | def main():
4 | # Initialize shared store
5 | shared = {}
6 |
7 | # Create the resume processing flow
8 | resume_flow = create_resume_processing_flow()
9 |
10 | # Run the flow
11 | print("Starting resume qualification processing...")
12 | resume_flow.run(shared)
13 |
14 |     # Display detailed evaluation results (in addition to the summary already printed by ReduceResultsNode)
15 | if "summary" in shared:
16 | print("\nDetailed evaluation results:")
17 | for filename, evaluation in shared.get("evaluations", {}).items():
18 | qualified = "✓" if evaluation.get("qualifies", False) else "✗"
19 | name = evaluation.get("candidate_name", "Unknown")
20 | print(f"{qualified} {name} ({filename})")
21 |
22 | print("\nResume processing complete!")
23 |
24 | if __name__ == "__main__":
25 | main()
--------------------------------------------------------------------------------
/cookbook/pocketflow-map-reduce/requirements.txt:
--------------------------------------------------------------------------------
1 | pocketflow>=0.0.1
2 | openai>=1.0.0
3 | pyyaml>=6.0
--------------------------------------------------------------------------------
/cookbook/pocketflow-map-reduce/utils.py:
--------------------------------------------------------------------------------
1 | import os
2 | from openai import OpenAI
3 |
4 | def call_llm(prompt):
5 | client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY", "your-api-key"))
6 | r = client.chat.completions.create(
7 | model="gpt-4o",
8 | messages=[{"role": "user", "content": prompt}]
9 | )
10 | return r.choices[0].message.content
11 |
12 | # Example usage
13 | if __name__ == "__main__":
14 | print(call_llm("Tell me a short joke"))
--------------------------------------------------------------------------------
/cookbook/pocketflow-mcp/requirements.txt:
--------------------------------------------------------------------------------
1 | pocketflow>=0.0.1
2 | openai>=1.0.0
3 | fastmcp
4 | pyyaml
5 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-mcp/simple_server.py:
--------------------------------------------------------------------------------
1 | from fastmcp import FastMCP
2 |
3 | # Create a named server
4 | mcp = FastMCP("Math Operations Server")
5 |
6 | # Define mathematical operation tools
7 | @mcp.tool()
8 | def add(a: int, b: int) -> int:
9 | """Add two numbers together"""
10 | return a + b
11 |
12 | @mcp.tool()
13 | def subtract(a: int, b: int) -> int:
14 | """Subtract b from a"""
15 | return a - b
16 |
17 | @mcp.tool()
18 | def multiply(a: int, b: int) -> int:
19 | """Multiply two numbers together"""
20 | return a * b
21 |
22 | @mcp.tool()
23 | def divide(a: int, b: int) -> float:
24 | """Divide a by b"""
25 | if b == 0:
26 | raise ValueError("Division by zero is not allowed")
27 | return a / b
28 |
29 | # Start the server
30 | if __name__ == "__main__":
31 | mcp.run()
--------------------------------------------------------------------------------
/cookbook/pocketflow-multi-agent/requirements.txt:
--------------------------------------------------------------------------------
1 | pocketflow>=0.0.1
2 | openai>=1.0.0
3 | pyyaml>=6.0
--------------------------------------------------------------------------------
/cookbook/pocketflow-multi-agent/utils.py:
--------------------------------------------------------------------------------
1 | import os
2 | from openai import OpenAI
3 |
4 | def call_llm(prompt):
5 | client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY", "your-api-key"))
6 | r = client.chat.completions.create(
7 | model="gpt-4o-mini",
8 | messages=[{"role": "user", "content": prompt}]
9 | )
10 | return r.choices[0].message.content
11 |
12 | # Example usage
13 | if __name__ == "__main__":
14 | print(call_llm("Tell me a short joke"))
--------------------------------------------------------------------------------
/cookbook/pocketflow-nested-batch/README.md:
--------------------------------------------------------------------------------
1 | # PocketFlow Nested BatchFlow Example
2 |
3 | This example demonstrates Nested BatchFlow using a simple school grades calculator.
4 |
5 | ## What this Example Does
6 |
7 | Calculates average grades for:
8 | 1. Each student in a class
9 | 2. Each class in the school
10 |
11 | ## Structure
12 | ```
13 | school/
14 | ├── class_a/
15 | │ ├── student1.txt (grades: 7.5, 8.0, 9.0)
16 | │ └── student2.txt (grades: 8.5, 7.0, 9.5)
17 | └── class_b/
18 | ├── student3.txt (grades: 6.5, 8.5, 7.0)
19 | └── student4.txt (grades: 9.0, 9.5, 8.0)
20 | ```
21 |
22 | ## How it Works
23 |
24 | 1. **Outer BatchFlow (SchoolBatchFlow)**
25 | - Processes each class folder
26 | - Returns parameters like: `{"class": "class_a"}`
27 |
28 | 2. **Inner BatchFlow (ClassBatchFlow)**
29 | - Processes each student file in a class
30 | - Returns parameters like: `{"student": "student1.txt"}`
31 |
32 | 3. **Base Flow**
33 | - Loads student grades
34 | - Calculates average
35 | - Saves result
36 |
37 | ## Running the Example
38 |
39 | ```bash
40 | pip install -r requirements.txt
41 | python main.py
42 | ```
43 |
44 | ## Expected Output
45 |
46 | ```
47 | Processing class_a...
48 | - student1: Average = 8.2
49 | - student2: Average = 8.3
50 | Class A Average: 8.25
51 |
52 | Processing class_b...
53 | - student3: Average = 7.3
54 | - student4: Average = 8.8
55 | Class B Average: 8.05
56 |
57 | School Average: 8.15
58 | ```
59 |
60 | ## Key Concepts
61 |
62 | 1. **Nested BatchFlow**: One BatchFlow inside another
63 | 2. **Parameter Inheritance**: Inner flow gets parameters from outer flow
64 | 3. **Hierarchical Processing**: Process data in a tree-like structure
--------------------------------------------------------------------------------
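`flow.py`, where `create_flow()` lives, is not reproduced in this excerpt. The following is a rough sketch of how the two nested BatchFlows described above could be wired around the `LoadGrades` and `CalculateAverage` nodes shown below; the class names and directory walk are assumptions, not the cookbook's exact implementation.

```python
import os
from pocketflow import BatchFlow
from nodes import LoadGrades, CalculateAverage

class ClassBatchFlow(BatchFlow):
    def prep(self, shared):
        # Inner batch: one parameter dict per student file;
        # "class" is inherited from the outer flow's params
        class_dir = os.path.join("school", self.params["class"])
        return [{"student": f} for f in sorted(os.listdir(class_dir))]

class SchoolBatchFlow(BatchFlow):
    def prep(self, shared):
        # Outer batch: one parameter dict per class folder, e.g. {"class": "class_a"}
        return [{"class": c} for c in sorted(os.listdir("school"))]

def create_flow():
    load, calc = LoadGrades(), CalculateAverage()
    load - "calculate" >> calc                 # base sequence: load grades, then average
    class_flow = ClassBatchFlow(start=load)    # runs the base sequence once per student
    return SchoolBatchFlow(start=class_flow)   # runs the inner batch once per class
```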
/cookbook/pocketflow-nested-batch/main.py:
--------------------------------------------------------------------------------
1 | import os
2 | from flow import create_flow
3 |
4 | def create_sample_data():
5 | """Create sample grade files."""
6 | # Create directory structure
7 | os.makedirs("school/class_a", exist_ok=True)
8 | os.makedirs("school/class_b", exist_ok=True)
9 |
10 | # Sample grades
11 | data = {
12 | "class_a": {
13 | "student1.txt": [7.5, 8.0, 9.0],
14 | "student2.txt": [8.5, 7.0, 9.5]
15 | },
16 | "class_b": {
17 | "student3.txt": [6.5, 8.5, 7.0],
18 | "student4.txt": [9.0, 9.5, 8.0]
19 | }
20 | }
21 |
22 | # Create files
23 | for class_name, students in data.items():
24 | for student, grades in students.items():
25 | file_path = os.path.join("school", class_name, student)
26 | with open(file_path, 'w') as f:
27 | for grade in grades:
28 | f.write(f"{grade}\n")
29 |
30 | def main():
31 | """Run the nested batch example."""
32 | # Create sample data
33 | create_sample_data()
34 |
35 | print("Processing school grades...\n")
36 |
37 | # Create and run flow
38 | flow = create_flow()
39 | flow.run({})
40 |
41 | if __name__ == "__main__":
42 | main()
--------------------------------------------------------------------------------
/cookbook/pocketflow-nested-batch/nodes.py:
--------------------------------------------------------------------------------
1 | import os
2 | from pocketflow import Node
3 |
4 | class LoadGrades(Node):
5 | """Node that loads grades from a student's file."""
6 |
7 | def prep(self, shared):
8 | """Get file path from parameters."""
9 | class_name = self.params["class"]
10 | student_file = self.params["student"]
11 | return os.path.join("school", class_name, student_file)
12 |
13 | def exec(self, file_path):
14 | """Load and parse grades from file."""
15 | with open(file_path, 'r') as f:
16 | # Each line is a grade
17 | grades = [float(line.strip()) for line in f]
18 | return grades
19 |
20 | def post(self, shared, prep_res, grades):
21 | """Store grades in shared store."""
22 | shared["grades"] = grades
23 | return "calculate"
24 |
25 | class CalculateAverage(Node):
26 | """Node that calculates average grade."""
27 |
28 | def prep(self, shared):
29 | """Get grades from shared store."""
30 | return shared["grades"]
31 |
32 | def exec(self, grades):
33 | """Calculate average."""
34 | return sum(grades) / len(grades)
35 |
36 | def post(self, shared, prep_res, average):
37 | """Store and print result."""
38 | # Store in results dictionary
39 | if "results" not in shared:
40 | shared["results"] = {}
41 |
42 | class_name = self.params["class"]
43 | student = self.params["student"]
44 |
45 | if class_name not in shared["results"]:
46 | shared["results"][class_name] = {}
47 |
48 | shared["results"][class_name][student] = average
49 |
50 | # Print individual result
51 | print(f"- {student}: Average = {average:.1f}")
52 | return "default"
--------------------------------------------------------------------------------
/cookbook/pocketflow-nested-batch/requirements.txt:
--------------------------------------------------------------------------------
1 | pocketflow
--------------------------------------------------------------------------------
/cookbook/pocketflow-nested-batch/school/class_a/student1.txt:
--------------------------------------------------------------------------------
1 | 7.5
2 | 8.0
3 | 9.0
4 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-nested-batch/school/class_a/student2.txt:
--------------------------------------------------------------------------------
1 | 8.5
2 | 7.0
3 | 9.5
4 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-nested-batch/school/class_b/student3.txt:
--------------------------------------------------------------------------------
1 | 6.5
2 | 8.5
3 | 7.0
4 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-nested-batch/school/class_b/student4.txt:
--------------------------------------------------------------------------------
1 | 9.0
2 | 9.5
3 | 8.0
4 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-node/flow.py:
--------------------------------------------------------------------------------
1 | from pocketflow import Node, Flow
2 | from utils.call_llm import call_llm
3 |
4 | class Summarize(Node):
5 | def prep(self, shared):
6 | """Read and preprocess data from shared store."""
7 | return shared["data"]
8 |
9 | def exec(self, prep_res):
10 | """Execute the summarization using LLM."""
11 | if not prep_res:
12 | return "Empty text"
13 | prompt = f"Summarize this text in 10 words: {prep_res}"
14 | summary = call_llm(prompt) # might fail
15 | return summary
16 |
17 |     def exec_fallback(self, prep_res, exc):
18 | """Provide a simple fallback instead of crashing."""
19 | return "There was an error processing your request."
20 |
21 | def post(self, shared, prep_res, exec_res):
22 | """Store the summary in shared store."""
23 | shared["summary"] = exec_res
24 |         # Returning nothing here is equivalent to returning "default"
25 |
26 | # Create the flow
27 | summarize_node = Summarize(max_retries=3)
28 | flow = Flow(start=summarize_node)
--------------------------------------------------------------------------------
/cookbook/pocketflow-node/main.py:
--------------------------------------------------------------------------------
1 | from flow import flow
2 |
3 | def main():
4 | # Example text to summarize
5 | text = """
6 | PocketFlow is a minimalist LLM framework that models workflows as a Nested Directed Graph.
7 | Nodes handle simple LLM tasks, connecting through Actions for Agents.
8 | Flows orchestrate these nodes for Task Decomposition, and can be nested.
9 | It also supports Batch processing and Async execution.
10 | """
11 |
12 | # Initialize shared store
13 | shared = {"data": text}
14 |
15 | # Run the flow
16 | flow.run(shared)
17 |
18 | # Print result
19 | print("\nInput text:", text)
20 | print("\nSummary:", shared["summary"])
21 |
22 | if __name__ == "__main__":
23 | main()
--------------------------------------------------------------------------------
/cookbook/pocketflow-node/requirements.txt:
--------------------------------------------------------------------------------
1 | pocketflow
2 | openai>=1.0.0
--------------------------------------------------------------------------------
/cookbook/pocketflow-node/utils/call_llm.py:
--------------------------------------------------------------------------------
1 | from openai import OpenAI
2 |
3 | def call_llm(prompt):
4 | client = OpenAI(api_key="YOUR_API_KEY_HERE")
5 | r = client.chat.completions.create(
6 | model="gpt-4o",
7 | messages=[{"role": "user", "content": prompt}]
8 | )
9 | return r.choices[0].message.content
10 |
11 | if __name__ == "__main__":
12 | prompt = "What is the meaning of life?"
13 | print(call_llm(prompt))
--------------------------------------------------------------------------------
/cookbook/pocketflow-parallel-batch-flow/README.md:
--------------------------------------------------------------------------------
1 | # Parallel Image Processor
2 |
3 | Demonstrates how AsyncParallelBatchFlow processes multiple images with multiple filters >8x faster than sequential processing.
4 |
5 | ## Features
6 |
7 | ```mermaid
8 | graph TD
9 | subgraph AsyncParallelBatchFlow[Image Processing Flow]
10 | subgraph AsyncFlow[Per Image-Filter Flow]
11 | A[Load Image] --> B[Apply Filter]
12 | B --> C[Save Image]
13 | end
14 | end
15 | ```
16 |
17 | - Processes images with multiple filters in parallel
18 | - Applies three different filters (grayscale, blur, sepia)
19 | - Shows significant speed improvement over sequential processing
20 | - Manages system resources with semaphores
21 |
22 | ## Run It
23 |
24 | ```bash
25 | pip install -r requirements.txt
26 | python main.py
27 | ```
28 |
29 | ## Output
30 |
31 | ```
=== Processing Images in Parallel ===
32 | Parallel Image Processor
33 | ------------------------------
34 | Found 3 images:
35 | - images/bird.jpg
36 | - images/cat.jpg
37 | - images/dog.jpg
38 |
39 | Running sequential batch flow...
40 | Processing 3 images with 3 filters...
41 | Total combinations: 9
42 | Loading image: images/bird.jpg
43 | Applying grayscale filter...
44 | Saved: output/bird_grayscale.jpg
45 | ...etc
46 |
47 | Timing Results:
48 | Sequential batch processing: 13.76 seconds
49 | Parallel batch processing: 1.71 seconds
50 | Speedup: 8.04x
51 |
52 | Processing complete! Check the output/ directory for results.
53 | ```
54 |
55 | ## Key Points
56 |
57 | - **Sequential**: Total time = sum of all item times
58 | - Good for: Rate-limited APIs, maintaining order
59 |
60 | - **Parallel**: Total time ≈ longest single item time
61 | - Good for: I/O-bound tasks, independent operations
62 |
--------------------------------------------------------------------------------
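`flow.py` and `nodes.py`, where `create_flows()` and the per-image Load/Filter/Save nodes live, are not part of this excerpt. As an illustrative sketch only: the sequential and parallel variants usually differ by nothing more than the flow class they subclass, with `prep_async` returning one parameter dict per (image, filter) pair. The key names and filter list below are assumptions.

```python
from pocketflow import AsyncBatchFlow, AsyncParallelBatchFlow

FILTERS = ["grayscale", "blur", "sepia"]

def expand_params(shared):
    # One parameter dict per (image, filter) pair: 3 images x 3 filters = 9 sub-runs
    return [{"image_path": p, "filter": f} for p in shared["images"] for f in FILTERS]

class SequentialImageBatchFlow(AsyncBatchFlow):
    async def prep_async(self, shared):
        return expand_params(shared)   # sub-flows run one after another

class ParallelImageBatchFlow(AsyncParallelBatchFlow):
    async def prep_async(self, shared):
        return expand_params(shared)   # sub-flows are awaited concurrently
```

Each class would be constructed with `start=` pointing at the per-image AsyncFlow (Load Image → Apply Filter → Save Image) shown in the diagram above.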
/cookbook/pocketflow-parallel-batch-flow/images/bird.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-parallel-batch-flow/images/bird.jpg
--------------------------------------------------------------------------------
/cookbook/pocketflow-parallel-batch-flow/images/cat.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-parallel-batch-flow/images/cat.jpg
--------------------------------------------------------------------------------
/cookbook/pocketflow-parallel-batch-flow/images/dog.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-parallel-batch-flow/images/dog.jpg
--------------------------------------------------------------------------------
/cookbook/pocketflow-parallel-batch-flow/main.py:
--------------------------------------------------------------------------------
1 | import os
2 | import asyncio
3 | import time
4 | from flow import create_flows
5 |
6 | def get_image_paths():
7 | """Get paths of existing images in the images directory."""
8 | images_dir = "images"
9 | if not os.path.exists(images_dir):
10 | raise ValueError(f"Directory '{images_dir}' not found!")
11 |
12 | # List all jpg files in the images directory
13 | image_paths = []
14 | for filename in os.listdir(images_dir):
15 | if filename.lower().endswith(('.jpg', '.jpeg', '.png')):
16 | image_paths.append(os.path.join(images_dir, filename))
17 |
18 | if not image_paths:
19 | raise ValueError(f"No images found in '{images_dir}' directory!")
20 |
21 | print(f"Found {len(image_paths)} images:")
22 | for path in image_paths:
23 | print(f"- {path}")
24 |
25 | return image_paths
26 |
27 | async def main():
28 | """Run the parallel image processing example."""
29 | print("Parallel Image Processor")
30 | print("-" * 30)
31 |
32 | # Get existing image paths
33 | image_paths = get_image_paths()
34 |
35 | # Create shared store with image paths
36 | shared = {"images": image_paths}
37 |
38 | # Create both flows
39 | batch_flow, parallel_batch_flow = create_flows()
40 |
41 | # Run and time batch flow
42 | start_time = time.time()
43 | print("\nRunning sequential batch flow...")
44 | await batch_flow.run_async(shared)
45 | batch_time = time.time() - start_time
46 |
47 | # Run and time parallel batch flow
48 | start_time = time.time()
49 | print("\nRunning parallel batch flow...")
50 | await parallel_batch_flow.run_async(shared)
51 | parallel_time = time.time() - start_time
52 |
53 | # Print timing results
54 | print("\nTiming Results:")
55 | print(f"Sequential batch processing: {batch_time:.2f} seconds")
56 | print(f"Parallel batch processing: {parallel_time:.2f} seconds")
57 | print(f"Speedup: {batch_time/parallel_time:.2f}x")
58 |
59 | print("\nProcessing complete! Check the output/ directory for results.")
60 |
61 | if __name__ == "__main__":
62 | asyncio.run(main())
--------------------------------------------------------------------------------
/cookbook/pocketflow-parallel-batch-flow/output/bird_blur.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-parallel-batch-flow/output/bird_blur.jpg
--------------------------------------------------------------------------------
/cookbook/pocketflow-parallel-batch-flow/output/bird_grayscale.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-parallel-batch-flow/output/bird_grayscale.jpg
--------------------------------------------------------------------------------
/cookbook/pocketflow-parallel-batch-flow/output/bird_sepia.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-parallel-batch-flow/output/bird_sepia.jpg
--------------------------------------------------------------------------------
/cookbook/pocketflow-parallel-batch-flow/output/cat_blur.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-parallel-batch-flow/output/cat_blur.jpg
--------------------------------------------------------------------------------
/cookbook/pocketflow-parallel-batch-flow/output/cat_grayscale.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-parallel-batch-flow/output/cat_grayscale.jpg
--------------------------------------------------------------------------------
/cookbook/pocketflow-parallel-batch-flow/output/cat_sepia.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-parallel-batch-flow/output/cat_sepia.jpg
--------------------------------------------------------------------------------
/cookbook/pocketflow-parallel-batch-flow/output/dog_blur.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-parallel-batch-flow/output/dog_blur.jpg
--------------------------------------------------------------------------------
/cookbook/pocketflow-parallel-batch-flow/output/dog_grayscale.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-parallel-batch-flow/output/dog_grayscale.jpg
--------------------------------------------------------------------------------
/cookbook/pocketflow-parallel-batch-flow/output/dog_sepia.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-parallel-batch-flow/output/dog_sepia.jpg
--------------------------------------------------------------------------------
/cookbook/pocketflow-parallel-batch-flow/requirements.txt:
--------------------------------------------------------------------------------
1 | pocketflow
2 | Pillow>=10.0.0 # For image processing
3 | numpy>=1.24.0 # For image array operations
--------------------------------------------------------------------------------
/cookbook/pocketflow-parallel-batch/requirements.txt:
--------------------------------------------------------------------------------
1 | pocketflow>=0.0.2
2 | anthropic>=0.15.0
3 | python-dotenv
4 | httpx
5 | aiofiles
--------------------------------------------------------------------------------
/cookbook/pocketflow-parallel-batch/utils.py:
--------------------------------------------------------------------------------
1 | import os
2 | import asyncio
3 | from anthropic import AsyncAnthropic
4 |
5 | # Async version of the simple wrapper, using Anthropic
6 | async def call_llm(prompt):
7 | """Async wrapper for Anthropic API call."""
8 | client = AsyncAnthropic(api_key=os.environ.get("ANTHROPIC_API_KEY", "your-api-key"))
9 | response = await client.messages.create(
10 | model="claude-3-7-sonnet-20250219",
11 | max_tokens=20000,
12 | thinking={
13 | "type": "enabled",
14 | "budget_tokens": 16000
15 | },
16 | messages=[
17 | {"role": "user", "content": prompt}
18 | ],
19 | )
20 | return response.content[1].text
21 |
22 | if __name__ == "__main__":
23 | async def run_test():
24 | print("## Testing async call_llm with Anthropic")
25 | prompt = "In a few words, what is the meaning of life?"
26 | print(f"## Prompt: {prompt}")
27 | response = await call_llm(prompt)
28 | print(f"## Response: {response}")
29 |
30 | asyncio.run(run_test())
--------------------------------------------------------------------------------
/cookbook/pocketflow-rag/flow.py:
--------------------------------------------------------------------------------
1 | from pocketflow import Flow
2 | from nodes import EmbedDocumentsNode, CreateIndexNode, EmbedQueryNode, RetrieveDocumentNode, ChunkDocumentsNode, GenerateAnswerNode
3 |
4 | def get_offline_flow():
5 | # Create offline flow for document indexing
6 | chunk_docs_node = ChunkDocumentsNode()
7 | embed_docs_node = EmbedDocumentsNode()
8 | create_index_node = CreateIndexNode()
9 |
10 | # Connect the nodes
11 | chunk_docs_node >> embed_docs_node >> create_index_node
12 |
13 | offline_flow = Flow(start=chunk_docs_node)
14 | return offline_flow
15 |
16 | def get_online_flow():
17 | # Create online flow for document retrieval and answer generation
18 | embed_query_node = EmbedQueryNode()
19 | retrieve_doc_node = RetrieveDocumentNode()
20 | generate_answer_node = GenerateAnswerNode()
21 |
22 | # Connect the nodes
23 | embed_query_node >> retrieve_doc_node >> generate_answer_node
24 |
25 | online_flow = Flow(start=embed_query_node)
26 | return online_flow
27 |
28 | # Initialize flows
29 | offline_flow = get_offline_flow()
30 | online_flow = get_online_flow()
--------------------------------------------------------------------------------
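A hedged usage sketch of how the two flows cooperate through one shared store; the actual key names are defined in `nodes.py`/`main.py`, which are not shown here, so `texts`, `question`, and `answer` are assumed names used only for illustration.

```python
from flow import offline_flow, online_flow

# Offline: chunk the documents, embed the chunks, and build the index inside `shared`
shared = {"texts": ["PocketFlow is a minimalist LLM framework.",
                    "It models workflows as nested directed graphs of nodes."]}
offline_flow.run(shared)

# Online: embed the query, retrieve the best chunk, and generate an answer
shared["question"] = "What is PocketFlow?"
online_flow.run(shared)
print(shared.get("answer"))
```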
/cookbook/pocketflow-rag/requirements.txt:
--------------------------------------------------------------------------------
1 | pocketflow>=0.0.1
2 | numpy>=1.20.0
3 | faiss-cpu>=1.7.0
4 | openai>=1.0.0
--------------------------------------------------------------------------------
/cookbook/pocketflow-rag/utils.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 | from openai import OpenAI
4 |
5 | def call_llm(prompt):
6 | client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY", "your-api-key"))
7 | r = client.chat.completions.create(
8 | model="gpt-4o",
9 | messages=[{"role": "user", "content": prompt}]
10 | )
11 | return r.choices[0].message.content
12 |
13 | def get_embedding(text):
14 | client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY", "your-api-key"))
15 |
16 | response = client.embeddings.create(
17 | model="text-embedding-ada-002",
18 | input=text
19 | )
20 |
21 | # Extract the embedding vector from the response
22 | embedding = response.data[0].embedding
23 |
24 | # Convert to numpy array for consistency with other embedding functions
25 | return np.array(embedding, dtype=np.float32)
26 |
27 | def fixed_size_chunk(text, chunk_size=2000):
28 | chunks = []
29 | for i in range(0, len(text), chunk_size):
30 | chunks.append(text[i : i + chunk_size])
31 | return chunks
32 |
33 | if __name__ == "__main__":
34 | print("=== Testing call_llm ===")
35 | prompt = "In a few words, what is the meaning of life?"
36 | print(f"Prompt: {prompt}")
37 | response = call_llm(prompt)
38 | print(f"Response: {response}")
39 |
40 | print("=== Testing embedding function ===")
41 |
42 | text1 = "The quick brown fox jumps over the lazy dog."
43 | text2 = "Python is a popular programming language for data science."
44 |
45 | oai_emb1 = get_embedding(text1)
46 | oai_emb2 = get_embedding(text2)
47 | print(f"OpenAI Embedding 1 shape: {oai_emb1.shape}")
48 | oai_similarity = np.dot(oai_emb1, oai_emb2)
49 | print(f"OpenAI similarity between texts: {oai_similarity:.4f}")
--------------------------------------------------------------------------------
/cookbook/pocketflow-streamlit-fsm/README.md:
--------------------------------------------------------------------------------
1 | # PocketFlow Streamlit Image Generation HITL
2 |
3 | Human-in-the-Loop (HITL) image generation application using PocketFlow and Streamlit. Enter text prompts, generate images with OpenAI, and approve/regenerate results.
4 |
5 | 
11 | ## Features
12 |
13 | - **Image Generation:** Uses OpenAI's `gpt-image-1` model to generate images from text prompts
14 | - **Human Review:** Interactive interface to approve or regenerate images
15 | - **State Machine:** Clean state-based workflow (`initial_input` → `user_feedback` → `final`)
16 | - **PocketFlow Integration:** Uses PocketFlow `Node` and `Flow` for image generation with built-in retries
17 | - **Session State Management:** Streamlit session state acts as PocketFlow's shared store
18 | - **In-Memory Images:** Images stored as base64 strings, no disk storage required
19 |
20 | ## How to Run
21 |
22 | 1. **Set OpenAI API Key:**
23 | ```bash
24 | export OPENAI_API_KEY="your-openai-api-key"
25 | ```
26 |
27 | 2. **Install Dependencies:**
28 | ```bash
29 | pip install -r requirements.txt
30 | ```
31 |
32 | 3. **Run the Streamlit Application:**
33 | ```bash
34 | streamlit run app.py
35 | ```
36 |
37 | 4. **Access the Web UI:**
38 | Open the URL provided by Streamlit (usually `http://localhost:8501`).
39 |
40 | ## Usage
41 |
42 | 1. **Enter Prompt**: Describe the image you want to generate
43 | 2. **Generate**: Click "Generate Image" to create the image
44 | 3. **Review**: View the generated image and choose:
45 | - **Approve**: Accept the image and move to final result
46 | - **Regenerate**: Generate a new image with the same prompt
47 | 4. **Final**: View approved image and optionally start over
48 |
49 | ## Files
50 |
51 | - [`app.py`](./app.py): Main Streamlit application with state-based UI
52 | - [`nodes.py`](./nodes.py): PocketFlow `GenerateImageNode` definition
53 | - [`flow.py`](./flow.py): PocketFlow `Flow` for image generation
54 | - [`utils/generate_image.py`](./utils/generate_image.py): OpenAI image generation utility
55 | - [`requirements.txt`](./requirements.txt): Project dependencies
56 | - [`docs/design.md`](./docs/design.md): System design documentation
57 | - [`README.md`](./README.md): This file
58 |
--------------------------------------------------------------------------------
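`app.py`, the Streamlit entry point, is not reproduced here. Below is a minimal sketch of the stage dispatch described above, treating `st.session_state` as PocketFlow's shared store; the widget layout is an assumption, while the `task_input`/`generated_image`/`stage` keys follow `nodes.py` shown below.

```python
import base64
import streamlit as st
from flow import create_generation_flow

if "stage" not in st.session_state:
    st.session_state["stage"] = "initial_input"

if st.session_state["stage"] == "initial_input":
    prompt = st.text_input("Describe the image to generate")
    if st.button("Generate Image") and prompt:
        st.session_state["task_input"] = prompt
        # Session state doubles as PocketFlow's shared store
        create_generation_flow().run(st.session_state)  # node sets stage = "user_feedback"
        st.rerun()

elif st.session_state["stage"] == "user_feedback":
    st.image(base64.b64decode(st.session_state["generated_image"]))
    if st.button("Approve"):
        st.session_state["stage"] = "final"
        st.rerun()
    if st.button("Regenerate"):
        create_generation_flow().run(st.session_state)
        st.rerun()

else:  # "final"
    st.image(base64.b64decode(st.session_state["generated_image"]))
    st.write("Approved! Refresh the page to start over.")
```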
/cookbook/pocketflow-streamlit-fsm/assets/banner.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-streamlit-fsm/assets/banner.png
--------------------------------------------------------------------------------
/cookbook/pocketflow-streamlit-fsm/flow.py:
--------------------------------------------------------------------------------
1 | from pocketflow import Flow
2 | from nodes import GenerateImageNode
3 |
4 | def create_generation_flow():
5 | """Creates a flow for image generation (initial or regeneration)."""
6 | generate_image_node = GenerateImageNode()
7 | return Flow(start=generate_image_node)
8 |
9 |
10 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-streamlit-fsm/nodes.py:
--------------------------------------------------------------------------------
1 | from pocketflow import Node
2 | from utils.generate_image import generate_image
3 |
4 | class GenerateImageNode(Node):
5 | """Generates image from text prompt using OpenAI API."""
6 |
7 | def prep(self, shared):
8 | return shared.get("task_input", "")
9 |
10 | def exec(self, prompt):
11 | return generate_image(prompt)
12 |
13 | def post(self, shared, prep_res, exec_res):
14 | shared["input_used_by_process"] = prep_res
15 | shared["generated_image"] = exec_res
16 | shared["stage"] = "user_feedback"
17 | return "default"
--------------------------------------------------------------------------------
/cookbook/pocketflow-streamlit-fsm/requirements.txt:
--------------------------------------------------------------------------------
1 | streamlit
2 | pocketflow
3 | openai
4 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-streamlit-fsm/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-streamlit-fsm/utils/__init__.py
--------------------------------------------------------------------------------
/cookbook/pocketflow-streamlit-fsm/utils/generate_image.py:
--------------------------------------------------------------------------------
1 | from openai import OpenAI
2 | import os
3 | import base64
4 |
5 | def generate_image(prompt: str) -> str:
6 | client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
7 |
8 | response = client.images.generate(
9 | model="gpt-image-1",
10 | prompt=prompt,
11 | n=1,
12 | size="1024x1024"
13 | )
14 |
15 | image_b64 = response.data[0].b64_json
16 | print(f"Generated image ({len(image_b64)} chars)")
17 | return image_b64
18 |
19 | if __name__ == "__main__":
20 | test_prompt = "A gray tabby cat hugging an otter with an orange scarf"
21 | print(f"Generating image for prompt: {test_prompt[:50]}...")
22 |
23 | image_base64 = generate_image(test_prompt)
24 | print(f"Success! Generated {len(image_base64)} characters of base64 data")
25 |
26 | # Write image to local file for testing
27 | image_bytes = base64.b64decode(image_base64)
28 | with open("test_generated_image.png", "wb") as f:
29 | f.write(image_bytes)
30 | print("Test image saved as test_generated_image.png")
--------------------------------------------------------------------------------
/cookbook/pocketflow-streamlit-fsm/utils/test_generated_image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-streamlit-fsm/utils/test_generated_image.png
--------------------------------------------------------------------------------
/cookbook/pocketflow-structured-output/data.txt:
--------------------------------------------------------------------------------
1 | # JOHN SMTIH
2 |
3 | **Email:** johnsmtih1983@gnail.com
4 | **Phone:** (555) 123-4556
5 | **Address:** 123 Main st, Anytown, USA
6 |
7 | ## PROFFESIONAL SUMMARY
8 |
9 | Dedicated and hardworking professional with over 10 years of exprience in business manegement. Known for finding creatve solutions to complex problems and excelent communication skills. Seeking new opportunites to leverage my expertise in a dynamic environment.
10 |
11 | ## WORK EXPERENCE
12 |
13 | ### SALES MANAGER
14 | **ABC Corportaion** | Anytown, USA | June 2018 - Present
15 | - Oversee a team of 12 sales represenatives and achieve quarterly targets
16 | - Increased departmnet revenue by 24% in fiscal year 2019-2020
17 | - Implemneted new CRM system that improved efficiency by 15%
18 | - Collabarate with Marketing team on product launch campaigns
19 | - Developed training materials for new hiers
20 |
21 | ### ASST. MANAGER
22 | **XYZ Industries** | Somewhere Else, USA | March 2015 - may 2018
23 | - Assisted the Regional Manager in daily operations and reporting
24 | - managed inventory and vendor relations
25 | - Trained and mentored junior staff members
26 | - Recieved "Employee of the Month" award 4 times
27 |
28 | ### CUSTOMER SERVICE REPRESENTATIVE
29 | **Fast Solutions Inc** | Another City, USA | January 2010 - February 2015
30 | * Responded to customer inquiries via phone email, and in-person
31 | * Resolved customer complaints and escalated issues when necessary
32 | * Maintained a 95% customer satsfaction rating
33 |
34 |
35 | ## EDUCATIONS
36 |
37 | **Bachelor of Buisness Administration**
38 | University of Somewhere | 2006 - 2010
39 | GPA: 3.6/4.0
40 |
41 | **Assosiate Degree in Communications**
42 | Community College | 2004-2006
43 |
44 | ## SKILSS
45 |
46 | - Microsoft Office: *Excel, Word, Powerpoint* (Advanced)
47 | - Customer relationship management (CRM) software
48 | - Team leadership & managment
49 | - Project management
50 | - Public speking
51 | - Time managemant
52 |
53 | ## REFERENCES
54 |
55 | Available upon reqeust
56 |
57 | ### OTHER ACTVITIES
58 | - Volunteer at the local food bank (2016-present)
59 | - Member of Toastmasters International
60 | - Enjoy hiking and photografy
--------------------------------------------------------------------------------
/cookbook/pocketflow-structured-output/requirements.txt:
--------------------------------------------------------------------------------
1 | pocketflow>=0.0.1
2 | openai>=1.0.0
--------------------------------------------------------------------------------
/cookbook/pocketflow-structured-output/utils.py:
--------------------------------------------------------------------------------
1 | import os
2 | from openai import OpenAI
3 |
4 | def call_llm(prompt):
5 | client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY", "your-api-key"))
6 | r = client.chat.completions.create(
7 | model="gpt-4o",
8 | messages=[{"role": "user", "content": prompt}]
9 | )
10 | return r.choices[0].message.content
11 |
12 | # Example usage
13 | if __name__ == "__main__":
14 | print(call_llm("Tell me a short joke"))
--------------------------------------------------------------------------------
/cookbook/pocketflow-supervisor/flow.py:
--------------------------------------------------------------------------------
1 | from pocketflow import Flow
2 | from nodes import DecideAction, SearchWeb, UnreliableAnswerNode, SupervisorNode
3 |
4 | def create_agent_inner_flow():
5 | """
6 | Create the inner research agent flow without supervision.
7 |
8 | This flow handles the research cycle:
9 | 1. DecideAction node decides whether to search or answer
10 | 2. If search, go to SearchWeb node and return to decide
11 | 3. If answer, go to UnreliableAnswerNode
12 |
13 | Returns:
14 | Flow: A research agent flow
15 | """
16 | # Create instances of each node
17 | decide = DecideAction()
18 | search = SearchWeb()
19 | answer = UnreliableAnswerNode()
20 |
21 | # Connect the nodes
22 | # If DecideAction returns "search", go to SearchWeb
23 | decide - "search" >> search
24 |
25 | # If DecideAction returns "answer", go to UnreliableAnswerNode
26 | decide - "answer" >> answer
27 |
28 | # After SearchWeb completes and returns "decide", go back to DecideAction
29 | search - "decide" >> decide
30 |
31 | # Create and return the inner flow, starting with the DecideAction node
32 | return Flow(start=decide)
33 |
34 | def create_agent_flow():
35 | """
36 | Create a supervised agent flow by treating the entire agent flow as a node
37 | and placing the supervisor outside of it.
38 |
39 | The flow works like this:
40 | 1. Inner agent flow does research and generates an answer
41 | 2. SupervisorNode checks if the answer is valid
42 | 3. If answer is valid, flow completes
43 | 4. If answer is invalid, restart the inner agent flow
44 |
45 | Returns:
46 | Flow: A complete research agent flow with supervision
47 | """
48 | # Create the inner flow
49 | agent_flow = create_agent_inner_flow()
50 |
51 | # Create the supervisor node
52 | supervisor = SupervisorNode()
53 |
54 | # Connect the components
55 | # After agent_flow completes, go to supervisor
56 | agent_flow >> supervisor
57 |
58 | # If supervisor rejects the answer, go back to agent_flow
59 | supervisor - "retry" >> agent_flow
60 |
61 | # Create and return the outer flow, starting with the agent_flow
62 | return Flow(start=agent_flow)
--------------------------------------------------------------------------------
/cookbook/pocketflow-supervisor/main.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from flow import create_agent_flow
3 |
4 | def main():
5 | """Simple function to process a question with supervised answers."""
6 | # Default question
7 | default_question = "Who won the Nobel Prize in Physics 2024?"
8 |
9 | # Get question from command line if provided with --
10 | question = default_question
11 | for arg in sys.argv[1:]:
12 | if arg.startswith("--"):
13 | question = arg[2:]
14 | break
15 |
16 | # Create the agent flow with supervision
17 | agent_flow = create_agent_flow()
18 |
19 | # Process the question
20 | shared = {"question": question}
21 | print(f"🤔 Processing question: {question}")
22 | agent_flow.run(shared)
23 | print("\n🎯 Final Answer:")
24 | print(shared.get("answer", "No answer found"))
25 |
26 | if __name__ == "__main__":
27 | main()
--------------------------------------------------------------------------------
/cookbook/pocketflow-supervisor/requirements.txt:
--------------------------------------------------------------------------------
1 | pocketflow>=0.0.1
2 | aiohttp>=3.8.0 # For async HTTP requests
3 | openai>=1.0.0 # For async LLM calls
4 | duckduckgo-search>=7.5.2 # For web search
--------------------------------------------------------------------------------
/cookbook/pocketflow-supervisor/utils.py:
--------------------------------------------------------------------------------
1 | from openai import OpenAI
2 | import os
3 | from duckduckgo_search import DDGS
4 |
5 | def call_llm(prompt):
6 | client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY", "your-api-key"))
7 | r = client.chat.completions.create(
8 | model="gpt-4o",
9 | messages=[{"role": "user", "content": prompt}]
10 | )
11 | return r.choices[0].message.content
12 |
13 | def search_web(query):
14 | results = DDGS().text(query, max_results=5)
15 | # Convert results to a string
16 | results_str = "\n\n".join([f"Title: {r['title']}\nURL: {r['href']}\nSnippet: {r['body']}" for r in results])
17 | return results_str
18 |
19 | if __name__ == "__main__":
20 | print("## Testing call_llm")
21 | prompt = "In a few words, what is the meaning of life?"
22 | print(f"## Prompt: {prompt}")
23 | response = call_llm(prompt)
24 | print(f"## Response: {response}")
25 |
26 | print("## Testing search_web")
27 | query = "Who won the Nobel Prize in Physics 2024?"
28 | print(f"## Query: {query}")
29 | results = search_web(query)
30 | print(f"## Results: {results}")
--------------------------------------------------------------------------------
/cookbook/pocketflow-text2sql/ecommerce.db:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-text2sql/ecommerce.db
--------------------------------------------------------------------------------
/cookbook/pocketflow-text2sql/flow.py:
--------------------------------------------------------------------------------
1 | from pocketflow import Flow
2 | from nodes import GetSchema, GenerateSQL, ExecuteSQL, DebugSQL
3 |
4 | def create_text_to_sql_flow():
5 | """Creates the text-to-SQL workflow with a debug loop."""
6 | get_schema_node = GetSchema()
7 | generate_sql_node = GenerateSQL()
8 | execute_sql_node = ExecuteSQL()
9 | debug_sql_node = DebugSQL()
10 |
11 | # Define the main flow sequence using the default transition operator
12 | get_schema_node >> generate_sql_node >> execute_sql_node
13 |
14 | # --- Define the debug loop connections using the correct operator ---
15 | # If ExecuteSQL returns "error_retry", go to DebugSQL
16 | execute_sql_node - "error_retry" >> debug_sql_node
17 |
18 | # If DebugSQL returns "default", go back to ExecuteSQL
19 | # debug_sql_node - "default" >> execute_sql_node # Explicitly for "default"
20 | # OR using the shorthand for default:
21 | debug_sql_node >> execute_sql_node
22 |
23 | # Create the flow
24 | text_to_sql_flow = Flow(start=get_schema_node)
25 | return text_to_sql_flow
--------------------------------------------------------------------------------
/cookbook/pocketflow-text2sql/main.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import os
3 | from flow import create_text_to_sql_flow
4 | from populate_db import populate_database, DB_FILE
5 |
6 | def run_text_to_sql(natural_query, db_path=DB_FILE, max_debug_retries=3):
7 | if not os.path.exists(db_path) or os.path.getsize(db_path) == 0:
8 | print(f"Database at {db_path} missing or empty. Populating...")
9 | populate_database(db_path)
10 |
11 | shared = {
12 | "db_path": db_path,
13 | "natural_query": natural_query,
14 | "max_debug_attempts": max_debug_retries,
15 | "debug_attempts": 0,
16 | "final_result": None,
17 | "final_error": None
18 | }
19 |
20 | print(f"\n=== Starting Text-to-SQL Workflow ===")
21 | print(f"Query: '{natural_query}'")
22 | print(f"Database: {db_path}")
23 | print(f"Max Debug Retries on SQL Error: {max_debug_retries}")
24 | print("=" * 45)
25 |
26 | flow = create_text_to_sql_flow()
27 | flow.run(shared) # Let errors inside the loop be handled by the flow logic
28 |
29 | # Check final state based on shared data
30 | if shared.get("final_error"):
31 | print("\n=== Workflow Completed with Error ===")
32 | print(f"Error: {shared['final_error']}")
33 | elif shared.get("final_result") is not None:
34 | print("\n=== Workflow Completed Successfully ===")
35 | # Result already printed by ExecuteSQL node
36 | else:
37 | # Should not happen if flow logic is correct and covers all end states
38 | print("\n=== Workflow Completed (Unknown State) ===")
39 |
40 | print("=" * 36)
41 | return shared
42 |
43 | if __name__ == "__main__":
44 | if len(sys.argv) > 1:
45 | query = " ".join(sys.argv[1:])
46 | else:
47 | query = "total products per category"
48 |
49 | run_text_to_sql(query)
--------------------------------------------------------------------------------
/cookbook/pocketflow-text2sql/requirements.txt:
--------------------------------------------------------------------------------
1 | pocketflow>=0.0.1
2 | openai>=1.0.0
3 | pyyaml>=6.0
4 | # sqlite3 is part of the Python standard library (no pip install required)
5 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-text2sql/utils/call_llm.py:
--------------------------------------------------------------------------------
1 | import os
2 | from openai import OpenAI
3 |
4 | def call_llm(prompt):
5 | client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY", "your-api-key"))
6 | r = client.chat.completions.create(
7 | model="gpt-4o",
8 | messages=[{"role": "user", "content": prompt}]
9 | )
10 | return r.choices[0].message.content
11 |
12 | # Example usage
13 | if __name__ == "__main__":
14 | print(call_llm("Tell me a short joke"))
--------------------------------------------------------------------------------
/cookbook/pocketflow-thinking/flow.py:
--------------------------------------------------------------------------------
1 | from pocketflow import Flow
2 | from nodes import ChainOfThoughtNode
3 |
4 | def create_chain_of_thought_flow():
5 | # Create a ChainOfThoughtNode
6 | cot_node = ChainOfThoughtNode(max_retries=3, wait=10)
7 |
8 | # Connect the node to itself for the "continue" action
9 | cot_node - "continue" >> cot_node
10 |
11 | # Create the flow
12 | cot_flow = Flow(start=cot_node)
13 | return cot_flow
--------------------------------------------------------------------------------
/cookbook/pocketflow-thinking/main.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from flow import create_chain_of_thought_flow
3 |
4 | def main():
5 | # Default question
6 | default_question = "You keep rolling a fair die until you roll three, four, five in that order consecutively on three rolls. What is the probability that you roll the die an odd number of times?"
7 |
8 | # Get question from command line if provided with --
9 | question = default_question
10 | for arg in sys.argv[1:]:
11 | if arg.startswith("--"):
12 | question = arg[2:]
13 | break
14 |
15 | print(f"🤔 Processing question: {question}")
16 |
17 | # Create the flow
18 | cot_flow = create_chain_of_thought_flow()
19 |
20 | # Set up shared state
21 | shared = {
22 | "problem": question,
23 | "thoughts": [],
24 | "current_thought_number": 0,
25 | "total_thoughts_estimate": 10,
26 | "solution": None
27 | }
28 |
29 | # Run the flow
30 | cot_flow.run(shared)
31 |
32 | if __name__ == "__main__":
33 | main()
--------------------------------------------------------------------------------
/cookbook/pocketflow-thinking/requirements.txt:
--------------------------------------------------------------------------------
1 | pocketflow>=0.0.1
2 | anthropic>=0.15.0 # For Claude API access
--------------------------------------------------------------------------------
/cookbook/pocketflow-thinking/utils.py:
--------------------------------------------------------------------------------
1 | from anthropic import Anthropic
2 | import os
3 |
4 | def call_llm(prompt):
5 | client = Anthropic(api_key=os.environ.get("ANTHROPIC_API_KEY", "your-api-key"))
6 | response = client.messages.create(
7 | model="claude-3-7-sonnet-20250219",
8 | max_tokens=6000,
9 | messages=[
10 | {"role": "user", "content": prompt}
11 | ]
12 | )
13 | return response.content[0].text
14 |
15 | if __name__ == "__main__":
16 | print("## Testing call_llm")
17 | prompt = "In a few words, what is the meaning of life?"
18 | print(f"## Prompt: {prompt}")
19 | response = call_llm(prompt)
20 | print(f"## Response: {response}")
--------------------------------------------------------------------------------
/cookbook/pocketflow-tool-crawler/README.md:
--------------------------------------------------------------------------------
1 | # Web Crawler with Content Analysis
2 |
3 | A web crawler tool built with PocketFlow that crawls websites and analyzes content using an LLM.
4 |
5 | ## Features
6 |
7 | - Crawls websites while respecting domain boundaries
8 | - Extracts text content and links from pages
9 | - Analyzes content using GPT-4 to generate:
10 | - Page summaries
11 | - Main topics/keywords
12 | - Content type classification
13 | - Processes pages in batches for efficiency
14 | - Generates a comprehensive analysis report
15 |
16 | ## Installation
17 |
18 | 1. Clone the repository
19 | 2. Install dependencies:
20 | ```bash
21 | pip install -r requirements.txt
22 | ```
23 | 3. Set your OpenAI API key:
24 | ```bash
25 | export OPENAI_API_KEY='your-api-key'
26 | ```
27 |
28 | ## Usage
29 |
30 | Run the crawler:
31 | ```bash
32 | python main.py
33 | ```
34 |
35 | You will be prompted to:
36 | 1. Enter the website URL to crawl
37 | 2. Specify maximum number of pages to crawl (default: 10)
38 |
39 | The tool will then:
40 | 1. Crawl the specified website
41 | 2. Extract and analyze content using GPT-4
42 | 3. Generate a report with findings
43 |
44 | ## Project Structure
45 |
46 | ```
47 | pocketflow-tool-crawler/
48 | ├── tools/
49 | │ ├── crawler.py # Web crawling functionality
50 | │ └── parser.py # Content analysis using LLM
51 | ├── utils/
52 | │ └── call_llm.py # LLM API wrapper
53 | ├── nodes.py # PocketFlow nodes
54 | ├── flow.py # Flow configuration
55 | ├── main.py # Main script
56 | └── requirements.txt # Dependencies
57 | ```
58 |
59 | ## Limitations
60 |
61 | - Only crawls within the same domain
62 | - Text content only (no images/media)
63 | - Rate limited by OpenAI API
64 | - Basic error handling
65 |
66 | ## Dependencies
67 |
68 | - pocketflow: Flow-based processing
69 | - requests: HTTP requests
70 | - beautifulsoup4: HTML parsing
71 | - openai: GPT-4 API access
72 |
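73 | ## Programmatic Usage
74 |
75 | The flow can also be driven from your own script instead of the interactive prompt. The snippet below is a minimal sketch; it assumes the shared-store keys (`base_url`, `max_pages`, `report`) that `main.py` uses:
76 |
77 | ```python
78 | from flow import create_flow
79 |
80 | # Shared store read by the crawler nodes
81 | shared = {
82 |     "base_url": "https://example.com",  # crawling stays within this domain
83 |     "max_pages": 5,                     # crawl budget
84 | }
85 |
86 | create_flow().run(shared)
87 |
88 | # The analysis report is written back to the shared store
89 | print(shared.get("report", "No report generated"))
90 | ```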
--------------------------------------------------------------------------------
/cookbook/pocketflow-tool-crawler/flow.py:
--------------------------------------------------------------------------------
1 | from pocketflow import Flow
2 | from nodes import CrawlWebsiteNode, AnalyzeContentBatchNode, GenerateReportNode
3 |
4 | def create_flow() -> Flow:
5 | """Create and configure the crawling flow
6 |
7 | Returns:
8 | Flow: Configured flow ready to run
9 | """
10 | # Create nodes
11 | crawl = CrawlWebsiteNode()
12 | analyze = AnalyzeContentBatchNode()
13 | report = GenerateReportNode()
14 |
15 | # Connect nodes
16 | crawl >> analyze >> report
17 |
18 | # Create flow starting with crawl
19 | return Flow(start=crawl)
20 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-tool-crawler/main.py:
--------------------------------------------------------------------------------
1 | import os
2 | from flow import create_flow
3 |
4 | def main():
5 | """Run the web crawler flow"""
6 |
7 | # Get website URL from user
8 | url = input("Enter website URL to crawl (e.g., https://example.com): ")
9 | if not url:
10 | print("Error: URL is required")
11 | return
12 |
13 | # Initialize shared data
14 | shared = {
15 | "base_url": url,
16 | "max_pages": 1
17 | }
18 |
19 | # Create and run flow
20 | flow = create_flow()
21 | flow.run(shared)
22 |
23 | # Results are in shared["report"]
24 |
25 | if __name__ == "__main__":
26 | main()
27 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-tool-crawler/requirements.txt:
--------------------------------------------------------------------------------
1 | pocketflow>=0.1.0
2 | requests>=2.31.0
3 | beautifulsoup4>=4.12.0
4 | openai>=1.0.0 # for content analysis
5 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-tool-crawler/tools/parser.py:
--------------------------------------------------------------------------------
1 | from typing import Dict, List
2 | from utils.call_llm import call_llm
3 |
4 | def analyze_content(content: Dict) -> Dict:
5 | """Analyze webpage content using LLM
6 |
7 | Args:
8 | content (Dict): Webpage content with url, title and text
9 |
10 | Returns:
11 | Dict: Analysis results including summary and topics
12 | """
13 | prompt = f"""
14 | Analyze this webpage content:
15 |
16 | Title: {content['title']}
17 | URL: {content['url']}
18 | Content: {content['text'][:2000]}
19 |
20 | Please provide:
21 | 1. A brief summary (2-3 sentences)
22 | 2. Main topics/keywords (up to 5)
23 | 3. Content type (article, product page, etc)
24 |
25 | Output in YAML format:
26 | ```yaml
27 | summary: >
28 | brief summary here
29 | topics:
30 | - topic 1
31 | - topic 2
32 | content_type: type here
33 | ```
34 | """
35 |
36 | try:
37 | response = call_llm(prompt)
38 | # Extract YAML between code fences
39 | yaml_str = response.split("```yaml")[1].split("```")[0].strip()
40 |
41 | import yaml
42 | analysis = yaml.safe_load(yaml_str)
43 |
44 | # Validate required fields
45 | assert "summary" in analysis
46 | assert "topics" in analysis
47 | assert "content_type" in analysis
48 | assert isinstance(analysis["topics"], list)
49 |
50 | return analysis
51 |
52 | except Exception as e:
53 | print(f"Error analyzing content: {str(e)}")
54 | return {
55 | "summary": "Error analyzing content",
56 | "topics": [],
57 | "content_type": "unknown"
58 | }
59 |
60 | def analyze_site(crawl_results: List[Dict]) -> List[Dict]:
61 | """Analyze all crawled pages
62 |
63 | Args:
64 | crawl_results (List[Dict]): List of crawled page contents
65 |
66 | Returns:
67 | List[Dict]: Original content with added analysis
68 | """
69 | analyzed_results = []
70 |
71 | for content in crawl_results:
72 | if content and content.get("text"):
73 | analysis = analyze_content(content)
74 | content["analysis"] = analysis
75 | analyzed_results.append(content)
76 |
77 | return analyzed_results
78 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-tool-crawler/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-tool-crawler/utils/__init__.py
--------------------------------------------------------------------------------
/cookbook/pocketflow-tool-crawler/utils/call_llm.py:
--------------------------------------------------------------------------------
1 | from openai import OpenAI
2 | import os
3 |
4 | # Initialize OpenAI client
5 | client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
6 |
7 | def call_llm(prompt: str) -> str:
8 | """Call OpenAI API to analyze text
9 |
10 | Args:
11 | prompt (str): Input prompt for the model
12 |
13 | Returns:
14 | str: Model response
15 | """
16 | try:
17 | response = client.chat.completions.create(
18 | model="gpt-4",
19 | messages=[{"role": "user", "content": prompt}]
20 | )
21 | return response.choices[0].message.content
22 |
23 | except Exception as e:
24 | print(f"Error calling LLM API: {str(e)}")
25 | return ""
26 |
27 | if __name__ == "__main__":
28 | # Test LLM call
29 | response = call_llm("What is web crawling?")
30 | print("Response:", response)
31 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-tool-database/example.db:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-tool-database/example.db
--------------------------------------------------------------------------------
/cookbook/pocketflow-tool-database/flow.py:
--------------------------------------------------------------------------------
1 | from pocketflow import Flow
2 | from nodes import InitDatabaseNode, CreateTaskNode, ListTasksNode
3 |
4 | def create_database_flow():
5 | """Create a flow for database operations"""
6 |
7 | # Create nodes
8 | init_db = InitDatabaseNode()
9 | create_task = CreateTaskNode()
10 | list_tasks = ListTasksNode()
11 |
12 | # Connect nodes
13 | init_db >> create_task >> list_tasks
14 |
15 | # Create and return flow
16 | return Flow(start=init_db)
17 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-tool-database/main.py:
--------------------------------------------------------------------------------
1 | from flow import create_database_flow
2 |
3 | def main():
4 | # Create the flow
5 | flow = create_database_flow()
6 |
7 | # Prepare example task data
8 | shared = {
9 | "task_title": "Example Task",
10 | "task_description": "This is an example task created using PocketFlow"
11 | }
12 |
13 | # Run the flow
14 | flow.run(shared)
15 |
16 | # Print results
17 | print("Database Status:", shared.get("db_status"))
18 | print("Task Status:", shared.get("task_status"))
19 | print("\nAll Tasks:")
20 | for task in shared.get("tasks", []):
21 | print(f"- ID: {task[0]}")
22 | print(f" Title: {task[1]}")
23 | print(f" Description: {task[2]}")
24 | print(f" Status: {task[3]}")
25 | print(f" Created: {task[4]}")
26 | print()
27 |
28 | if __name__ == "__main__":
29 | main()
30 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-tool-database/nodes.py:
--------------------------------------------------------------------------------
1 | from pocketflow import Node
2 | from tools.database import execute_sql, init_db
3 |
4 | class InitDatabaseNode(Node):
5 | """Node for initializing the database"""
6 |
7 | def exec(self, _):
8 | init_db()
9 | return "Database initialized"
10 |
11 | def post(self, shared, prep_res, exec_res):
12 | shared["db_status"] = exec_res
13 | return "default"
14 |
15 | class CreateTaskNode(Node):
16 | """Node for creating a new task"""
17 |
18 | def prep(self, shared):
19 | return (
20 | shared.get("task_title", ""),
21 | shared.get("task_description", "")
22 | )
23 |
24 | def exec(self, inputs):
25 | title, description = inputs
26 | query = "INSERT INTO tasks (title, description) VALUES (?, ?)"
27 | execute_sql(query, (title, description))
28 | return "Task created successfully"
29 |
30 | def post(self, shared, prep_res, exec_res):
31 | shared["task_status"] = exec_res
32 | return "default"
33 |
34 | class ListTasksNode(Node):
35 | """Node for listing all tasks"""
36 |
37 | def exec(self, _):
38 | query = "SELECT * FROM tasks"
39 | return execute_sql(query)
40 |
41 | def post(self, shared, prep_res, exec_res):
42 | shared["tasks"] = exec_res
43 | return "default"
44 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-tool-database/requirements.txt:
--------------------------------------------------------------------------------
1 | pocketflow>=0.1.0
2 | python-dotenv>=0.19.0
3 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-tool-database/tools/database.py:
--------------------------------------------------------------------------------
1 | import sqlite3
2 | from typing import Any, List, Optional, Tuple
3 |
4 | def execute_sql(query: str, params: Optional[Tuple] = None) -> List[Tuple[Any, ...]]:
5 | """Execute a SQL query and return results
6 |
7 | Args:
8 | query (str): SQL query to execute
9 | params (tuple, optional): Query parameters to prevent SQL injection
10 |
11 | Returns:
12 | list: Query results as a list of tuples
13 | """
14 | conn = sqlite3.connect("example.db")
15 | try:
16 | cursor = conn.cursor()
17 | if params:
18 | cursor.execute(query, params)
19 | else:
20 | cursor.execute(query)
21 | result = cursor.fetchall()
22 | conn.commit()
23 | return result
24 | finally:
25 | conn.close()
26 |
27 | def init_db():
28 | """Initialize database with example table"""
29 | create_table_sql = """
30 | CREATE TABLE IF NOT EXISTS tasks (
31 | id INTEGER PRIMARY KEY AUTOINCREMENT,
32 | title TEXT NOT NULL,
33 | description TEXT,
34 | status TEXT DEFAULT 'pending',
35 | created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
36 | )
37 | """
38 | execute_sql(create_table_sql)
39 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-tool-database/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-tool-database/utils/__init__.py
--------------------------------------------------------------------------------
/cookbook/pocketflow-tool-embeddings/flow.py:
--------------------------------------------------------------------------------
1 | from pocketflow import Flow
2 | from nodes import EmbeddingNode
3 |
4 | def create_embedding_flow():
5 | """Create a flow for text embedding"""
6 | # Create embedding node
7 | embedding = EmbeddingNode()
8 |
9 | # Create and return flow
10 | return Flow(start=embedding)
--------------------------------------------------------------------------------
/cookbook/pocketflow-tool-embeddings/main.py:
--------------------------------------------------------------------------------
1 | from flow import create_embedding_flow
2 |
3 | def main():
4 | # Create the flow
5 | flow = create_embedding_flow()
6 |
7 | # Example text
8 | text = "What's the meaning of life?"
9 |
10 | # Prepare shared data
11 | shared = {"text": text}
12 |
13 | # Run the flow
14 | flow.run(shared)
15 |
16 | # Print results
17 | print("Text:", text)
18 | print("Embedding dimension:", len(shared["embedding"]))
19 | print("First 5 values:", shared["embedding"][:5])
20 |
21 | if __name__ == "__main__":
22 | main()
--------------------------------------------------------------------------------
/cookbook/pocketflow-tool-embeddings/nodes.py:
--------------------------------------------------------------------------------
1 | from pocketflow import Node
2 | from tools.embeddings import get_embedding
3 |
4 | class EmbeddingNode(Node):
5 | """Node for getting embeddings from OpenAI API"""
6 |
7 | def prep(self, shared):
8 | # Get text from shared store
9 | return shared.get("text", "")
10 |
11 | def exec(self, text):
12 | # Get embedding using tool function
13 | return get_embedding(text)
14 |
15 | def post(self, shared, prep_res, exec_res):
16 | # Store embedding in shared store
17 | shared["embedding"] = exec_res
18 | return "default"
--------------------------------------------------------------------------------
/cookbook/pocketflow-tool-embeddings/requirements.txt:
--------------------------------------------------------------------------------
1 | openai>=1.0.0
2 | numpy>=1.24.0
3 | faiss-cpu>=1.7.0
4 | python-dotenv>=1.0.0
5 | pocketflow>=0.1.0
--------------------------------------------------------------------------------
/cookbook/pocketflow-tool-embeddings/tools/embeddings.py:
--------------------------------------------------------------------------------
1 | from utils.call_llm import client
2 |
3 | def get_embedding(text):
4 | response = client.embeddings.create(
5 | model="text-embedding-ada-002",
6 | input=text
7 | )
8 | return response.data[0].embedding
--------------------------------------------------------------------------------
/cookbook/pocketflow-tool-embeddings/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-tool-embeddings/utils/__init__.py
--------------------------------------------------------------------------------
/cookbook/pocketflow-tool-embeddings/utils/call_llm.py:
--------------------------------------------------------------------------------
1 | import os
2 | from openai import OpenAI
3 |
4 | # No need for dotenv if using system environment variables
5 | client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
6 |
7 | def call_llm(prompt):
8 | r = client.chat.completions.create(
9 | model="gpt-4o",
10 | messages=[{"role": "user", "content": prompt}]
11 | )
12 | return r.choices[0].message.content
13 |
14 | if __name__ == "__main__":
15 | prompt = "What is the meaning of life?"
16 | print(call_llm(prompt))
--------------------------------------------------------------------------------
/cookbook/pocketflow-tool-pdf-vision/README.md:
--------------------------------------------------------------------------------
1 | # PocketFlow Tool: PDF Vision
2 |
3 | A PocketFlow example project demonstrating PDF processing with OpenAI's Vision API for OCR and text extraction.
4 |
5 | ## Features
6 |
7 | - Convert PDF pages to images while maintaining quality and size limits
8 | - Extract text from scanned documents using GPT-4 Vision API
9 | - Support for custom extraction prompts
10 | - Maintain page order and formatting in extracted text
11 | - Batch processing of multiple PDFs from a directory
12 |
13 | ## Installation
14 |
15 | 1. Clone the repository
16 | 2. Install dependencies:
17 | ```bash
18 | pip install -r requirements.txt
19 | ```
20 | 3. Set your OpenAI API key as an environment variable:
21 | ```bash
22 | export OPENAI_API_KEY=your_api_key_here
23 | ```
24 |
25 | ## Usage
26 |
27 | 1. Place your PDF files in the `pdfs` directory
28 | 2. Run the example:
29 | ```bash
30 | python main.py
31 | ```
32 | The script will process all PDF files in the `pdfs` directory and output the extracted text for each one.
33 |
34 | ## Project Structure
35 |
36 | ```
37 | pocketflow-tool-pdf-vision/
38 | ├── pdfs/ # Directory for PDF files to process
39 | ├── tools/
40 | │ ├── pdf.py # PDF to image conversion
41 | │ └── vision.py # Vision API integration
42 | ├── utils/
43 | │ └── call_llm.py # OpenAI client config
44 | ├── nodes.py # PocketFlow nodes
45 | ├── flow.py # Flow configuration
46 | └── main.py # Example usage
47 | ```
48 |
49 | ## Flow Description
50 |
51 | 1. **LoadPDFNode**: Loads PDF and converts pages to images
52 | 2. **ExtractTextNode**: Processes images with Vision API
53 | 3. **CombineResultsNode**: Combines extracted text from all pages
54 |
55 | ## Customization
56 |
57 | You can customize the extraction by modifying the prompt in `shared`:
58 |
59 | ```python
60 | shared = {
61 | "pdf_path": "your_file.pdf",
62 | "extraction_prompt": "Your custom prompt here"
63 | }
64 | ```
65 |
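To actually run the flow with such a custom prompt, here is a minimal sketch; it assumes the batch node reads `extraction_prompt` from the shared store (as the example above suggests) and that each result carries the `filename`/`text` keys printed in `main.py`:

```python
from flow import create_vision_flow

# Shared store with the customization key shown above
shared = {
    "extraction_prompt": "Extract all text and reproduce any tables as Markdown."
}

create_vision_flow().run(shared)

# Each processed PDF contributes an entry with "filename" and "text"
for result in shared.get("results", []):
    print(result["filename"], "->", result["text"][:200])
```
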
66 | ## Limitations
67 |
68 | - Maximum PDF page size: 2000px (configurable in `tools/pdf.py`)
69 | - Vision API token limit: 1000 tokens per response
70 | - Image size limit: 20MB per image for Vision API
71 |
72 | ## License
73 |
74 | MIT
75 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-tool-pdf-vision/flow.py:
--------------------------------------------------------------------------------
1 | from pocketflow import Flow
2 | from nodes import ProcessPDFBatchNode
3 |
4 | def create_vision_flow():
5 | """Create a flow for batch PDF processing with Vision API"""
6 | return Flow(start=ProcessPDFBatchNode())
7 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-tool-pdf-vision/main.py:
--------------------------------------------------------------------------------
1 | from flow import create_vision_flow
2 |
3 | def main():
4 | # Create and run flow
5 | flow = create_vision_flow()
6 | shared = {}
7 | flow.run(shared)
8 |
9 | # Print results
10 | if "results" in shared:
11 | for result in shared["results"]:
12 | print(f"\nFile: {result['filename']}")
13 | print("-" * 50)
14 | print(result["text"])
15 |
16 | if __name__ == "__main__":
17 | main()
18 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-tool-pdf-vision/pdfs/pocket-flow.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-tool-pdf-vision/pdfs/pocket-flow.pdf
--------------------------------------------------------------------------------
/cookbook/pocketflow-tool-pdf-vision/requirements.txt:
--------------------------------------------------------------------------------
1 | pocketflow>=0.1.0
2 | openai>=1.0.0
3 | PyMuPDF>=1.22.0 # for PDF processing
4 | Pillow>=10.0.0 # for image processing
5 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-tool-pdf-vision/tools/pdf.py:
--------------------------------------------------------------------------------
1 | import fitz # PyMuPDF
2 | from PIL import Image
3 | import io
4 | import base64
5 | from typing import List, Tuple
6 |
7 | def pdf_to_images(pdf_path: str, max_size: int = 2000) -> List[Tuple[Image.Image, int]]:
8 | """Convert PDF pages to PIL Images with size limit
9 |
10 | Args:
11 | pdf_path (str): Path to PDF file
12 | max_size (int): Maximum dimension (width/height) for images
13 |
14 | Returns:
15 | list: List of tuples (PIL Image, page number)
16 | """
17 | doc = fitz.open(pdf_path)
18 | images = []
19 |
20 | try:
21 | for page_num in range(len(doc)):
22 | page = doc[page_num]
23 | pix = page.get_pixmap()
24 |
25 | # Convert to PIL Image
26 | img = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
27 |
28 | # Resize if needed while maintaining aspect ratio
29 | if max(img.size) > max_size:
30 | ratio = max_size / max(img.size)
31 | new_size = tuple(int(dim * ratio) for dim in img.size)
32 | img = img.resize(new_size, Image.Resampling.LANCZOS)
33 |
34 | images.append((img, page_num + 1))
35 |
36 | finally:
37 | doc.close()
38 |
39 | return images
40 |
41 | def image_to_base64(image: Image.Image) -> str:
42 | """Convert PIL Image to base64 string
43 |
44 | Args:
45 | image (PIL.Image): Image to convert
46 |
47 | Returns:
48 | str: Base64 encoded image string
49 | """
50 | buffer = io.BytesIO()
51 | image.save(buffer, format="PNG")
52 | return base64.b64encode(buffer.getvalue()).decode('utf-8')
53 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-tool-pdf-vision/tools/vision.py:
--------------------------------------------------------------------------------
1 | from PIL import Image
2 | from utils.call_llm import client
3 | from tools.pdf import image_to_base64
4 |
5 | def extract_text_from_image(image: Image.Image, prompt: str = None) -> str:
6 | """Extract text from image using OpenAI Vision API
7 |
8 | Args:
9 | image (PIL.Image): Image to process
10 | prompt (str, optional): Custom prompt for extraction. Defaults to general OCR.
11 |
12 | Returns:
13 | str: Extracted text from image
14 | """
15 | # Convert image to base64
16 | img_base64 = image_to_base64(image)
17 |
18 | # Default prompt for general OCR
19 | if prompt is None:
20 | prompt = "Please extract all text from this image."
21 |
22 | # Call Vision API
23 | response = client.chat.completions.create(
24 | model="gpt-4o",
25 | messages=[{
26 | "role": "user",
27 | "content": [
28 | {"type": "text", "text": prompt},
29 | {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{img_base64}"}}
30 | ]
31 | }]
32 | )
33 |
34 | return response.choices[0].message.content
35 |
36 | if __name__ == "__main__":
37 | # Test vision processing
38 | test_image = Image.open("example.png")
39 | result = extract_text_from_image(test_image)
40 | print("Extracted text:", result)
41 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-tool-pdf-vision/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-tool-pdf-vision/utils/__init__.py
--------------------------------------------------------------------------------
/cookbook/pocketflow-tool-pdf-vision/utils/call_llm.py:
--------------------------------------------------------------------------------
1 | import os
2 | from openai import OpenAI
3 | from pathlib import Path
4 |
5 | # Get the project root directory (parent of utils directory)
6 | ROOT_DIR = Path(__file__).parent.parent
7 |
8 | # Initialize OpenAI client with API key from environment
9 | client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
10 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-tool-search/README.md:
--------------------------------------------------------------------------------
1 | # Web Search with Analysis
2 |
3 | A web search tool built with PocketFlow that performs searches using SerpAPI and analyzes results using an LLM.
4 |
5 | ## Features
6 |
7 | - Web search using Google via SerpAPI
8 | - Extracts titles, snippets, and links
9 | - Analyzes search results using GPT-4 to provide:
10 | - Result summaries
11 | - Key points/facts
12 | - Suggested follow-up queries
13 | - Clean command-line interface
14 |
15 | ## Installation
16 |
17 | 1. Clone the repository
18 | 2. Install dependencies:
19 | ```bash
20 | pip install -r requirements.txt
21 | ```
22 | 3. Set required API keys:
23 | ```bash
24 | export SERPAPI_API_KEY='your-serpapi-key'
25 | export OPENAI_API_KEY='your-openai-key'
26 | ```
27 |
28 | ## Usage
29 |
30 | Run the search tool:
31 | ```bash
32 | python main.py
33 | ```
34 |
35 | You will be prompted to:
36 | 1. Enter your search query
37 | 2. Specify number of results to fetch (default: 5)
38 |
39 | The tool will then:
40 | 1. Perform the search using SerpAPI
41 | 2. Analyze results using GPT-4
42 | 3. Present a summary with key points and follow-up queries
43 |
44 | ## Project Structure
45 |
46 | ```
47 | pocketflow-tool-search/
48 | ├── tools/
49 | │ ├── search.py # SerpAPI search functionality
50 | │ └── parser.py # Result analysis using LLM
51 | ├── utils/
52 | │ └── call_llm.py # LLM API wrapper
53 | ├── nodes.py # PocketFlow nodes
54 | ├── flow.py # Flow configuration
55 | ├── main.py # Main script
56 | └── requirements.txt # Dependencies
57 | ```
58 |
59 | ## Limitations
60 |
61 | - Requires SerpAPI subscription
62 | - Rate limited by both APIs
63 | - Basic error handling
64 | - Text results only
65 |
66 | ## Dependencies
67 |
68 | - pocketflow: Flow-based processing
69 | - google-search-results: SerpAPI client
70 | - openai: GPT-4 API access
71 | - pyyaml: YAML processing
72 |
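73 | ## Programmatic Usage
74 |
75 | The flow can also be run from your own script instead of the interactive prompt. The snippet below is a minimal sketch; it assumes the shared-store keys (`query`, `num_results`, `analysis`) that `main.py` uses:
76 |
77 | ```python
78 | from flow import create_flow
79 |
80 | # Shared store read by the search and analysis nodes
81 | shared = {
82 |     "query": "open source LLM frameworks",  # any search query
83 |     "num_results": 5,
84 | }
85 |
86 | create_flow().run(shared)
87 |
88 | # AnalyzeResultsNode stores its output in shared["analysis"]
89 | analysis = shared.get("analysis", {})
90 | print(analysis.get("summary", "No analysis produced"))
91 | ```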
--------------------------------------------------------------------------------
/cookbook/pocketflow-tool-search/flow.py:
--------------------------------------------------------------------------------
1 | from pocketflow import Flow
2 | from nodes import SearchNode, AnalyzeResultsNode
3 |
4 | def create_flow() -> Flow:
5 | """Create and configure the search flow
6 |
7 | Returns:
8 | Flow: Configured flow ready to run
9 | """
10 | # Create nodes
11 | search = SearchNode()
12 | analyze = AnalyzeResultsNode()
13 |
14 | # Connect nodes
15 | search >> analyze
16 |
17 | # Create flow starting with search
18 | return Flow(start=search)
19 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-tool-search/main.py:
--------------------------------------------------------------------------------
1 | import os
2 | from flow import create_flow
3 |
4 | def main():
5 | """Run the web search flow"""
6 |
7 | # Get search query from user
8 | query = input("Enter search query: ")
9 | if not query:
10 | print("Error: Query is required")
11 | return
12 |
13 | # Initialize shared data
14 | shared = {
15 | "query": query,
16 | "num_results": 5
17 | }
18 |
19 | # Create and run flow
20 | flow = create_flow()
21 | flow.run(shared)
22 |
23 | # Results are in shared["analysis"]
24 |
25 | if __name__ == "__main__":
26 | main()
27 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-tool-search/nodes.py:
--------------------------------------------------------------------------------
1 | from pocketflow import Node
2 | from tools.search import SearchTool
3 | from tools.parser import analyze_results
4 | from typing import List, Dict
5 |
6 | class SearchNode(Node):
7 | """Node to perform web search using SerpAPI"""
8 |
9 | def prep(self, shared):
10 | return shared.get("query"), shared.get("num_results", 5)
11 |
12 | def exec(self, inputs):
13 | query, num_results = inputs
14 | if not query:
15 | return []
16 |
17 | searcher = SearchTool()
18 | return searcher.search(query, num_results)
19 |
20 | def post(self, shared, prep_res, exec_res):
21 | shared["search_results"] = exec_res
22 | return "default"
23 |
24 | class AnalyzeResultsNode(Node):
25 | """Node to analyze search results using LLM"""
26 |
27 | def prep(self, shared):
28 | return shared.get("query"), shared.get("search_results", [])
29 |
30 | def exec(self, inputs):
31 | query, results = inputs
32 | if not results:
33 | return {
34 | "summary": "No search results to analyze",
35 | "key_points": [],
36 | "follow_up_queries": []
37 | }
38 |
39 | return analyze_results(query, results)
40 |
41 | def post(self, shared, prep_res, exec_res):
42 | shared["analysis"] = exec_res
43 |
44 | # Print analysis
45 | print("\nSearch Analysis:")
46 | print("\nSummary:", exec_res["summary"])
47 |
48 | print("\nKey Points:")
49 | for point in exec_res["key_points"]:
50 | print(f"- {point}")
51 |
52 | print("\nSuggested Follow-up Queries:")
53 | for query in exec_res["follow_up_queries"]:
54 | print(f"- {query}")
55 |
56 | return "default"
57 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-tool-search/requirements.txt:
--------------------------------------------------------------------------------
1 | pocketflow>=0.1.0
2 | google-search-results>=2.4.2 # SerpAPI client
3 | openai>=1.0.0 # for search result analysis
4 | pyyaml>=6.0.1 # for structured output
5 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-tool-search/tools/parser.py:
--------------------------------------------------------------------------------
1 | from typing import Dict, List
2 | from utils.call_llm import call_llm
3 |
4 | def analyze_results(query: str, results: List[Dict]) -> Dict:
5 | """Analyze search results using LLM
6 |
7 | Args:
8 | query (str): Original search query
9 | results (List[Dict]): Search results to analyze
10 |
11 | Returns:
12 | Dict: Analysis including summary and key points
13 | """
14 | # Format results for prompt
15 | formatted_results = []
16 | for i, result in enumerate(results, 1):
17 | formatted_results.append(f"""
18 | Result {i}:
19 | Title: {result['title']}
20 | Snippet: {result['snippet']}
21 | URL: {result['link']}
22 | """)
23 | results_str = "\n".join(formatted_results)
24 | prompt = f"""
25 | Analyze these search results for the query: "{query}"
26 |
27 | {results_str}
28 |
29 | Please provide:
30 | 1. A concise summary of the findings (2-3 sentences)
31 | 2. Key points or facts (up to 5 bullet points)
32 | 3. Suggested follow-up queries (2-3)
33 |
34 | Output in YAML format:
35 | ```yaml
36 | summary: >
37 | brief summary here
38 | key_points:
39 | - point 1
40 | - point 2
41 | follow_up_queries:
42 | - query 1
43 | - query 2
44 | ```
45 | """
46 |
47 | try:
48 | response = call_llm(prompt)
49 | # Extract YAML between code fences
50 | yaml_str = response.split("```yaml")[1].split("```")[0].strip()
51 |
52 | import yaml
53 | analysis = yaml.safe_load(yaml_str)
54 |
55 | # Validate required fields
56 | assert "summary" in analysis
57 | assert "key_points" in analysis
58 | assert "follow_up_queries" in analysis
59 | assert isinstance(analysis["key_points"], list)
60 | assert isinstance(analysis["follow_up_queries"], list)
61 |
62 | return analysis
63 |
64 | except Exception as e:
65 | print(f"Error analyzing results: {str(e)}")
66 | return {
67 | "summary": "Error analyzing results",
68 | "key_points": [],
69 | "follow_up_queries": []
70 | }
--------------------------------------------------------------------------------
/cookbook/pocketflow-tool-search/tools/search.py:
--------------------------------------------------------------------------------
1 | import os
2 | from serpapi import GoogleSearch
3 | from typing import Dict, List, Optional
4 |
5 | class SearchTool:
6 | """Tool for performing web searches using SerpAPI"""
7 |
8 | def __init__(self, api_key: Optional[str] = None):
9 | """Initialize search tool with API key
10 |
11 | Args:
12 | api_key (str, optional): SerpAPI key. Defaults to env var SERPAPI_API_KEY.
13 | """
14 | self.api_key = api_key or os.getenv("SERPAPI_API_KEY")
15 | if not self.api_key:
16 | raise ValueError("SerpAPI key not found. Set SERPAPI_API_KEY env var.")
17 |
18 | def search(self, query: str, num_results: int = 5) -> List[Dict]:
19 | """Perform Google search via SerpAPI
20 |
21 | Args:
22 | query (str): Search query
23 | num_results (int, optional): Number of results to return. Defaults to 5.
24 |
25 | Returns:
26 | List[Dict]: Search results with title, snippet, and link
27 | """
28 | # Configure search parameters
29 | params = {
30 | "engine": "google",
31 | "q": query,
32 | "api_key": self.api_key,
33 | "num": num_results
34 | }
35 |
36 | try:
37 | # Execute search
38 | search = GoogleSearch(params)
39 | results = search.get_dict()
40 |
41 | # Extract organic results
42 | if "organic_results" not in results:
43 | return []
44 |
45 | processed_results = []
46 | for result in results["organic_results"][:num_results]:
47 | processed_results.append({
48 | "title": result.get("title", ""),
49 | "snippet": result.get("snippet", ""),
50 | "link": result.get("link", "")
51 | })
52 |
53 | return processed_results
54 |
55 | except Exception as e:
56 | print(f"Search error: {str(e)}")
57 | return []
58 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-tool-search/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-tool-search/utils/__init__.py
--------------------------------------------------------------------------------
/cookbook/pocketflow-tool-search/utils/call_llm.py:
--------------------------------------------------------------------------------
1 | import os
2 | from openai import OpenAI
3 | from pathlib import Path
4 |
5 | # Get the project root directory (parent of utils directory)
6 | ROOT_DIR = Path(__file__).parent.parent
7 |
8 | # Initialize OpenAI client with API key from environment
9 | client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
10 |
11 | def call_llm(prompt: str) -> str:
12 | """Call OpenAI API to analyze text
13 |
14 | Args:
15 | prompt (str): Input prompt for the model
16 |
17 | Returns:
18 | str: Model response
19 | """
20 | try:
21 | response = client.chat.completions.create(
22 | model="gpt-4o",
23 | messages=[{"role": "user", "content": prompt}]
24 | )
25 | return response.choices[0].message.content
26 |
27 | except Exception as e:
28 | print(f"Error calling LLM API: {str(e)}")
29 | return ""
30 |
31 | if __name__ == "__main__":
32 | # Test LLM call
33 | response = call_llm("What is web search?")
34 | print("Response:", response)
35 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-visualization/viz/flow_visualization.json:
--------------------------------------------------------------------------------
1 | {
2 | "nodes": [
3 | {
4 | "id": 3,
5 | "name": "ValidatePayment",
6 | "group": 2
7 | },
8 | {
9 | "id": 4,
10 | "name": "ProcessPayment",
11 | "group": 2
12 | },
13 | {
14 | "id": 5,
15 | "name": "PaymentConfirmation",
16 | "group": 2
17 | },
18 | {
19 | "id": 7,
20 | "name": "CheckStock",
21 | "group": 6
22 | },
23 | {
24 | "id": 8,
25 | "name": "ReserveItems",
26 | "group": 6
27 | },
28 | {
29 | "id": 9,
30 | "name": "UpdateInventory",
31 | "group": 6
32 | },
33 | {
34 | "id": 11,
35 | "name": "CreateLabel",
36 | "group": 10
37 | },
38 | {
39 | "id": 12,
40 | "name": "AssignCarrier",
41 | "group": 10
42 | },
43 | {
44 | "id": 13,
45 | "name": "SchedulePickup",
46 | "group": 10
47 | }
48 | ],
49 | "links": [
50 | {
51 | "source": 3,
52 | "target": 4,
53 | "action": "default"
54 | },
55 | {
56 | "source": 4,
57 | "target": 5,
58 | "action": "default"
59 | },
60 | {
61 | "source": 7,
62 | "target": 8,
63 | "action": "default"
64 | },
65 | {
66 | "source": 8,
67 | "target": 9,
68 | "action": "default"
69 | },
70 | {
71 | "source": 11,
72 | "target": 12,
73 | "action": "default"
74 | },
75 | {
76 | "source": 12,
77 | "target": 13,
78 | "action": "default"
79 | }
80 | ],
81 | "group_links": [
82 | {
83 | "source": 2,
84 | "target": 6,
85 | "action": "default"
86 | },
87 | {
88 | "source": 6,
89 | "target": 10,
90 | "action": "default"
91 | }
92 | ],
93 | "flows": {
94 | "1": "OrderFlow",
95 | "2": "AsyncFlow",
96 | "6": "AsyncFlow",
97 | "10": "AsyncFlow"
98 | }
99 | }
--------------------------------------------------------------------------------
/cookbook/pocketflow-voice-chat/flow.py:
--------------------------------------------------------------------------------
1 | from pocketflow import Flow
2 | from nodes import CaptureAudioNode, SpeechToTextNode, QueryLLMNode, TextToSpeechNode
3 |
4 | def create_voice_chat_flow() -> Flow:
5 | """Creates and returns the voice chat flow."""
6 | # Create nodes
7 | capture_audio = CaptureAudioNode()
8 | speech_to_text = SpeechToTextNode()
9 | query_llm = QueryLLMNode()
10 | text_to_speech = TextToSpeechNode()
11 |
12 | # Define transitions
13 | capture_audio >> speech_to_text
14 | speech_to_text >> query_llm
15 | query_llm >> text_to_speech
16 |
17 | # Loop back for next turn or end
18 | text_to_speech - "next_turn" >> capture_audio
19 | # "end_conversation" action from any node will terminate the flow naturally
20 | # if no transition is defined for it from the current node.
21 | # Alternatively, one could explicitly transition to an EndNode if desired.
22 |
23 | # Create flow starting with the capture audio node
24 | voice_chat_flow = Flow(start=capture_audio)
25 | return voice_chat_flow
--------------------------------------------------------------------------------
/cookbook/pocketflow-voice-chat/main.py:
--------------------------------------------------------------------------------
1 | from flow import create_voice_chat_flow
2 |
3 | def main():
4 | """Runs the PocketFlow Voice Chat application."""
5 | print("Starting PocketFlow Voice Chat...")
6 | print("Speak your query after 'Listening for your query...' appears.")
7 | print("The conversation will continue until an error occurs or the loop is intentionally stopped.")
8 | print("To attempt to stop, you might need to cause an error (e.g., silence during capture if not handled by VAD to end gracefully) or modify shared[\"continue_conversation\"] if a mechanism is added.")
9 |
10 | shared = {
11 | "user_audio_data": None,
12 | "user_audio_sample_rate": None,
13 | "chat_history": [],
14 | "continue_conversation": True # Flag to control the main conversation loop
15 | }
16 |
17 | # Create the flow
18 | voice_chat_flow = create_voice_chat_flow()
19 |
20 | # Run the flow
21 | # The flow will loop based on the "next_turn" action from TextToSpeechNode
22 | # and the continue_conversation flag checked within nodes or if an error action is returned.
23 | voice_chat_flow.run(shared)
24 |
25 | if __name__ == "__main__":
26 | main()
27 |
--------------------------------------------------------------------------------
/cookbook/pocketflow-voice-chat/requirements.txt:
--------------------------------------------------------------------------------
1 | openai
2 | pocketflow
3 | numpy
4 | sounddevice
5 | scipy
6 | soundfile
--------------------------------------------------------------------------------
/cookbook/pocketflow-voice-chat/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-voice-chat/utils/__init__.py
--------------------------------------------------------------------------------
/cookbook/pocketflow-voice-chat/utils/call_llm.py:
--------------------------------------------------------------------------------
1 | from openai import OpenAI
2 | import os
3 |
4 | def call_llm(messages):
5 | client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY", "your-api-key"))
6 |
7 | response = client.chat.completions.create(
8 | model="gpt-4o",
9 | messages=messages,
10 | temperature=0.7
11 | )
12 |
13 | return response.choices[0].message.content
14 |
15 | if __name__ == "__main__":
16 | # Test the LLM call
17 | messages = [{"role": "user", "content": "In a few words, what's the meaning of life?"}]
18 | response = call_llm(messages)
19 | print(f"Prompt: {messages[0]['content']}")
20 | print(f"Response: {response}")
--------------------------------------------------------------------------------
/cookbook/pocketflow-voice-chat/utils/speech_to_text.py:
--------------------------------------------------------------------------------
1 | import os
2 | from openai import OpenAI
3 | import io
4 |
5 | def speech_to_text_api(audio_data: bytes, sample_rate: int):
6 | client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
7 |
8 | # The API expects a file-like object. We can use io.BytesIO for in-memory bytes.
9 | # We also need to give it a name, as if it were a file upload.
10 | audio_file = io.BytesIO(audio_data)
11 | audio_file.name = "audio.wav" # Give the in-memory file a name so the API can infer the audio format
12 |
13 | transcript = client.audio.transcriptions.create(
14 | model="gpt-4o-transcribe",
15 | file=audio_file
16 | # language="en" # Optional: specify language ISO-639-1 code
17 | # prompt="PocketFlow, LLM" # Optional: provide a prompt to guide the model
18 | )
19 | return transcript.text
20 |
21 | if __name__ == "__main__":
22 | print("Testing Speech-to-Text API...")
23 | # The OpenAI client will raise an error if API key is not found or invalid.
24 | # No explicit check here to keep it minimal.
25 | test_audio_path = "tts_output.mp3"
26 | if os.path.exists(test_audio_path):
27 | print(f"Found {test_audio_path}, using it for STT test.")
28 | with open(test_audio_path, "rb") as f:
29 | audio_bytes_for_stt = f.read()
30 |
31 | # Sample rate for tts_output.mp3 from our TTS script is 24000
32 | # but Whisper should ideally infer or handle common formats well.
33 | stt_sample_rate = 24000
34 |
35 | transcribed_text = speech_to_text_api(audio_bytes_for_stt, stt_sample_rate)
36 |
37 | if transcribed_text:
38 | print(f"Transcribed text: {transcribed_text}")
39 | else:
40 | print("Failed to transcribe audio (API returned empty data).")
41 | else:
42 | print(f"Test audio file '{test_audio_path}' not found.")
43 | print("Please run the text_to_speech.py test first to generate it, or place your own audio file")
44 | print(" (e.g., named 'test_audio.mp3') in the same directory as this script and modify the path.")
45 | print("Make sure it's a common audio format like MP3, WAV, M4A etc.")
--------------------------------------------------------------------------------
/cookbook/pocketflow-voice-chat/utils/text_to_speech.py:
--------------------------------------------------------------------------------
1 | import os
2 | from openai import OpenAI
3 |
4 | def text_to_speech_api(text_to_synthesize: str):
5 | client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
6 |
7 | response = client.audio.speech.create(
8 | model="gpt-4o-mini-tts",
9 | voice="alloy", # Other voices: echo, fable, onyx, nova, shimmer
10 | input=text_to_synthesize,
11 | response_format="mp3" # Other formats: opus, aac, flac. MP3 is widely supported.
12 | # OpenAI default sample rate for tts-1 is 24kHz.
13 | )
14 | # The response.content is already bytes (the audio data)
15 | # Alternatively, for streaming and saving to file: response.stream_to_file("output.mp3")
16 | audio_data_bytes = response.content
17 | sample_rate = 24000 # OpenAI TTS model tts-1 outputs 24kHz
18 | return audio_data_bytes, sample_rate
19 |
20 | if __name__ == "__main__":
21 | print("Testing Text-to-Speech API...")
22 | # The OpenAI client will raise an error if API key is not found or invalid.
23 | # No explicit check here to keep it minimal.
24 | text = "Hello from PocketFlow! This is a test of the text-to-speech functionality."
25 | audio_bytes, rate = text_to_speech_api(text)
26 | if audio_bytes and rate:
27 | print(f"Successfully converted text to speech. Audio data length: {len(audio_bytes)} bytes, Sample rate: {rate} Hz.")
28 | with open('tts_output.mp3', 'wb') as f:
29 | f.write(audio_bytes)
30 | print("Saved TTS output to tts_output.mp3")
31 | else:
32 | print("Failed to convert text to speech (API returned empty data).")
--------------------------------------------------------------------------------
/cookbook/pocketflow-voice-chat/utils/tts_output.mp3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/The-Pocket/PocketFlow/02ff1e7e9b6c6aa08e860c86bcb302cc09deeb75/cookbook/pocketflow-voice-chat/utils/tts_output.mp3
--------------------------------------------------------------------------------
/cookbook/pocketflow-workflow/flow.py:
--------------------------------------------------------------------------------
1 | from pocketflow import Flow
2 | from nodes import GenerateOutline, WriteSimpleContent, ApplyStyle
3 |
4 | def create_article_flow():
5 | """
6 | Create and configure the article writing workflow
7 | """
8 | # Create node instances
9 | outline_node = GenerateOutline()
10 | write_node = WriteSimpleContent()
11 | style_node = ApplyStyle()
12 |
13 | # Connect nodes in sequence
14 | outline_node >> write_node >> style_node
15 |
16 | # Create flow starting with outline node
17 | article_flow = Flow(start=outline_node)
18 |
19 | return article_flow
--------------------------------------------------------------------------------
/cookbook/pocketflow-workflow/main.py:
--------------------------------------------------------------------------------
1 | from flow import create_article_flow
2 |
3 | def run_flow(topic="AI Safety"):
4 | """
5 | Run the article writing workflow with a specific topic
6 |
7 | Args:
8 | topic (str): The topic for the article
9 | """
10 | # Initialize shared data with the topic
11 | shared = {"topic": topic}
12 |
13 | # Print starting message
14 | print(f"\n=== Starting Article Workflow on Topic: {topic} ===\n")
15 |
16 | # Run the flow
17 | flow = create_article_flow()
18 | flow.run(shared)
19 |
20 | # Output summary
21 | print("\n=== Workflow Completed ===\n")
22 | print(f"Topic: {shared['topic']}")
23 | print(f"Outline Length: {len(shared['outline'])} characters")
24 | print(f"Draft Length: {len(shared['draft'])} characters")
25 | print(f"Final Article Length: {len(shared['final_article'])} characters")
26 |
27 | return shared
28 |
29 | if __name__ == "__main__":
30 | import sys
31 |
32 | # Get topic from command line if provided
33 | topic = "AI Safety" # Default topic
34 | if len(sys.argv) > 1:
35 | topic = " ".join(sys.argv[1:])
36 |
37 | run_flow(topic)
--------------------------------------------------------------------------------
/cookbook/pocketflow-workflow/requirements.txt:
--------------------------------------------------------------------------------
1 | pocketflow>=0.0.1
2 | openai>=1.0.0
3 | pyyaml>=6.0
--------------------------------------------------------------------------------
/cookbook/pocketflow-workflow/utils/call_llm.py:
--------------------------------------------------------------------------------
1 | import os
2 | from openai import OpenAI
3 |
4 | def call_llm(prompt):
5 | client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY", "your-api-key"))
6 | r = client.chat.completions.create(
7 | model="gpt-4o",
8 | messages=[{"role": "user", "content": prompt}]
9 | )
10 | return r.choices[0].message.content
11 |
12 | # Example usage
13 | if __name__ == "__main__":
14 | print(call_llm("Tell me a short joke"))
--------------------------------------------------------------------------------
/docs/_config.yml:
--------------------------------------------------------------------------------
1 | # Basic site settings
2 | title: Pocket Flow
3 | tagline: A 100-line LLM framework
4 | description: Pocket Flow – Minimalist LLM Framework in 100 Lines, Enabling LLMs to Program Themselves
5 |
6 | # Theme settings
7 | remote_theme: just-the-docs/just-the-docs
8 | search_enabled: true
9 |
10 | # SEO & sitemap
11 | plugins:
12 | - jekyll-seo-tag
13 | - jekyll-sitemap
14 |
15 | jekyll-seo-tag:
16 | social:
17 | name: "Pocket Flow"
18 | twitter: "ZacharyHuang12"
19 | github: "the-pocket/PocketFlow"
20 |
21 | # Navigation
22 | nav_sort: case_sensitive
23 |
24 | # Aux links (shown in upper right)
25 | aux_links:
26 | "View on GitHub":
27 | - "//github.com/the-pocket/PocketFlow"
28 |
29 | # Color scheme
30 | color_scheme: light
31 |
32 | # Author settings
33 | author:
34 | name: Zachary Huang
35 | url: https://www.columbia.edu/~zh2408/
36 | twitter: ZacharyHuang12
37 |
38 | # Mermaid settings
39 | mermaid:
40 | version: "9.1.3"
41 | config: |
42 | directionLR
43 |
44 | # Callouts settings
45 | callouts:
46 | warning:
47 | title: Warning
48 | color: red
49 | note:
50 | title: Note
51 | color: blue
52 | best-practice:
53 | title: Best Practice
54 | color: green
55 |
56 | # Custom navigation
57 | nav:
58 | - Home: index.md
59 | - GitHub: "https://github.com/the-pocket/PocketFlow"
60 | - Discord: "https://discord.gg/hUHHE9Sa6T"
--------------------------------------------------------------------------------
/docs/core_abstraction/async.md:
--------------------------------------------------------------------------------
1 | ---
2 | layout: default
3 | title: "(Advanced) Async"
4 | parent: "Core Abstraction"
5 | nav_order: 5
6 | ---
7 |
8 | # (Advanced) Async
9 |
10 | **Async** Nodes implement `prep_async()`, `exec_async()`, `exec_fallback_async()`, and/or `post_async()`. This is useful for:
11 |
12 | 1. **prep_async()**: For *fetching/reading data (files, APIs, DB)* in an I/O-friendly way.
13 | 2. **exec_async()**: Typically used for async LLM calls.
14 | 3. **post_async()**: For *awaiting user feedback*, *coordinating across multi-agents* or any additional async steps after `exec_async()`.
15 |
16 | **Note**: `AsyncNode` must be wrapped in `AsyncFlow`. `AsyncFlow` can also include regular (sync) nodes.
17 |
18 | ### Example
19 |
20 | ```python
21 | class SummarizeThenVerify(AsyncNode):
22 | async def prep_async(self, shared):
23 | # Example: read a file asynchronously
24 | doc_text = await read_file_async(shared["doc_path"])
25 | return doc_text
26 |
27 | async def exec_async(self, prep_res):
28 | # Example: async LLM call
29 | summary = await call_llm_async(f"Summarize: {prep_res}")
30 | return summary
31 |
32 | async def post_async(self, shared, prep_res, exec_res):
33 | # Example: wait for user feedback
34 | decision = await gather_user_feedback(exec_res)
35 | if decision == "approve":
36 | shared["summary"] = exec_res
37 | return "approve"
38 | return "deny"
39 |
40 | summarize_node = SummarizeThenVerify()
41 | final_node = Finalize()
42 |
43 | # Define transitions
44 | summarize_node - "approve" >> final_node
45 | summarize_node - "deny" >> summarize_node # retry
46 |
47 | flow = AsyncFlow(start=summarize_node)
48 |
49 | async def main():
50 | shared = {"doc_path": "document.txt"}
51 | await flow.run_async(shared)
52 | print("Final Summary:", shared.get("summary"))
53 |
54 | asyncio.run(main())
55 | ```
--------------------------------------------------------------------------------
/docs/core_abstraction/index.md:
--------------------------------------------------------------------------------
1 | ---
2 | layout: default
3 | title: "Core Abstraction"
4 | nav_order: 2
5 | has_children: true
6 | ---
--------------------------------------------------------------------------------
/docs/core_abstraction/parallel.md:
--------------------------------------------------------------------------------
1 | ---
2 | layout: default
3 | title: "(Advanced) Parallel"
4 | parent: "Core Abstraction"
5 | nav_order: 6
6 | ---
7 |
8 | # (Advanced) Parallel
9 |
10 | **Parallel** Nodes and Flows let you run multiple **Async** Nodes and Flows **concurrently**—for example, summarizing multiple texts at once. This can improve performance by overlapping I/O and compute.
11 |
12 | > Because of Python’s GIL, parallel nodes and flows can’t truly parallelize CPU-bound tasks (e.g., heavy numerical computations). However, they excel at overlapping I/O-bound work—like LLM calls, database queries, API requests, or file I/O.
13 | {: .warning }
14 |
15 | > - **Ensure Tasks Are Independent**: If each item depends on the output of a previous item, **do not** parallelize.
16 | >
17 | > - **Beware of Rate Limits**: Parallel calls can **quickly** trigger rate limits on LLM services. You may need a **throttling** mechanism (e.g., semaphores or sleep intervals); see the throttling sketch at the end of this page.
18 | >
19 | > - **Consider Single-Node Batch APIs**: Some LLMs offer a **batch inference** API where you can send multiple prompts in a single call. This is more complex to implement but can be more efficient than launching many parallel requests and mitigates rate limits.
20 | {: .best-practice }
21 |
22 | ## AsyncParallelBatchNode
23 |
24 | Like **AsyncBatchNode**, but runs `exec_async()` in **parallel**:
25 |
26 | ```python
27 | class ParallelSummaries(AsyncParallelBatchNode):
28 | async def prep_async(self, shared):
29 | # e.g., multiple texts
30 | return shared["texts"]
31 |
32 | async def exec_async(self, text):
33 | prompt = f"Summarize: {text}"
34 | return await call_llm_async(prompt)
35 |
36 | async def post_async(self, shared, prep_res, exec_res_list):
37 | shared["summary"] = "\n\n".join(exec_res_list)
38 | return "default"
39 |
40 | node = ParallelSummaries()
41 | flow = AsyncFlow(start=node)
42 | ```
43 |
44 | ## AsyncParallelBatchFlow
45 |
46 | Parallel version of **BatchFlow**. Each iteration of the sub-flow runs **concurrently** using different parameters:
47 |
48 | ```python
49 | class SummarizeMultipleFiles(AsyncParallelBatchFlow):
50 | async def prep_async(self, shared):
51 | return [{"filename": f} for f in shared["files"]]
52 |
53 | sub_flow = AsyncFlow(start=LoadAndSummarizeFile())
54 | parallel_flow = SummarizeMultipleFiles(start=sub_flow)
55 | await parallel_flow.run_async(shared)
56 | ```
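57 | 
58 | ## Throttling Parallel Calls
59 | 
60 | As a minimal sketch of the rate-limit advice above (assuming the `call_llm_async` helper from the earlier examples; the limit of 5 is an arbitrary example value), each `exec_async()` can wait on an `asyncio.Semaphore` before calling the LLM:
61 | 
62 | ```python
63 | import asyncio
64 | 
65 | # Allow at most 5 LLM calls in flight at once (example value).
66 | llm_semaphore = asyncio.Semaphore(5)
67 | 
68 | class ThrottledSummaries(AsyncParallelBatchNode):
69 |     async def prep_async(self, shared):
70 |         return shared["texts"]
71 | 
72 |     async def exec_async(self, text):
73 |         # Acquire a slot before issuing the request; other items wait here.
74 |         async with llm_semaphore:
75 |             return await call_llm_async(f"Summarize: {text}")
76 | 
77 |     async def post_async(self, shared, prep_res, exec_res_list):
78 |         shared["summary"] = "\n\n".join(exec_res_list)
79 |         return "default"
80 | ```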
--------------------------------------------------------------------------------
/docs/design_pattern/index.md:
--------------------------------------------------------------------------------
1 | ---
2 | layout: default
3 | title: "Design Pattern"
4 | nav_order: 3
5 | has_children: true
6 | ---
--------------------------------------------------------------------------------
/docs/design_pattern/workflow.md:
--------------------------------------------------------------------------------
1 | ---
2 | layout: default
3 | title: "Workflow"
4 | parent: "Design Pattern"
5 | nav_order: 2
6 | ---
7 |
8 | # Workflow
9 |
10 | Many real-world tasks are too complex for one LLM call. The solution is **Task Decomposition**: decompose them into a [chain](../core_abstraction/flow.md) of multiple Nodes.
11 |
12 |
13 |
14 |
15 |
16 | > - You don't want to make each task **too coarse**, because it may be *too complex for one LLM call*.
17 | > - You don't want to make each task **too granular**, because then *the LLM call doesn't have enough context* and results are *not consistent across nodes*.
18 | >
19 | > You usually need multiple *iterations* to find the *sweet spot*. If the task has too many *edge cases*, consider using [Agents](./agent.md).
20 | {: .best-practice }
21 |
22 | ### Example: Article Writing
23 |
24 | ```python
25 | class GenerateOutline(Node):
26 | def prep(self, shared): return shared["topic"]
27 | def exec(self, topic): return call_llm(f"Create a detailed outline for an article about {topic}")
28 | def post(self, shared, prep_res, exec_res): shared["outline"] = exec_res
29 |
30 | class WriteSection(Node):
31 | def prep(self, shared): return shared["outline"]
32 | def exec(self, outline): return call_llm(f"Write content based on this outline: {outline}")
33 | def post(self, shared, prep_res, exec_res): shared["draft"] = exec_res
34 |
35 | class ReviewAndRefine(Node):
36 | def prep(self, shared): return shared["draft"]
37 | def exec(self, draft): return call_llm(f"Review and improve this draft: {draft}")
38 | def post(self, shared, prep_res, exec_res): shared["final_article"] = exec_res
39 |
40 | # Connect nodes
41 | outline = GenerateOutline()
42 | write = WriteSection()
43 | review = ReviewAndRefine()
44 |
45 | outline >> write >> review
46 |
47 | # Create and run flow
48 | writing_flow = Flow(start=outline)
49 | shared = {"topic": "AI Safety"}
50 | writing_flow.run(shared)
51 | ```
52 |
53 | For *dynamic cases*, consider using [Agents](./agent.md).
--------------------------------------------------------------------------------
/docs/utility_function/chunking.md:
--------------------------------------------------------------------------------
1 | ---
2 | layout: default
3 | title: "Text Chunking"
4 | parent: "Utility Function"
5 | nav_order: 4
6 | ---
7 |
8 | # Text Chunking
9 |
10 | Below are sample implementations of commonly used text chunking approaches.
11 |
12 |
13 | > Text chunking is more of a micro-optimization compared to the overall Flow design.
14 | >
15 | > It's recommended to start with naive chunking and optimize later.
16 | {: .best-practice }
17 |
18 | ---
19 |
20 | ## Example Python Code Samples
21 |
22 | ### 1. Naive (Fixed-Size) Chunking
23 | Splits text into fixed-size character chunks, ignoring sentence or semantic boundaries.
24 |
25 | ```python
26 | def fixed_size_chunk(text, chunk_size=100):  # chunk_size is measured in characters
27 | chunks = []
28 | for i in range(0, len(text), chunk_size):
29 | chunks.append(text[i : i + chunk_size])
30 | return chunks
31 | ```
32 |
33 | However, sentences are often cut awkwardly, losing coherence.
34 |
35 | ### 2. Sentence-Based Chunking
36 |
37 | ```python
38 | import nltk  # sentence tokenization needs the Punkt data: nltk.download("punkt")
39 |
40 | def sentence_based_chunk(text, max_sentences=2):
41 | sentences = nltk.sent_tokenize(text)
42 | chunks = []
43 | for i in range(0, len(sentences), max_sentences):
44 | chunks.append(" ".join(sentences[i : i + max_sentences]))
45 | return chunks
46 | ```
47 |
48 | However, it might not handle very long sentences or paragraphs well.
49 |
50 | ### 3. Other Chunking
51 |
52 | - **Paragraph-Based**: Split text by paragraphs (e.g., newlines); see the sketch below. Large paragraphs can create big chunks.
53 | - **Semantic**: Use embeddings or topic modeling to chunk by semantic boundaries.
54 | - **Agentic**: Use an LLM to decide chunk boundaries based on context or meaning.
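55 | 
56 | For the paragraph-based variant, a minimal sketch (assuming paragraphs separated by blank lines, one common convention) looks like:
57 | 
58 | ```python
59 | def paragraph_based_chunk(text):
60 |     # Treat blank lines as paragraph boundaries and drop empty entries.
61 |     paragraphs = [p.strip() for p in text.split("\n\n")]
62 |     return [p for p in paragraphs if p]
63 | ```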
--------------------------------------------------------------------------------
/docs/utility_function/index.md:
--------------------------------------------------------------------------------
1 | ---
2 | layout: default
3 | title: "Utility Function"
4 | nav_order: 4
5 | has_children: true
6 | ---
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup, find_packages
2 |
3 | setup(
4 | name="pocketflow",
5 | version="0.0.2",
6 | packages=find_packages(),
7 | author="Zachary Huang",
8 | author_email="zh2408@columbia.edu",
9 | description="Pocket Flow: 100-line LLM framework. Let Agents build Agents!",
10 | url="https://github.com/The-Pocket/PocketFlow",
11 | )
--------------------------------------------------------------------------------