├── .gitignore
├── example.txt
├── .env.example
├── Gemfile
├── examples
│   ├── parallelization.rb
│   ├── evaluator_optimizer.rb
│   ├── prompt_chaining.rb
│   ├── orchestrator_workers.rb
│   └── routing.rb
├── main.rb
├── Gemfile.lock
├── lib
│   ├── orchestrator.rb
│   ├── optimizer.rb
│   └── llm_utils.rb
└── README.md
/.gitignore:
--------------------------------------------------------------------------------
1 | .env
--------------------------------------------------------------------------------
/example.txt:
--------------------------------------------------------------------------------
1 | Some file content
--------------------------------------------------------------------------------
/.env.example:
--------------------------------------------------------------------------------
1 | OPENROUTER_API_KEY=YOUR_OPENROUTER_API_KEY
2 |
--------------------------------------------------------------------------------
/Gemfile:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | source "https://rubygems.org"
4 |
5 | gem "ruby_llm"
6 | gem "dotenv"
7 | gem "parallel"
--------------------------------------------------------------------------------
/examples/parallelization.rb:
--------------------------------------------------------------------------------
1 | # Example 2: Parallelization workflow for stakeholder impact analysis
2 | # Process impact analysis for multiple stakeholder groups concurrently
3 |
4 | stakeholders = [
5 | "Customers:\n- Price sensitive\n- Want better tech\n- Environmental concerns",
6 | "Employees:\n- Job security worries\n- Need new skills\n- Want clear direction",
7 | "Investors:\n- Expect growth\n- Want cost control\n- Risk concerns",
8 | "Suppliers:\n- Capacity constraints\n- Price pressures\n- Tech transitions"
9 | ]
10 |
11 | impact_prompt = "Analyze how market changes will impact this stakeholder group.\n" \
12 | "Provide specific impacts and recommended actions.\n" \
13 | "Format with clear sections and priorities."
14 |
15 | impact_results = parallel(prompt: impact_prompt, inputs: stakeholders)
16 |
17 | impact_results.each do |result|
18 | puts result
19 | puts "+" * 80
20 | end
--------------------------------------------------------------------------------
/main.rb:
--------------------------------------------------------------------------------
1 | require 'dotenv/load'
2 | require_relative 'lib/llm_utils'
3 | include LLMUtils
4 |
5 | puts "=== RubyLLM AI Agents Cookbook ==="
6 | puts "=========================="
7 |
8 | # Only uncomment the example you want to run
9 | #
10 | # Example 1: Prompt Chaining workflow for data processing
11 | require_relative 'examples/prompt_chaining'
12 | #
13 | # Example 2: Parallelization workflow for stakeholder impact analysis
14 | # require_relative 'examples/parallelization'
15 | #
16 | # Example 3: Routing workflow for support ticket routing
17 | # require_relative 'examples/routing'
18 | #
19 | # Example 4: Orchestrator pattern with worker specialization
20 | # require_relative 'examples/orchestrator_workers'
21 | #
22 | # Example 5: Evaluator-Optimizer pattern for code generation and optimization
23 | # require_relative 'examples/evaluator_optimizer'
24 |
25 | puts "=========================="
26 | puts "=== Done ==="
27 |
--------------------------------------------------------------------------------
/Gemfile.lock:
--------------------------------------------------------------------------------
1 | GEM
2 | remote: https://rubygems.org/
3 | specs:
4 | base64 (0.3.0)
5 | dotenv (3.1.8)
6 | event_stream_parser (1.0.0)
7 | faraday (2.13.2)
8 | faraday-net_http (>= 2.0, < 3.5)
9 | json
10 | logger
11 | faraday-multipart (1.1.1)
12 | multipart-post (~> 2.0)
13 | faraday-net_http (3.4.1)
14 | net-http (>= 0.5.0)
15 | faraday-retry (2.3.2)
16 | faraday (~> 2.0)
17 | json (2.12.2)
18 | logger (1.7.0)
19 | marcel (1.0.4)
20 | multipart-post (2.4.1)
21 | net-http (0.6.0)
22 | uri
23 | parallel (1.27.0)
24 | ruby_llm (1.3.1)
25 | base64
26 | event_stream_parser (~> 1)
27 | faraday (>= 1.10.0)
28 | faraday-multipart (>= 1)
29 | faraday-net_http (>= 1)
30 | faraday-retry (>= 1)
31 | marcel (~> 1.0)
32 | zeitwerk (~> 2)
33 | uri (1.0.3)
34 | zeitwerk (2.7.3)
35 |
36 | PLATFORMS
37 | arm64-darwin-23
38 | ruby
39 |
40 | DEPENDENCIES
41 | dotenv
42 | parallel
43 | ruby_llm
44 |
45 | BUNDLED WITH
46 | 2.6.8
47 |
--------------------------------------------------------------------------------
/examples/evaluator_optimizer.rb:
--------------------------------------------------------------------------------
1 | # Example 5: Evaluator-Optimizer pattern for code generation and optimization
2 | # In this workflow, one LLM call generates a response while another provides evaluation and feedback in a loop
3 |
4 | require_relative '../lib/optimizer'
5 |
 6 | evaluator_prompt = <<~EVAL
 7 |   Evaluate the following code implementation for:
 8 |   1. code correctness
 9 |   2. time complexity
10 |   3. style and best practices
11 | 
12 |   You should be evaluating only and not attempting to solve the task.
13 |   Only output "PASS" if all criteria are met and you have no further suggestions for improvements.
14 |   Output your evaluation concisely in the following format.
15 | 
16 |   <evaluation>PASS, NEEDS_IMPROVEMENT, or FAIL</evaluation>
17 |   <feedback>
18 |   What needs improvement and why.
19 |   </feedback>
20 | EVAL
21 |
22 | generator_prompt = <<~GEN
23 |   Your goal is to complete the task based on <user input>. If there is feedback
24 |   from your previous generations, you should reflect on it to improve your solution.
25 | 
26 |   Output your answer concisely in the following format:
27 | 
28 |   <thoughts>
29 |   [Your understanding of the task and feedback and how you plan to improve]
30 |   </thoughts>
31 | 
32 |   <response>
33 |   [Your code implementation here]
34 |   </response>
35 | GEN
36 |
37 | task = <<~TASK
38 |   <user input>
39 |   Implement a Stack with:
40 |   1. push(x)
41 |   2. pop()
42 |   3. getMin()
43 |   All operations should be O(1).
44 |   </user input>
45 | TASK
46 |
47 | Optimizer.new.process(
48 | task: task,
49 | evaluator_prompt: evaluator_prompt,
50 | generator_prompt: generator_prompt
51 | )
--------------------------------------------------------------------------------
/examples/prompt_chaining.rb:
--------------------------------------------------------------------------------
1 | # Example 1: Prompt chain workflow for structured data extraction and formatting
2 | # Each step progressively transforms raw text into a formatted table
3 |
4 | data_processing_steps = [
5 | "Extract only the numerical values and their associated metrics from the text.\n" \
6 | "Format each as 'value: metric' on a new line.\n" \
7 | "Example format:\n" \
8 | "92: customer satisfaction\n" \
9 | "45%: revenue growth",
10 |
11 | "Convert all numerical values to percentages where possible.\n" \
12 | "If not a percentage or points, convert to decimal (e.g., 92 points -> 92%).\n" \
13 | "Keep one number per line.\n" \
14 | "Example format:\n" \
15 | "92%: customer satisfaction\n" \
16 | "45%: revenue growth",
17 |
18 | "Sort all lines in descending order by numerical value.\n" \
19 | "Keep the format 'value: metric' on each line.\n" \
20 | "Example:\n" \
21 | "92%: customer satisfaction\n" \
22 | "87%: employee satisfaction",
23 |
24 | "Format the sorted data as a markdown table with columns:\n" \
25 | "| Metric | Value |\n" \
26 | "|:--|--:|\n" \
27 | "| Customer Satisfaction | 92% |"
28 | ]
29 |
30 | report =
31 | "Q3 Performance Summary:\n" \
32 | "Our customer satisfaction score rose to 92 points this quarter.\n" \
33 | "Revenue grew by 45% compared to last year.\n" \
34 | "Market share is now at 23% in our primary market.\n" \
35 | "Customer churn decreased to 5% from 8%.\n" \
36 | "New user acquisition cost is $43 per user.\n" \
37 | "Product adoption rate increased to 78%.\n" \
38 | "Employee satisfaction is at 87 points.\n" \
39 | "Operating margin improved to 34%."
40 |
41 | puts "\nInput text:"
42 | puts report
43 | formatted_result = chain(input: report, prompts: data_processing_steps)
44 |
45 | puts "\nFormatted result:"
46 | puts formatted_result
--------------------------------------------------------------------------------
/lib/orchestrator.rb:
--------------------------------------------------------------------------------
1 | class Orchestrator
2 | attr_reader :orchestrator_prompt, :worker_prompt
3 |
4 | def initialize(orchestrator_prompt:, worker_prompt:)
5 | @orchestrator_prompt = orchestrator_prompt
6 | @worker_prompt = worker_prompt
7 | end
8 |
9 | def process(task:, context: {})
10 | context ||= {}
11 |
12 | # Step 1: Orchestrator Phase
13 | orchestrator_input = format_prompt(orchestrator_prompt, { task: task }.merge(context))
14 | orchestrator_response = llm_call(orchestrator_input)
15 |
16 | analysis = extract_xml(orchestrator_response, "analysis")
17 | tasks_xml = extract_xml(orchestrator_response, "tasks")
18 | tasks = parse_tasks(tasks_xml)
19 |
20 | puts "\n=== ORCHESTRATOR OUTPUT ==="
21 | puts "\nANALYSIS:\n#{analysis}"
22 | puts "\nTASKS:\n#{tasks}"
23 |
24 | # Step 2: Worker Phase
25 | worker_results = tasks.map do |task_info|
26 | worker_input = format_prompt(worker_prompt, {
27 | original_task: task,
28 | task_type: task_info['type'],
29 | task_description: task_info['description']
30 | }.merge(context))
31 |
32 | worker_response = llm_call(worker_input)
33 | result = extract_xml(worker_response, "response")
34 |
35 | puts "\n=== WORKER RESULT (#{task_info['type']}) ===\n#{result}\n"
36 |
37 | { type: task_info['type'], description: task_info['description'], result: result }
38 | end
39 |
40 | { analysis: analysis, worker_results: worker_results }
41 | end
42 |
43 | private
44 |
45 | def format_prompt(template, variables)
46 | raise ArgumentError, "Prompt template cannot be nil" if template.nil?
47 | begin
48 | template % variables
49 | rescue KeyError => e
50 | raise ArgumentError, "Missing required prompt variable: #{e.message}"
51 | end
52 | end
53 | end
--------------------------------------------------------------------------------
/examples/orchestrator_workers.rb:
--------------------------------------------------------------------------------
1 | # Example 4: Orchestrator pattern with worker specialization
2 | # Break down a task into multiple subtasks and delegate to workers before synthesizing their results
3 |
4 | require_relative '../lib/orchestrator'
5 |
 6 | orchestrator_prompt = <<~PROMPT
 7 |   Analyze this task and break it down into 2-3 distinct approaches:
 8 | 
 9 |   User Task: `%{task}`
10 | 
11 |   Return your response in this format:
12 | 
13 |   <analysis>
14 |   Explain your understanding of the task and which variations would be valuable.
15 |   Focus on how each approach serves different aspects of the task.
16 |   </analysis>
17 | 
18 |   <tasks>
19 |     <task>
20 |       <type>formal</type>
21 |       <description>Write a precise, technical version that emphasizes specifications</description>
22 |     </task>
23 |     <task>
24 |       <type>conversational</type>
25 |       <description>Write an engaging, friendly version that connects with readers</description>
26 |     </task>
27 |   </tasks>
28 | PROMPT
29 |
30 | worker_prompt = <<~PROMPT
31 |   Generate content based on:
32 |   Task: `%{original_task}`
33 |   Style: `%{task_type}`
34 |   Guidelines: `%{task_description}`
35 | 
36 |   Return your response in this format:
37 | 
38 |   <response>
39 |   Your content here, maintaining the specified style and fully addressing requirements.
40 |   </response>
41 | PROMPT
42 |
43 | orchestrator = Orchestrator.new(
44 | orchestrator_prompt: orchestrator_prompt,
45 | worker_prompt: worker_prompt
46 | )
47 |
48 | results = orchestrator.process(
49 | task: "Write a product description for a new eco-friendly water bottle",
50 | context: {
51 | target_audience: "environmentally conscious millennials",
52 | key_features: ["plastic-free", "insulated", "lifetime warranty"]
53 | }
54 | )
55 |
56 | puts "\n=== ORCHESTRATOR ANALYSIS ===\n#{results[:analysis]}\n"
57 | puts "\n=== WORKER RESULTS ===\n"
58 | results[:worker_results].each do |result|
59 | puts "\n=== #{result[:type]} ===\n#{result[:result]}\n"
60 | end
61 |
--------------------------------------------------------------------------------
/lib/optimizer.rb:
--------------------------------------------------------------------------------
1 | class Optimizer
2 | def initialize(max_iterations: 3)
3 | @max_iterations = max_iterations
4 | end
5 |
6 | def process(task:, evaluator_prompt:, generator_prompt:)
7 | # Keep generating and evaluating until requirements are met
8 | memory = []
9 | chain_of_thought = []
10 |
11 | thoughts, result = generate(generator_prompt, task)
12 | memory << result
13 | chain_of_thought << { thoughts: thoughts, result: result }
14 |
15 | loop do
16 |       evaluation, feedback = evaluate(evaluator_prompt, result, task)
17 |       return [result, chain_of_thought] if evaluation == "PASS"
18 |
19 | context = [
20 | "Previous attempts:",
21 | *memory.map { |m| "- #{m}" },
22 | "\nFeedback: #{feedback}"
23 | ].join("\n")
24 |
25 | thoughts, result = generate(generator_prompt, task, context)
26 | memory << result
27 | chain_of_thought << { thoughts: thoughts, result: result }
28 |
29 |       break [result, chain_of_thought] if chain_of_thought.size >= @max_iterations
30 | end
31 | end
32 |
33 | private
34 |
35 | def generate(prompt, task, context = "")
36 | # Generate and improve a solution based on feedback
37 | full_prompt = if context.empty?
38 | "#{prompt}\nTask: #{task}"
39 | else
40 | "#{prompt}\n#{context}\nTask: #{task}"
41 | end
42 |
43 | response = llm_call(full_prompt)
44 | thoughts = extract_xml(response, "thoughts")
45 | result = extract_xml(response, "response")
46 |
47 | puts "\n=== GENERATION START ==="
48 | puts "Thoughts:\n#{thoughts}\n"
49 | puts "Generated:\n#{result}"
50 | puts "=== GENERATION END ===\n"
51 |
52 | [thoughts, result]
53 | end
54 |
55 | def evaluate(prompt, content, task)
56 | # Evaluate if a solution meets requirements
57 | full_prompt = "#{prompt}\nOriginal task: #{task}\nContent to evaluate: #{content}"
58 | response = llm_call(full_prompt)
59 | evaluation = extract_xml(response, "evaluation")
60 | feedback = extract_xml(response, "feedback")
61 |
62 | puts "=== EVALUATION START ==="
63 | puts "Status: #{evaluation}"
64 | puts "Feedback: #{feedback}"
65 | puts "=== EVALUATION END ===\n"
66 |
67 | [evaluation, feedback]
68 | end
69 | end
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Building Effective Agents with Ruby
2 |
3 | A collection of Anthropic-inspired examples showcasing how to build AI Agents with Ruby.
4 |
5 | This repository contains implementations of common agent
6 | workflows discussed in Anthropic's blog post, [Building Effective Agents](https://www.anthropic.com/engineering/building-effective-agents).
7 |
8 | The code here is primarily a rewrite of the sample code
9 | that was originally written in Python by Erik Schluntz and Barry Zhang [here](https://github.com/anthropics/anthropic-cookbook/tree/main/patterns/agents).
10 |
11 | ## Installation
12 |
13 | Clone the repo and run `bundle install` to install dependencies.
14 |
15 | ```bash
16 | bundle install
17 | ```
18 |
19 | ## Usage
20 |
21 | These examples use the OpenRouter API for LLM calls. You will need an OpenRouter API key: create a `.env` file from the `.env.example` template and set your key there.
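For example, from the repository root:

```bash
cp .env.example .env
# then edit .env and set OPENROUTER_API_KEY to your key
```

The `.env` file is already listed in `.gitignore`, so your key stays out of version control.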
22 |
23 | If you would like to use a different LLM provider, modify `lib/llm_utils.rb` to configure that provider and the model of your choice. These
24 | examples use the `meta-llama/llama-4-scout` model.
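As a rough sketch, pointing the helper at OpenAI instead might look like this (the provider key and `gpt-4o-mini` model below are illustrative; see the RubyLLM configuration guide for the exact options your provider needs):

```ruby
require 'dotenv/load'
require 'ruby_llm'

RubyLLM.configure do |config|
  # Assumption: an OpenAI key is used instead of OpenRouter
  config.openai_api_key = ENV.fetch('OPENAI_API_KEY', nil)
end

module LLMUtils
  # Same helper as lib/llm_utils.rb, with the default model swapped out
  def llm_call(prompt, system_prompt: "", model: "gpt-4o-mini")
    chat = RubyLLM.chat(model: model)
                  .with_temperature(0.1)
                  .with_instructions(system_prompt)
    chat.ask(prompt).content
  end
end
```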
25 |
26 | The LLM calls are made using the [RubyLLM](https://github.com/crmne/ruby_llm) gem. The complete list of available models is [here](https://rubyllm.com/guides/available-models).
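Because every example funnels through the `llm_call` helper in `lib/llm_utils.rb`, you can also try a model directly from a scratch script or an IRB session (the second model ID below is only an illustrative OpenRouter ID):

```ruby
require_relative 'lib/llm_utils'
include LLMUtils

# Uses the default meta-llama/llama-4-scout model via OpenRouter
puts llm_call("Summarize this repository in one sentence.")

# Any other model ID your provider supports can be passed explicitly
puts llm_call("Now as three bullet points.", model: "openai/gpt-4o-mini")
```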
27 |
28 | In `main.rb`, you will find the different agent workflow
29 | examples. Uncomment the one you want to run, like so:
30 |
31 | ```ruby
32 | # Only uncomment the example you want to run
33 | #
34 | # Example 1: Prompt Chaining workflow for data processing
35 | require_relative 'examples/prompt_chaining'
36 | #
37 | # Example 2: Parallelization workflow for stakeholder impact analysis
38 | # require_relative 'examples/parallelization'
39 | #
40 | # Example 3: Routing workflow for support ticket routing
41 | # require_relative 'examples/routing'
42 | #
43 | # Example 4: Orchestrator pattern with worker specialization
44 | # require_relative 'examples/orchestrator_workers'
45 | #
46 | # Example 5: Evaluator-Optimizer pattern for code generation and optimization
47 | # require_relative 'examples/evaluator_optimizer'
48 | ```
49 |
50 | Then run your selected example with `ruby main.rb`.
51 |
52 | ## Contributing
53 |
54 | Pull requests are always welcome.
55 |
56 | ## License
57 |
58 | [MIT](https://choosealicense.com/licenses/mit/)
--------------------------------------------------------------------------------
/lib/llm_utils.rb:
--------------------------------------------------------------------------------
1 | require 'dotenv/load'
2 | require 'ruby_llm'
3 | require 'parallel'
4 |
5 | RubyLLM.configure do |config|
6 | config.openrouter_api_key = ENV.fetch('OPENROUTER_API_KEY', nil)
7 | end
8 |
9 | module LLMUtils
10 | def llm_call(prompt, system_prompt: "", model: "meta-llama/llama-4-scout")
11 | # Calls the model with the given prompt and returns the response
12 | chat = RubyLLM.chat(model: model)
13 | .with_temperature(0.1)
14 | .with_instructions(system_prompt)
15 |
16 | response = chat.ask(prompt)
17 | response.content
18 | end
19 |
20 | def extract_xml(text, tag)
21 | # Extracts the content of the specified XML tag from the given text
22 | match = text.match(/<#{tag}>(.*?)<\/#{tag}>/m)
23 | match ? match[1] : ""
24 | end
25 |
26 | def parse_tasks(tasks_xml)
27 | # Parse XML tasks into an array of task hashes.
28 | tasks = []
29 | current_task = {}
30 |
31 | tasks_xml.each_line do |line|
32 | line = line.strip
33 | next if line.empty?
34 |
35 |       if line.start_with?("<task>")
36 |         current_task = {}
37 |       elsif line.start_with?("<type>")
38 |         current_task["type"] = line[6..-8].strip
39 |       elsif line.start_with?("<description>")
40 |         current_task["description"] = line[13..-15].strip
41 |       elsif line.start_with?("</task>")
42 | if current_task.key?("description")
43 | current_task["type"] ||= "default"
44 | tasks << current_task
45 | end
46 | end
47 | end
48 |
49 | tasks
50 | end
51 |
52 | def chain(input:, prompts:)
53 | # Chain multiple LLM calls sequentially, passing results between steps
54 | result = input
55 | prompts.each_with_index do |prompt, i|
56 | puts "\nStep #{i + 1}:"
57 |       result = llm_call("#{prompt}\nInput: #{result}")
58 | puts result
59 | end
60 | result
61 | end
62 |
63 | def parallel(prompt:, inputs:, n_workers: 3)
64 | # Process multiple inputs concurrently with the same prompt
65 | Parallel.map(inputs, in_threads: n_workers) do |input|
66 | llm_call("#{prompt}\nInput: #{input}")
67 | end
68 | end
69 |
70 | def route(input:, routes:)
71 | # Route input to specialized prompt using content classification.
72 | puts "\nAvailable routes: #{routes.keys}"
73 |
74 | selector_prompt =
75 | "Analyze the input and select the most appropriate support team from these options: #{routes.keys}\n" +
76 | "First explain your reasoning, then provide your selection in this XML format:\n\n" +
77 | "\n" +
78 | "Brief explanation of why this ticket should be routed to a specific team.\n" +
79 | "Consider key terms, user intent, and urgency level.\n" +
80 | "\n\n" +
81 | "\n" +
82 | "The chosen team name\n" +
83 | "\n\n" +
84 | "Input: #{input}"
85 |
86 | route_response = llm_call(selector_prompt)
87 | reasoning = extract_xml(route_response, 'reasoning')
88 | route_key = extract_xml(route_response, 'selection').strip.downcase
89 |
90 | puts "Routing Analysis:"
91 | puts reasoning
92 | puts "\nSelected route: #{route_key}"
93 |
94 | # Process input with selected specialized prompt
95 | selected_prompt = routes[route_key]
96 | llm_call("#{selected_prompt}\nInput: #{input}")
97 | end
98 | end
--------------------------------------------------------------------------------
/examples/routing.rb:
--------------------------------------------------------------------------------
1 | # Example 3: Route workflow for customer support ticket handling
2 | # Route support tickets to appropriate teams based on content analysis
3 |
4 | support_routes = {
5 | "billing" => "You are a billing support specialist. Follow these guidelines:\n" +
6 | "1. Always start with \"Billing Support Response:\"\n" +
7 | "2. First acknowledge the specific billing issue\n" +
8 | "3. Explain any charges or discrepancies clearly\n" +
9 | "4. List concrete next steps with timeline\n" +
10 | "5. End with payment options if relevant\n\n" +
11 | "Keep responses professional but friendly.\n\n" +
12 | "Input: ",
13 |
14 | "technical" => "You are a technical support engineer. Follow these guidelines:\n" +
15 | "1. Always start with \"Technical Support Response:\"\n" +
16 | "2. List exact steps to resolve the issue\n" +
17 | "3. Include system requirements if relevant\n" +
18 | "4. Provide workarounds for common problems\n" +
19 | "5. End with escalation path if needed\n\n" +
20 | "Use clear, numbered steps and technical details.\n\n" +
21 | "Input: ",
22 |
23 | "account" => "You are an account security specialist. Follow these guidelines:\n" +
24 | "1. Always start with \"Account Support Response:\"\n" +
25 | "2. Prioritize account security and verification\n" +
26 | "3. Provide clear steps for account recovery/changes\n" +
27 | "4. Include security tips and warnings\n" +
28 | "5. Set clear expectations for resolution time\n\n" +
29 | "Maintain a serious, security-focused tone.\n\n" +
30 | "Input: ",
31 |
32 | "product" => "You are a product specialist. Follow these guidelines:\n" +
33 | "1. Always start with \"Product Support Response:\"\n" +
34 | "2. Focus on feature education and best practices\n" +
35 | "3. Include specific examples of usage\n" +
36 | "4. Link to relevant documentation sections\n" +
37 | "5. Suggest related features that might help\n\n" +
38 | "Be educational and encouraging in tone.\n\n" +
39 | "Input: "
40 | }
41 |
42 | # Test with different support tickets
43 | tickets = [
44 | "Subject: Can't access my account\n" +
45 | "Message: Hi, I've been trying to log in for the past hour but keep getting an 'invalid password' error. \n" +
46 | "I'm sure I'm using the right password. Can you help me regain access? This is urgent as I need to \n" +
47 | "submit a report by end of day.\n" +
48 | "- John",
49 |
50 | "Subject: Unexpected charge on my card\n" +
51 | "Message: Hello, I just noticed a charge of $49.99 on my credit card from your company, but I thought\n" +
52 | "I was on the $29.99 plan. Can you explain this charge and adjust it if it's a mistake?\n" +
53 | "Thanks,\n" +
54 | "Sarah",
55 |
56 | "Subject: How to export data?\n" +
57 | "Message: I need to export all my project data to Excel. I've looked through the docs but can't\n" +
58 | "figure out how to do a bulk export. Is this possible? If so, could you walk me through the steps?\n" +
59 | "Best regards,\n" +
60 | "Mike"
61 | ]
62 |
63 | puts "Processing support tickets...\n"
64 | tickets.each_with_index do |ticket, i|
65 | puts "\nTicket #{i + 1}:"
66 | puts "-" * 40
67 | puts ticket
68 | puts "\nResponse:"
69 | puts "-" * 40
70 | response = route(input: ticket, routes: support_routes)
71 | puts response
72 | puts "+" * 80
73 | end
--------------------------------------------------------------------------------