├── .github
└── workflows
│ └── dev-build.yml
├── .gitignore
├── .rubocop.yml
├── CHANGELOG.md
├── Gemfile
├── LICENSE
├── README.md
├── Rakefile
├── bin
└── console
├── build_mkdocs.sh
├── docs
├── blog
│ ├── .authors.yml
│ └── index.md
├── concepts
│ ├── logging.md
│ ├── patching.md
│ ├── philosophy.md
│ ├── schema.md
│ ├── streaming.md
│ └── tips.md
├── contributing.md
├── examples
│ ├── action_items.md
│ ├── action_items.png
│ ├── classification.md
│ ├── content_moderation.md
│ ├── index.md
│ ├── query_decomposition.md
│ ├── self_correction.md
│ └── validated_citations.md
├── help.md
├── index.md
└── overrides
│ └── main.html
├── ellipsis.Dockerfile
├── ellipsis.yaml
├── instructor-rb.gemspec
├── lib
├── instructor.rb
└── instructor
│ ├── anthropic
│ ├── patch.rb
│ └── response.rb
│ ├── base
│ └── patch.rb
│ ├── mode.rb
│ ├── openai
│ ├── patch.rb
│ └── response.rb
│ └── version.rb
├── mkdocs.yml
├── scripts
└── requirements-docs.txt
└── spec
├── anthropic
└── patch_spec.rb
├── examples
└── autoticketer_spec.rb
├── features
├── basic_use_spec.rb
└── iterable_spec.rb
├── helpers
└── autoticketer_models.rb
├── instructor_spec.rb
├── openai
├── patch_spec.rb
└── response_spec.rb
├── spec_helper.rb
└── vcr_cassettes
├── anthropic_patch
├── invalid_response.yml
├── valid_response.yml
└── with_validation_context.yml
├── autoticketer
└── generate.yml
├── basic_spec
└── valid_response.yml
├── iterable_spec
└── valid_response.yml
└── patching_spec
├── invalid_response.yml
├── standard_usage.yml
├── valid_response.yml
└── with_validation_context.yml
/.github/workflows/dev-build.yml:
--------------------------------------------------------------------------------
1 | name: Dev Build
2 |
3 | on:
4 | push:
5 | branches: [ "main" ]
6 | pull_request:
7 | branches: [ "main" ]
8 |
9 | permissions:
10 | contents: read
11 |
12 | jobs:
13 | test:
14 |
15 | runs-on: ubuntu-latest
16 | strategy:
17 | matrix:
18 | ruby-version: ['3.3']
19 |
20 | steps:
21 | - uses: actions/checkout@v4
22 | - name: Set up Ruby
23 |
24 | uses: ruby/setup-ruby@v1
25 | with:
26 | ruby-version: ${{ matrix.ruby-version }}
27 | bundler-cache: true # runs 'bundle install' and caches installed gems automatically
28 | - name: Run tests
29 | run: bundle exec rake
30 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.gem
2 | *.rbc
3 | .DS_Store
4 | /.config
5 | /coverage/
6 | /InstalledFiles
7 | /pkg/
8 | /spec/reports/
9 | /spec/examples.txt
10 | /test/tmp/
11 | /test/version_tmp/
12 | /tmp/
13 |
14 | # Used by dotenv library to load environment variables.
15 | # .env
16 |
17 | # Ignore Byebug command history file.
18 | .byebug_history
19 |
20 | ## Specific to RubyMotion:
21 | .dat*
22 | .repl_history
23 | build/
24 | *.bridgesupport
25 | build-iPhoneOS/
26 | build-iPhoneSimulator/
27 |
28 | ## Specific to RubyMotion (use of CocoaPods):
29 | #
30 | # We recommend against adding the Pods directory to your .gitignore. However
31 | # you should judge for yourself, the pros and cons are mentioned at:
32 | # https://guides.cocoapods.org/using/using-cocoapods.html#should-i-check-the-pods-directory-into-source-control
33 | #
34 | # vendor/Pods/
35 |
36 | ## Documentation cache and generated files:
37 | /.yardoc/
38 | /_yardoc/
39 | /doc/
40 | /rdoc/
41 | .cache/
42 |
43 | ## Environment normalization:
44 | /.bundle/
45 | /vendor/bundle
46 | /lib/bundler/man/
47 |
48 | # for a library or gem, you might want to ignore these files since the code is
49 | # intended to run in multiple environments; otherwise, check them in:
50 | Gemfile.lock
51 | .ruby-version
52 | # .ruby-gemset
53 |
54 | # unless supporting rvm < 1.11.0 or doing something fancy, ignore this:
55 | .rvmrc
56 |
57 | # Used by RuboCop. Remote config files pulled in from inherit_from directive.
58 | # .rubocop-https?--*
59 | site/
60 |
--------------------------------------------------------------------------------
/.rubocop.yml:
--------------------------------------------------------------------------------
1 | require:
2 | - rubocop-rake
3 | - rubocop-rspec
4 |
5 | AllCops:
6 | TargetRubyVersion: 3.1
7 |
8 | Metrics/BlockLength:
9 | Exclude:
10 | - 'spec/**/*'
11 |
12 | Lint/ConstantDefinitionInBlock:
13 | Exclude:
14 | - 'spec/**/*'
15 |
16 | Layout/LineLength:
17 | Exclude:
18 | - 'spec/**/*'
19 |
20 | RSpec/FilePath:
21 | SpecSuffixOnly: true
22 |
23 | RSpec/MultipleExpectations:
24 | Max: 4
25 |
26 | RSpec/ExampleLength:
27 | Max: 10
28 | Exclude:
29 | - spec/examples/*
30 |
31 | RSpec/DescribeClass:
32 | Exclude:
33 | - spec/examples/*
34 | - spec/features/*
35 |
36 | RSpec/MethodLength:
37 | Exclude:
38 | - spec/examples/*
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | ## [0.1.3] - 2024-05-22
2 | - Bumped OpenAI client version.
3 | - Laying the work for more modes. See https://python.useinstructor.com/concepts/patching/ for more information.
4 | - Allow the OpenAI client to be used normally in case you just want to use other client features.
5 |
6 | ## [0.1.2] - 2024-05-17
7 | - Improved the ability to customize the function name and the LLM function call description (instructions).
8 |
9 | ## [0.1.1] - 2024-05-07
10 | - Improved documentation in /docs folder.
11 | - Readme updates.
12 | - Upgraded EasyTalk (many improvements and bug fixes).
13 |
14 | ## [0.1.0] - 2024-04-24
15 | - Initial release
16 |
--------------------------------------------------------------------------------
/Gemfile:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | source 'https://rubygems.org'
4 | gemspec
5 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 instructor
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # instructor-rb
2 |
3 | _Structured extraction in Ruby, powered by LLMs, designed for simplicity, transparency, and control._
4 |
5 | ---
6 |
7 | [](https://twitter.com/jxnlco)
8 | [](https://twitter.com/sbayona)
9 | [](https://jxnl.github.io/instructor-rb)
10 | [](https://github.com/instructor-ai/instructor-rb/issues)
11 | [](https://discord.gg/CV8sPM5k5Y)
12 |
13 | Instructor-rb is a Ruby library that makes it a breeze to work with structured outputs from large language models (LLMs). Built on top of [EasyTalk](https://github.com/sergiobayona/easy_talk), it provides a simple, transparent, and user-friendly API to manage validation, retries, and streaming responses. Get ready to supercharge your LLM workflows!
14 |
15 | # Getting Started
16 |
17 | 1. Install Instructor-rb at the command prompt if you haven't yet:
18 |
19 | ```bash
20 | $ gem install instructor-rb
21 | ```
22 |
23 | 2. In your Ruby project, require the gem:
24 |
25 | ```ruby
26 | require 'instructor'
27 | ```
28 |
29 | 3. At the beginning of your script, initialize and patch the client:
30 |
31 | For the OpenAI client:
32 |
33 | ```ruby
34 | client = Instructor.from_openai(OpenAI::Client)
35 | ```
36 | For the Anthropic client:
37 |
38 | ```ruby
39 | client = Instructor.from_anthropic(Anthropic::Client)
40 | ```
41 |
42 | ## Usage
43 |
44 | export your API key:
45 |
46 | ```bash
47 | export OPENAI_API_KEY=sk-...
48 | ```
49 |
50 | or for Anthropic:
51 |
52 | ```bash
53 | export ANTHROPIC_API_KEY=sk-...
54 | ```
55 |
56 | Then use Instructor by defining your schema in Ruby using the `define_schema` block and [EasyTalk](https://github.com/sergiobayona/easy_talk)'s schema definition syntax. Here's an example:
57 |
58 | ```ruby
59 | require 'instructor'
60 |
61 | class UserDetail
62 | include EasyTalk::Model
63 |
64 | define_schema do
65 | property :name, String
66 | property :age, Integer
67 | end
68 | end
69 |
70 | client = Instructor.from_openai(OpenAI::Client).new
71 |
72 | user = client.chat(
73 | parameters: {
74 | model: 'gpt-3.5-turbo',
75 | messages: [{ role: 'user', content: 'Extract Jason is 25 years old' }]
76 | },
77 | response_model: UserDetail
78 | )
79 |
80 | user.name
81 | # => "Jason"
82 | user.age
83 | # => 25
84 |
85 | ```
86 |
87 |
88 | > ℹ️ **Tip:** Support in other languages
89 |
90 | Check out ports to other languages below:
91 |
92 | - [Python](https://www.github.com/jxnl/instructor)
93 | - [TS/JS](https://github.com/instructor-ai/instructor-js/)
94 | - [Ruby](https://github.com/instructor-ai/instructor-rb)
95 | - [Elixir](https://github.com/thmsmlr/instructor_ex/)
96 |
97 | If you want to port Instructor to another language, please reach out to us on [Twitter](https://twitter.com/jxnlco) — we'd love to help you get started!
98 |
99 | ## Why use Instructor?
100 |
101 |
102 | 1. **OpenAI Integration** — Integrates seamlessly with OpenAI's API, facilitating efficient data management and manipulation.
103 |
104 | 2. **Customizable** — It offers significant flexibility. Users can tailor validation processes and define unique error messages.
105 |
106 | 3. **Tested and Trusted** — Its reliability is proven by extensive real-world application.
107 |
108 | [Installing Instructor](installation.md) is a breeze.
109 |
110 | ## Contributing
111 |
112 | If you want to help out, check out some of the issues marked as `good-first-issue` or `help-wanted`. Found [here](https://github.com/instructor-ai/instructor-rb/labels/good%20first%20issue). They could be anything from code improvements, a guest blog post, or a new cookbook.
113 |
114 | Check out the [contribution guide](docs/contributing.md) for details on how to set things up, testing, changesets and guidelines.
115 |
116 | ## License
117 |
118 | This project is licensed under the terms of the MIT License.
119 |
120 | ## TODO
121 | - [ ] Add patch
122 | - [ ] Mode.FUNCTIONS
123 | - [ ] Mode.TOOLS
124 | - [ ] Mode.MD_JSON
125 | - [ ] Mode.JSON
126 | - [ ] Add response_model
127 | - [ ] Support async
128 | - [ ] Support stream=True, Partial[T] and iterable[T]
129 | - [ ] Support Streaming
130 | - [ ] Optional/Maybe types
131 | - [ ] Add Tutorials, include in docs
132 | - [ ] Text Classification
133 | - [ ] Search Queries
134 | - [ ] Query Decomposition
135 | - [ ] Citations
136 | - [ ] Knowledge Graph
137 | - [ ] Self Critique
138 | - [ ] Image Extracting Tables
139 | - [ ] Moderation
140 | - [ ] Entity Resolution
141 | - [ ] Action Item and Dependency Mapping
142 | - [ ] Logging for Distillation / Finetuning
143 | - [ ] Add `llm_validator`
--------------------------------------------------------------------------------
/Rakefile:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require 'bundler/gem_tasks'
4 | require 'rspec/core/rake_task'
5 | require 'rubocop/rake_task'
6 |
7 | RSpec::Core::RakeTask.new(:spec)
8 |
9 | RuboCop::RakeTask.new
10 |
11 | task default: %i[spec rubocop]
12 |
--------------------------------------------------------------------------------
/bin/console:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env ruby
2 | # frozen_string_literal: true
3 |
4 | require 'bundler/setup'
5 | require 'instructor'
6 |
7 | require 'irb'
8 | IRB.start(__FILE__)
9 |
--------------------------------------------------------------------------------
/build_mkdocs.sh:
--------------------------------------------------------------------------------
1 | pip install -r scripts/requirements-docs.txt
2 | mkdocs build
--------------------------------------------------------------------------------
/docs/blog/.authors.yml:
--------------------------------------------------------------------------------
1 | authors:
2 | jxnl:
3 | name: Jason Liu
4 | description: Creator
5 | avatar: https://avatars.githubusercontent.com/u/4852235?v=4
6 | url: https://twitter.com/intent/follow?screen_name=jxnlco
7 | sergiobayona:
8 | name: Sergio Bayona
9 | description: Contributor
10 | avatar: https://avatars.githubusercontent.com/u/155783?v=4
11 | url: https://twitter.com/intent/follow?screen_name=sergiobayona
12 |
--------------------------------------------------------------------------------
/docs/blog/index.md:
--------------------------------------------------------------------------------
1 | # Welcome to the Instructor Blog
2 |
3 | If you wanted to check out the main blog check us out [here](https://jxnl.github.io/instructor/blog/) where we have a bunch of posts about Instructor and OpenAI, and how to think about building with structured prompting. This blog will be more focused on the technical details of the Ruby library.
4 |
--------------------------------------------------------------------------------
/docs/concepts/logging.md:
--------------------------------------------------------------------------------
1 | Work in progress, we're open to contributions
--------------------------------------------------------------------------------
/docs/concepts/patching.md:
--------------------------------------------------------------------------------
1 | # Patching
2 |
3 | Instructor enhances the client functionality with three new arguments for backwards compatibility. This allows use of the enhanced client as usual, with structured output benefits.
4 |
5 | - `response_model`: Defines the response type for `chat`.
6 | - `max_retries`: Determines retry attempts for failed `chat` validations.
7 | - `validation_context`: Provides extra context to the validation process.
8 |
9 | Instructor-rb only supports the 'tools' mode at the moment. Other modes will be added in the near future.
10 |
11 | ## Tool Calling
12 |
13 | This is the recommended method for OpenAI clients. Since tools is the default and only mode currently supported, there is no `mode:` argument available. It "just works" with the patched client.
14 |
15 |
--------------------------------------------------------------------------------
/docs/concepts/philosophy.md:
--------------------------------------------------------------------------------
1 | # Philosophy
2 |
3 | The instructor values [simplicity](https://eugeneyan.com/writing/simplicity/) and flexibility in leveraging language models (LLMs). It offers a streamlined approach for structured output, avoiding unnecessary dependencies or complex abstractions. Let [EasyTalk](https://github.com/sergiobayona/easy_talk) do the heavy lifting.
4 |
5 | > “Simplicity is a great virtue but it requires hard work to achieve it and education to appreciate it. And to make matters worse: complexity sells better.” — Edsger Dijkstra
6 |
7 | ## The Bridge to Object-Oriented Programming
8 |
9 | `instructor` acts as a bridge converting text-based LLM interactions into a familiar object-oriented format. Its integration with EasyTalk provides type hints, and runtime validation. By treating LLMs as methods returning typed objects, instructor makes [language models backwards compatible with code](https://www.youtube.com/watch?v=yj-wSRJwrrc), making them practical for everyday use while being complex enough for advanced applications.
10 |
11 | ## The zen of `instructor`
12 |
13 | Maintain the flexibility and power of Ruby, without unnecessary constraints.
14 |
15 | Begin with a method and a return type hint – simplicity is key. With my experience maintaining a large enterprise framework at my previous job over many years, I've learned that the goal of making a useful framework is minimizing regret, both for the author and hopefully for the user.
16 |
17 | 1. Define a Schema
18 | ```ruby
19 | class StructuredData
20 | include EasyTalk::Model
21 | end
22 | ```
23 | 2. Define properties and methods on your schema.
24 | 3. Encapsulate all your LLM logic into a function `#!ruby def extract(a)`
25 | 4. Define typed computations against your data with `#!ruby def compute(data: StructuredData)` or call methods on your schema `#!ruby data.compute()`
26 |
27 | It should be that simple.
28 |
29 | ## My Goals
30 |
31 | The goal for the library, [documentation](https://instructor-ai.github.io/instructor-rb/), and [blog](https://instructor-ai.github.io/instructor-rb/blog/), is to help you be a better Ruby programmer and as a result a better AI engineer.
32 |
33 | - The library is a result of my desire for simplicity.
34 | - The library should help maintain simplicity in your codebase.
35 | - I won't try to write prompts for you,
36 | - I don't try to create indirections or abstractions that make it hard to debug in the future
37 |
38 | Please note that the library is designed to be adaptable and open-ended, allowing you to customize and extend its functionality based on your specific requirements. If you have any further questions or ideas hit me up on [twitter](https://twitter.com/jxnlco)
39 |
40 | Cheers!
41 |
--------------------------------------------------------------------------------
/docs/concepts/schema.md:
--------------------------------------------------------------------------------
1 | # EasyTalk Schemas
2 |
3 | EasyTalk is a Ruby library for describing, generating and validating JSON Schema.
4 |
5 | ## Basic Usage
6 |
7 | ```Ruby
8 |
9 | class UserDetail
10 | include EasyTalk::Model
11 |
12 | define_schema do
13 | property :name, String
14 | property :age, Integer
15 | end
16 | end
17 | ```
18 |
19 | ## Descriptions are Prompts
20 |
21 | One of the core features of Instructor is that it can use these descriptions as part of the prompt.
22 |
23 | ```Ruby
24 | class UserDetail
25 | include EasyTalk::Model
26 |
27 | define_schema do
28 | description 'Fully extracted user detail'
29 | property :name, String, description: 'Your full name'
30 | property :age, Integer
31 | end
32 | end
33 | ```
34 |
35 | ## Model Composition
36 |
37 | EasyTalk models can themselves be composed of other models.
38 |
39 | ```Ruby
40 | class Address
41 | include EasyTalk::Model
42 |
43 | define_schema do
44 | property :street, String
45 | property :city, String
46 | end
47 | end
48 |
49 | class UserDetail
50 | include EasyTalk::Model
51 |
52 | define_schema do
53 | property :name, String
54 | property :address, Address
55 | end
56 | end
57 |
58 | ```
59 |
60 | ## Default Values
61 |
62 | In order to help the language model, we can also define defaults for the values.
63 |
64 | ```Ruby
65 | class UserDetail
66 | include EasyTalk::Model
67 |
68 | define_schema do
69 | property :name, String
70 | property :is_student, Boolean, default: false
71 | end
72 | end
73 |
74 | ```
75 | ## Arrays
76 |
77 | Arrays can be defined using the `T::Array[]` method.
78 |
79 | ```Ruby
80 | class UserDetail
81 | include EasyTalk::Model
82 |
83 | define_schema do
84 | property :name, String
85 | property :friends, T::Array[String]
86 | end
87 | end
88 |
89 | ```
90 |
91 | ## Enums
92 |
93 | Enums can be defined using the `enum` constraint.
94 |
95 | ```Ruby
96 | class UserDetail
97 | include EasyTalk::Model
98 |
99 | define_schema do
100 | property :name, String
101 | property :role, String, enum: %w[admin user]
102 | end
103 | end
104 |
105 | ```
--------------------------------------------------------------------------------
/docs/concepts/streaming.md:
--------------------------------------------------------------------------------
1 | Work in progress, we're open to contributions
--------------------------------------------------------------------------------
/docs/concepts/tips.md:
--------------------------------------------------------------------------------
1 | Work in progress, we're open to contributions.
--------------------------------------------------------------------------------
/docs/contributing.md:
--------------------------------------------------------------------------------
1 | We would love for you to contribute to `Instructor-rb`.
2 |
3 | ## Migrating Docs from Python
4 |
5 | There's a bunch of examples in the Python version, including documentation here: [python docs](https://useinstructor.com/examples/)
6 |
7 | If you want to contribute, please check out [issues](https://github.com/instructor-ai/instructor-rb/issues)
8 |
9 | ## Issues
10 |
11 | If you find a bug, please file an issue on [our issue tracker on GitHub](https://github.com/instructor-ai/instructor-rb/issues).
12 |
13 | To help us reproduce the bug, please provide a minimal reproducible example, including a code snippet and the full error message as well as:
14 |
15 | 1. The `response_model` you are using.
16 | 2. The `messages` you are using.
17 | 3. The `model` you are using.
18 |
19 | ---
20 |
21 | ## Environment Setup
22 |
23 | Ruby 3.2.1 is required to run the project.
24 |
25 |
26 | ### Installation
27 |
28 | 1. **Install Dependencies**:
29 | Run the following command to install the project dependencies:
30 |
31 | ```bash
32 | bundle install
33 | ```
34 |
35 | 2. **Environment Variables**:
36 | Set up the OpenAI API key in your environment variables.
37 |
38 | **Code Quality Tools**
39 |
40 | - This project uses rubocop.
41 |
42 | **Running Tests**
43 |
44 | - Execute tests using the following command:
45 |
46 | ```bash
47 | bundle exec rspec
48 | ```
49 |
50 | ### Running RuboCop
51 |
52 | ```bash
53 | bundle exec rubocop
54 | ```
55 |
56 | ### Pull Requests
57 |
58 | We welcome pull requests! There is plenty to do, and we are happy to discuss any contributions you would like to make.
59 |
60 | If it is not a small change, please start by [filing an issue](https://github.com/instructor-ai/instructor-rb/issues) first.
61 |
62 |
63 | ## Community and Support
64 |
65 | - Join our community on Discord: [Join Discord](https://discord.gg/DWHZdqpNgz)
66 | - Reach out on Twitter: [@sergiobayona](https://twitter.com/sergiobayona) [@jxnlco](https://twitter.com/jxnlco)
67 |
68 | ## Contributors
69 |
70 |
71 |
72 |
73 |
74 |
75 | ## Additional Resources
76 | Python is required to run the documentation locally using mkdocs.
77 |
78 | To improve your understanding of the documentation, here are some useful references:
79 |
80 | - **mkdocs serve:** The `mkdocs serve` command is used to preview your documentation locally during the development phase. When you run this command in your terminal, MkDocs starts a development server, allowing you to view and interact with your documentation in a web browser. This is helpful for checking how your changes look before publishing the documentation. Learn more in the [mkdocs serve documentation](https://www.mkdocs.org/commands/serve/).
81 |
82 | - **hl_lines in Code Blocks:** The `hl_lines` feature in code blocks allows you to highlight specific lines within the code block. This is useful for drawing attention to particular lines of code when explaining examples or providing instructions. You can specify the lines to highlight using the `hl_lines` option in your code block configuration. For more details and examples, you can refer to the [hl_lines documentation](https://www.mkdocs.org/user-guide/writing-your-docs/#syntax-highlighting).
83 |
84 | - **Admonitions:** Admonitions are a way to visually emphasize or call attention to certain pieces of information in your documentation. They come in various styles, such as notes, warnings, tips, etc. Admonitions provide a structured and consistent way to present important content. For usage examples and details on incorporating admonitions into your documentation, you can refer to the [admonitions documentation](https://www.mkdocs.org/user-guide/writing-your-docs/#admonitions).
85 |
86 | For more details about the documentation structure and features, refer to the [MkDocs Material documentation](https://squidfunk.github.io/mkdocs-material/).
87 |
88 | Thank you for your contributions, and happy coding!
--------------------------------------------------------------------------------
/docs/examples/action_items.md:
--------------------------------------------------------------------------------
1 | # Example: Extracting Action Items from Meeting Transcripts
2 |
3 | In this guide, we'll walk through how to extract action items from meeting transcripts using OpenAI's API. This use case is a good example for automating project management tasks, such as task assignment and priority setting.
4 |
5 | !!! tips "Motivation"
6 |
7 | Significant amount of time is dedicated to meetings, where action items are generated as the actionable outcomes of these discussions. Automating the extraction of action items can save time and guarantee that no critical tasks are overlooked.
8 |
9 | ## Defining the Structures
10 |
11 | We'll model a meeting transcript as a collection of **`Ticket`** objects, each representing an action item. Every **`Ticket`** can have multiple **`Subtask`** objects, representing smaller, manageable pieces of the main task.
12 |
13 | ```Ruby
14 | class Subtask
15 | include EasyTalk::Model
16 |
17 | define_schema do
18 | property :id, Integer, description: 'Unique identifier for the subtask'
19 | property :name, String, description: 'Informative title of the subtask'
20 | end
21 | end
22 |
23 | class Ticket
24 | include EasyTalk::Model
25 |
26 | PRIORITY = %w[low medium high].freeze
27 |
28 | define_schema do
29 | property :id, Integer, description: 'Unique identifier for the ticket'
30 | property :name, String, description: 'Title of the ticket'
31 | property :description, String, description: 'Detailed description of the ticket'
32 | property :priority, String, description: 'Priority level'
33 | property :assignees, T::Array[String], description: 'List of users assigned to the ticket'
34 | property :subtasks, T.nilable(T::Array[Subtask]), description: 'List of subtasks associated with the ticket'
35 | property :dependencies, T.nilable(T::Array[Integer]),
36 | description: 'List of ticket IDs that this ticket depends on'
37 | end
38 | end
39 |
40 | class ActionItems
41 | include EasyTalk::Model
42 |
43 | define_schema do
44 | property :items, T::Array[Ticket]
45 | end
46 | end
47 | ```
48 |
49 | ## Extracting Action Items
50 |
51 | To extract action items from a meeting transcript, we use the **`extract_action_items()`** method. It calls OpenAI's API, processes the text, and returns a set of action items modeled as **`ActionItems`**.
52 |
53 | ```Ruby
54 |
55 | def extract_action_items(data)
56 | client = Instructor.from_openai(OpenAI::Client).new
57 |
58 | client.chat(
59 | parameters: {
60 | model: 'gpt-3.5-turbo',
61 | messages: [
62 | {
63 | role: 'system',
64 | "content": 'The following is a transcript of a meeting between a manager and their team. The manager is assigning tasks to their team members and creating action items for them to complete.'
65 | },
66 | {
67 | "role": 'user',
68 | "content": "Create the action items for the following transcript: #{data}"
69 | }
70 | ]
71 | },
72 | response_model: ActionItems
73 | )
74 | end
75 | ```
76 |
77 | ## Evaluation and Testing
78 |
79 | To test the **`extract_action_items`** method, we provide it with a sample transcript, and then print the JSON representation of the extracted action items.
80 |
81 | ```Ruby
82 | data = <<~DATA
83 | Alice: Hey team, we have several critical tasks we need to tackle for the upcoming release. First, we need to work on improving the authentication system. It's a top priority.
84 |
85 | Bob: Got it, Alice. I can take the lead on the authentication improvements. Are there any specific areas you want me to focus on?
86 |
87 | Alice: Good question, Bob. We need both a front-end revamp and back-end optimization. So basically, two sub-tasks.
88 |
89 | Carol: I can help with the front-end part of the authentication system.
90 |
91 | Bob: Great, Carol. I'll handle the back-end optimization then.
92 |
93 | Alice: Perfect. Now, after the authentication system is improved, we have to integrate it with our new billing system. That's a medium priority task.
94 |
95 | Carol: Is the new billing system already in place?
96 |
97 | Alice: No, it's actually another task. So it's a dependency for the integration task. Bob, can you also handle the billing system?
98 |
99 | Bob: Sure, but I'll need to complete the back-end optimization of the authentication system first, so it's dependent on that.
100 |
101 | Alice: Understood. Lastly, we also need to update our user documentation to reflect all these changes. It's a low-priority task but still important.
102 |
103 | Carol: I can take that on once the front-end changes for the authentication system are done. So, it would be dependent on that.
104 |
105 | Alice: Sounds like a plan. Let's get these tasks modeled out and get started.
106 | DATA
107 |
108 | result = extract_action_items(data)
109 | puts(result.as_json)
110 | ```
111 |
112 | ## Visualizing the tasks
113 |
114 | In order to quickly visualize the data we used code interpreter to create a graphviz export of the json version of the ActionItems array.
115 |
116 | 
117 |
118 | ```json
119 | {
120 | "items": [
121 | {
122 | "id": 1,
123 | "name": "Improve Authentication System",
124 | "description": "Revamp the front-end and optimize the back-end of the authentication system",
125 | "priority": "High",
126 | "assignees": ["Bob", "Carol"],
127 | "subtasks": [
128 | {
129 | "id": 2,
130 | "name": "Front-end Revamp"
131 | },
132 | {
133 | "id": 3,
134 | "name": "Back-end Optimization"
135 | }
136 | ],
137 | "dependencies": []
138 | },
139 | {
140 | "id": 4,
141 | "name": "Integrate Authentication System with Billing System",
142 | "description": "Integrate the improved authentication system with the new billing system",
143 | "priority": "Medium",
144 | "assignees": ["Bob"],
145 | "subtasks": [],
146 | "dependencies": [1]
147 | },
148 | {
149 | "id": 5,
150 | "name": "Update User Documentation",
151 | "description": "Update the user documentation to reflect the changes in the authentication system",
152 | "priority": "Low",
153 | "assignees": ["Carol"],
154 | "subtasks": [],
155 | "dependencies": [2]
156 | }
157 | ]
158 | }
159 | ```
160 |
161 | In this example, the **`extract_action_items`** method successfully identifies and segments the action items, assigning them priorities, assignees, subtasks, and dependencies as discussed in the meeting.
162 |
163 | By automating this process, you can ensure that important tasks and details are not lost in the sea of meeting minutes, making project management more efficient and effective.
--------------------------------------------------------------------------------
/docs/examples/action_items.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/567-labs/instructor-rb/e96ab3b4e9c8c1d08c8f784c0ac091f33a12a916/docs/examples/action_items.png
--------------------------------------------------------------------------------
/docs/examples/classification.md:
--------------------------------------------------------------------------------
1 | # Classification
2 | Pending update
3 |
--------------------------------------------------------------------------------
/docs/examples/content_moderation.md:
--------------------------------------------------------------------------------
1 | # Content Moderation
2 | pending update
3 |
--------------------------------------------------------------------------------
/docs/examples/index.md:
--------------------------------------------------------------------------------
1 | # Cookbook
2 |
3 | !!! warning "Page under construction"
4 |
5 | This page is under construction. Please check back later. Consider contributing to this page by opening a PR! There's a bunch of examples in the Python version, including documentation here: [python docs](https://jxnl.github.io/instructor/examples/)
6 |
7 | If you want to contribute, please check out [issues](https://github.com/instructor-ai/instructor-rb/issues)
8 |
9 |
10 |
11 | ## Table of Contents
12 |
13 | - [How do I do classification?](./classification.md)
14 | - [How are complex queries decomposed into subqueries for a single request?](./query_decomposition.md)
15 | - [How are action items and dependencies generated from transcripts?](./action_items.md)
16 | - [How is AI self-assessment implemented with llm_validator?](./self_correction.md)
17 | - [How are exact citations retrieved using regular expressions and smart prompting?](./validated_citations.md)
18 | - [How to enable OpenAI's moderation](./content_moderation.md)
19 |
--------------------------------------------------------------------------------
/docs/examples/query_decomposition.md:
--------------------------------------------------------------------------------
1 | # Query Decomposition
2 | Pending update
3 |
--------------------------------------------------------------------------------
/docs/examples/self_correction.md:
--------------------------------------------------------------------------------
1 | # Self Correction
2 | Pending update
3 |
--------------------------------------------------------------------------------
/docs/examples/validated_citations.md:
--------------------------------------------------------------------------------
1 | # Validated Citations
2 | Pending update
3 |
--------------------------------------------------------------------------------
/docs/help.md:
--------------------------------------------------------------------------------
1 | !!! warning "Page under construction"
2 |
3 | This page is under construction. Please check back later. Consider contributing to this page by opening a PR!
4 |
5 |
6 | # Getting help with Instructor
7 |
8 | If you need help getting started with Instructor or with advanced usage, the following sources may be useful.
9 |
10 | ## :fontawesome-brands-discord: Discord
11 |
12 | The [Discord](https://discord.gg/DWHZdqpNgz) is the best place to get help. You can ask questions, get help with debugging, and discuss Instructor with other users.
13 |
14 | ## :material-creation: Concepts
15 |
16 | The [concepts](concepts/prompting.md) section explains the core concepts of Instructor and how to prompt with models.
17 |
18 | ## :material-chef-hat: Cookbooks
19 |
20 | The [cookbooks](examples/index.md) are a great place to start. They contain a variety of examples that demonstrate how to use Instructor in different scenarios.
21 |
22 | ## :material-github: GitHub Discussions
23 |
24 | [GitHub discussions](https://github.com/instructor-ai/instructor-rb/discussions) are useful for asking questions, your question and the answer will help everyone.
25 |
26 | ## :material-github: GitHub Issues
27 |
28 | [GitHub issues](https://github.com/instructor-ai/instructor-rb/issues) are useful for reporting bugs or requesting new features.
29 |
30 | ## :material-twitter: Twitter
31 |
32 | You can also reach out to me on [Twitter](https://twitter.com/jxnlco) if you have any questions or ideas.
33 |
--------------------------------------------------------------------------------
/docs/index.md:
--------------------------------------------------------------------------------
1 | # instructor-rb
2 |
3 | _Structured extraction in Ruby, powered by llms, designed for simplicity, transparency, and control._
4 |
5 | ---
6 |
7 | [](https://twitter.com/jxnlco)
8 | [](https://twitter.com/sergiobayona)
9 | [](https://jxnl.github.io/instructor-rb)
10 | [](https://github.com/instructor-ai/instructor-rb/issues)
11 | [](https://discord.gg/DWHZdqpNgz)
12 |
13 | Dive into the world of Ruby-based structured extraction, powered by OpenAI's function calling API and Ruby schema validation with type hinting. Instructor stands out for its simplicity, transparency, and user-centric design. Whether you're a seasoned developer or just starting out, you'll find Instructor's approach intuitive and steerable.
14 |
15 | Check us out in [Python](https://jxnl.github.io/instructor/), [Elixir](https://github.com/thmsmlr/instructor_ex/), [PHP](https://github.com/cognesy/instructor-php/) and [Ruby](https://github.com/instructor-ai/instructor-rb).
16 |
17 | If you want to port Instructor to another language, please reach out to us on [Twitter](https://twitter.com/jxnlco) we'd love to help you get started!
18 |
19 | ## Usage
20 |
21 | To check out all the tips and tricks to prompt and extract data, check out the [documentation](https://instructor-ai.github.io/instructor-rb/tips/prompting/).
22 |
23 | Installation is as simple as:
24 |
25 | ```bash
26 | gem install instructor-rb
27 | ```
28 |
29 |
30 | ```Ruby
31 | require 'instructor'
32 |
33 | OpenAI.configure do |config|
34 | config.access_token = ENV.fetch("OPENAI_ACCESS_TOKEN")
35 | config.organization_id = ENV.fetch("OPENAI_ORGANIZATION_ID") # Optional.
36 | end
37 |
38 | class UserDetail
39 | include EasyTalk::Model
40 |
41 | define_schema do
42 | property :name, String
43 | property :age, Integer
44 | end
45 | end
46 |
47 | client = Instructor.from_openai(OpenAI::Client).new
48 |
49 | user = client.chat(
50 | parameters: {
51 | model: 'gpt-3.5-turbo',
52 | messages: [{ role: 'user', content: 'Extract Jason is 25 years old' }]
53 | },
54 | response_model: UserDetail
55 | )
56 |
57 | user.name
58 | # => "Jason"
59 | user.age
60 | # => 25
61 | ```
62 |
63 | ## Why use Instructor?
64 |
65 | The question of using Instructor is fundamentally a question of why to use structured, schema-validated extraction at all.
66 |
67 | 1. **Powered by OpenAI** — Instructor is powered by OpenAI's function calling API. This means you can use the same API for both prompting and extraction.
68 |
69 | 2. **Ruby Schema Validation** — Instructor uses Ruby schema validation with type hinting. This means you can validate your data before using it.
70 |
71 | ## More Examples
72 |
73 | If you'd like to see more check out our [cookbook](examples/index.md).
74 |
75 | ## Contributing
76 |
77 | If you want to help out, checkout some of the issues marked as `good-first-issue` or `help-wanted`. Found [here](https://github.com/instructor-ai/instructor-rb/labels/good%20first%20issue). They could be anything from code improvements, a guest blog post, or a new cook book.
78 |
79 | ## License
80 |
81 | This project is licensed under the terms of the MIT License.
82 |
--------------------------------------------------------------------------------
/docs/overrides/main.html:
--------------------------------------------------------------------------------
1 | {% extends "base.html" %}
2 |
6 |
7 | {% block announce %} For updates follow
8 | @jxnlco on
9 |
10 |
11 | {% include ".icons/fontawesome/brands/twitter.svg" %}
12 |
13 | Twitter
14 |
15 | and
16 |
17 | {% include ".icons/fontawesome/solid/star.svg" %}
18 |
19 | us on
20 |
21 |
22 | {% include ".icons/fontawesome/brands/github.svg" %}
23 |
24 | GitHub. If you don't like Ruby, check out the
26 | Python,
27 |
28 | Elixir
29 | and
30 | JavaScript
31 | ports. {% endblock %}
32 |
--------------------------------------------------------------------------------
/ellipsis.Dockerfile:
--------------------------------------------------------------------------------
1 | #======================================
2 | # This Dockerfile was generated by Ellipsis.
3 | # It should be referenced from your `ellipsis.yaml` config file.
4 | # For more details, see documentation: https://docs.ellipsis.dev
5 | # Test with: $ docker build -f ellipsis.Dockerfile .
6 | #======================================
7 |
8 | FROM ubuntu:20.04
9 | RUN apt-get update && apt-get install -y git build-essential
10 |
11 | WORKDIR /app
12 | COPY . .
13 |
--------------------------------------------------------------------------------
/ellipsis.yaml:
--------------------------------------------------------------------------------
1 | # See https://docs.ellipsis.dev for all available configurations.
2 |
3 | version: 1.1
4 | pr_review:
5 | auto_review_enabled: true # enable auto-review of PRs
6 | auto_summarize_pr: true # enable auto-summary of PRs
7 | confidence_threshold: 0.5 # Threshold for how confident Ellipsis needs to be in order to leave a comment, in range [0.0-1.0]
8 | rules: # customize behavior
9 | - "Code should be DRY (Don't Repeat Yourself)"
10 | - "There should no secrets or credentials in the code"
11 |
12 | # users can customize their own behavior
13 | user_overrides:
14 | # @hbrooks has disabled auto-summary and added a custom rule
15 | - usernames: ["hbrooks"]
16 | auto_summarize_pr: false
17 | rules:
18 | - "Code should be DRY (Don't Repeat Yourself)"
19 |
20 |
21 | # Below is an example of how to configure Ellipsis to build and run your repo.
22 | # Uncomment and replace with your own Dockerfile and commands.
23 |
24 | dockerfile: "ellipsis.Dockerfile" # this will be used to build your repo
25 |
26 | #=======================
27 | # commands:
28 | # - name: "build"
29 | # description: "This command compiles the code and builds the project"
30 | # command: "yarn build"
31 | # return_output_on_success: false # If output isn't useful when the command succeeds
32 | # auto_repair: true # Run this after every code change
33 | # - name: "lint_fix"
34 | # description: "Lints the code in fix mode, which will fix some errors."
35 | # command: "yarn lint:fix"
36 | # return_output_on_success: false
37 | # auto_repair: true
38 | #=======================
39 |
--------------------------------------------------------------------------------
/instructor-rb.gemspec:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require_relative 'lib/instructor/version'
4 |
Gem::Specification.new do |spec|
  spec.name = 'instructor-rb'
  spec.version = Instructor::VERSION
  spec.authors = ['Sergio Bayona', 'Jason Liu']
  spec.email = ['bayona.sergio@gmail.com', 'jason@jxnl.co']

  spec.summary = 'Structured extraction in Ruby, powered by llms.'
  spec.description = 'Explore the power of LLM structured extraction in Ruby with the Instructor gem.'
  spec.homepage = 'https://github.com/instructor-ai/instructor-rb'
  spec.license = 'MIT'
  spec.required_ruby_version = '>= 3.1.0'

  spec.metadata['allowed_push_host'] = 'https://rubygems.org'
  spec.metadata['homepage_uri'] = spec.homepage
  spec.metadata['source_code_uri'] = 'https://github.com/instructor-ai/instructor-rb'
  spec.metadata['changelog_uri'] = 'https://github.com/instructor-ai/instructor-rb/blob/main/CHANGELOG.md'

  # Package every git-tracked file except this gemspec itself and
  # development-only paths (specs, git metadata, CI workflows, Gemfile).
  packaging_exclusions = %w[spec/ .git .github Gemfile]
  spec.files = Dir.chdir(__dir__) do
    `git ls-files -z`.split("\x0").reject do |tracked_file|
      File.expand_path(tracked_file) == __FILE__ ||
        tracked_file.start_with?(*packaging_exclusions)
    end
  end

  spec.require_paths = ['lib']

  # Runtime dependencies.
  spec.add_dependency 'activesupport', '~> 7.0'
  spec.add_dependency 'anthropic', '~> 0.2'
  spec.add_dependency 'easy_talk', '~> 0.2'
  spec.add_dependency 'ruby-openai', '~> 7'

  # Development-only dependencies: debugging, build, testing and linting.
  spec.add_development_dependency 'pry-byebug', '~> 3.10'
  spec.add_development_dependency 'rake', '~> 13.1'
  spec.add_development_dependency 'rspec', '~> 3.0'
  spec.add_development_dependency 'rspec-json_expectations', '~> 2.0'
  spec.add_development_dependency 'rubocop', '~> 1.21'
  spec.add_development_dependency 'rubocop-rake', '~> 0.6'
  spec.add_development_dependency 'rubocop-rspec', '~> 2.29'
  spec.add_development_dependency 'vcr', '~> 6.0'
  spec.add_development_dependency 'webmock', '~> 3.13'
end
48 |
--------------------------------------------------------------------------------
/lib/instructor.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require 'openai'
4 | require 'anthropic'
5 | require 'easy_talk'
6 | require 'active_support/all'
7 | require_relative 'instructor/version'
8 | require_relative 'instructor/openai/patch'
9 | require_relative 'instructor/openai/response'
10 | require_relative 'instructor/anthropic/patch'
11 | require_relative 'instructor/anthropic/response'
12 | require_relative 'instructor/mode'
13 |
# Instructor makes it easy to reliably get structured data like JSON from Large Language Models (LLMs)
# like GPT-3.5, GPT-4, GPT-4-Vision
module Instructor
  @mode = nil

  # Base error class for all Instructor-specific failures.
  class Error < ::StandardError; end

  # The ValidationError class represents an error that occurs during validation.
  class ValidationError < ::StandardError; end

  class << self
    # @return [Symbol, nil] the mode selected by the most recent `from_openai` call.
    attr_reader :mode

    # Patches the OpenAI client to add the following functionality:
    # - Retries on exceptions
    # - Accepts and validates a response model
    # - Accepts a validation_context argument
    #
    # @param openai_client [OpenAI::Client] The OpenAI client to be patched.
    # @param mode [Symbol] The mode to be used. Default is `Instructor::Mode::TOOLS.function`.
    # @return [OpenAI::Client] The patched OpenAI client.
    def from_openai(openai_client, mode: Instructor::Mode::TOOLS.function)
      @mode = mode
      openai_client.prepend(Instructor::OpenAI::Patch)
    end

    # @param anthropic_client [Anthropic::Client] The Anthropic client to be patched.
    # @return [Anthropic::Client] The patched Anthropic client.
    def from_anthropic(anthropic_client)
      anthropic_client.prepend(Instructor::Anthropic::Patch)
    end
  end
end
47 |
--------------------------------------------------------------------------------
/lib/instructor/anthropic/patch.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require 'anthropic'
4 | require 'instructor/base/patch'
5 |
# The Instructor module provides functionality for interacting with Anthropic's messages API.
module Instructor
  module Anthropic
    # The `Patch` module provides methods for patching and modifying the Anthropic client behavior.
    module Patch
      include Instructor::Base::Patch

      # Sends a message request to the API and processes the response.
      #
      # @param parameters [Hash] The parameters for the request as expected by the Anthropic messages API.
      # @param response_model [Class] The response model class.
      # @param max_retries [Integer] The maximum number of retries. Default is 0.
      # @param validation_context [Hash] The validation context for the parameters. Optional.
      # @return [Object] The processed response.
      def messages(parameters:, response_model: nil, max_retries: 0, validation_context: nil)
        with_retries(max_retries, [JSON::ParserError, Instructor::ValidationError, Faraday::ParsingError]) do
          model = determine_model(response_model)
          function = build_function(model)
          # The messages API requires max_tokens; default it to 1024 without
          # mutating the caller's hash (the original assigned into `parameters`).
          parameters = { max_tokens: 1024 }.merge(parameters)
          parameters = prepare_parameters(parameters, validation_context, function)
          # Tool use was in beta at the time; the header opts this request in.
          ::Anthropic.configuration.extra_headers = { 'anthropic-beta' => 'tools-2024-04-04' }
          response = ::Anthropic::Client.json_post(path: '/messages', parameters:)
          process_response(response, model)
        end
      end

      # Processes the API response.
      #
      # @param response [Hash] The API response.
      # @param model [Class] The response model class.
      # @return [Object] The processed response.
      def process_response(response, model)
        parsed_response = Response.new(response).parse
        iterable? ? process_multiple_responses(parsed_response, model) : process_single_response(parsed_response, model)
      end

      # Builds the function details for the API request.
      #
      # @param model [Class] The response model class.
      # @return [Hash] The function details (name, description, input schema).
      def build_function(model)
        {
          name: generate_function_name(model),
          description: generate_description(model),
          input_schema: model.json_schema
        }
      end
    end
  end
end
56 |
--------------------------------------------------------------------------------
/lib/instructor/anthropic/response.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
module Instructor
  module Anthropic
    # The Response class represents the response received from the Anthropic messages API.
    # It takes the raw response and provides convenience methods to access the
    # tool calls and their parsed input arguments.
    class Response
      # Initializes a new instance of the Response class.
      #
      # @param response [Hash] The raw response received from the Anthropic API.
      def initialize(response)
        @response = response
      end

      # Parses the tool call(s) and returns the input arguments.
      #
      # @return [Hash, Array<Hash>] A single arguments hash when exactly one
      #   tool call is present, otherwise an array of argument hashes.
      # @raise [StandardError] if the API response contains an error.
      def parse
        raise StandardError, error_message if error?

        single_response? ? arguments.first : arguments
      end

      private

      # The raw `content` field of the response (an array of content blocks
      # on success; may be absent or another shape on malformed responses).
      def content
        @response['content']
      end

      # The `tool_use` content blocks of the response.
      # Returns an empty array when `content` is not an array. The original
      # returned `false` in that case, which made `tool_calls&.size` raise
      # NoMethodError (`&.` only guards nil, not false) and broke `arguments`.
      def tool_calls
        return [] unless content.is_a?(Array)

        content.select { |c| c['type'] == 'tool_use' }
      end

      def single_response?
        tool_calls.size == 1
      end

      def arguments
        tool_calls.map { |tc| tc['input'] }
      end

      def error?
        @response['type'] == 'error'
      end

      def error_message
        "#{@response.dig('error', 'type')} - #{@response.dig('error', 'message')}"
      end
    end
  end
end
58 |
--------------------------------------------------------------------------------
/lib/instructor/base/patch.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Instructor
4 | module Base
5 | # The `Patch` module provides common methods for patching and modifying the client behavior.
6 | module Patch
7 | # Generates the function name for the API request.
8 | # You can customize the function name for the LLM by adding a `title` key to the schema.
9 | # Example:
10 | # ```ruby
11 | # class User
12 | # include EasyTalk::Model
13 | # define_schema do
14 | # title 'User'
15 | # property :name, String
16 | # property :age, Integer
17 | # end
18 | # end
19 | # ```
20 | # The function name will be `User`.
21 | # If the `title` key is not present, the function name will be the model's name.
22 | # @param model [Class] The response model class.
23 | # @return [String] The generated function name.
24 | def generate_function_name(model)
25 | model.schema.fetch(:title, model.name)
26 | end
27 |
28 | # Generates the description for the function.
29 | # You can customize the instructions for the LLM by adding an `instructions` class method to the response model.
30 | # Example:
31 | # ```ruby
32 | # class User
33 | # include EasyTalk::Model
34 | # def self.instructions
35 | # 'Extract the user name and age from the response'
36 | # end
37 | #
38 | # define_schema do ...
39 | # end
40 | # ```
41 | #
42 | # @param model [Class] The response model class.
43 | # @return [String] The generated description.
44 | def generate_description(model)
45 | if model.respond_to?(:instructions)
46 | raise Instructor::Error, 'The instructions must be a string' unless model.instructions.is_a?(String)
47 |
48 | model.instructions
49 | else
50 | "Correctly extracted `#{model.name}` with all the required parameters with correct types"
51 | end
52 | end
53 |
54 | private
55 |
56 | # Executes a block of code with retries in case of specific exceptions.
57 | #
58 | # @param max_retries [Integer] The maximum number of retries.
59 | # @param exceptions [Array] The exceptions to catch and retry.
60 | # @yield The block of code to execute.
61 | def with_retries(max_retries, exceptions, &block)
62 | attempts = 0
63 | begin
64 | block.call
65 | rescue *exceptions
66 | attempts += 1
67 | retry if attempts < max_retries
68 | raise
69 | end
70 | end
71 |
72 | # Prepares the parameters for the chat request.
73 | #
74 | # @param parameters [Hash] The original parameters.
75 | # @param validation_context [Hash] The validation context for the parameters.
76 | # @param function [Hash] The function details.
77 | # @return [Hash] The prepared parameters.
78 | def prepare_parameters(parameters, validation_context, function)
79 | # parameters # fetch the parameters's max_token or set it to 1024
80 | parameters = apply_validation_context(parameters, validation_context)
81 | parameters.merge(tools: [function])
82 | end
83 |
84 | # Processes multiple responses from the API.
85 | #
86 | # @param parsed_response [Array] The parsed API responses.
87 | # @param model [Class] The response model class.
88 | # @return [Array