├── .github └── workflows │ └── dev-build.yml ├── .gitignore ├── .rubocop.yml ├── CHANGELOG.md ├── Gemfile ├── LICENSE ├── README.md ├── Rakefile ├── bin └── console ├── build_mkdocs.sh ├── docs ├── blog │ ├── .authors.yml │ └── index.md ├── concepts │ ├── logging.md │ ├── patching.md │ ├── philosophy.md │ ├── schema.md │ ├── streaming.md │ └── tips.md ├── contributing.md ├── examples │ ├── action_items.md │ ├── action_items.png │ ├── classification.md │ ├── content_moderation.md │ ├── index.md │ ├── query_decomposition.md │ ├── self_correction.md │ └── validated_citations.md ├── help.md ├── index.md └── overrides │ └── main.html ├── ellipsis.Dockerfile ├── ellipsis.yaml ├── instructor-rb.gemspec ├── lib ├── instructor.rb └── instructor │ ├── anthropic │ ├── patch.rb │ └── response.rb │ ├── base │ └── patch.rb │ ├── mode.rb │ ├── openai │ ├── patch.rb │ └── response.rb │ └── version.rb ├── mkdocs.yml ├── scripts └── requirements-docs.txt └── spec ├── anthropic └── patch_spec.rb ├── examples └── autoticketer_spec.rb ├── features ├── basic_use_spec.rb └── iterable_spec.rb ├── helpers └── autoticketer_models.rb ├── instructor_spec.rb ├── openai ├── patch_spec.rb └── response_spec.rb ├── spec_helper.rb └── vcr_cassettes ├── anthropic_patch ├── invalid_response.yml ├── valid_response.yml └── with_validation_context.yml ├── autoticketer └── generate.yml ├── basic_spec └── valid_response.yml ├── iterable_spec └── valid_response.yml └── patching_spec ├── invalid_response.yml ├── standard_usage.yml ├── valid_response.yml └── with_validation_context.yml /.github/workflows/dev-build.yml: -------------------------------------------------------------------------------- 1 | name: Dev Build 2 | 3 | on: 4 | push: 5 | branches: [ "main" ] 6 | pull_request: 7 | branches: [ "main" ] 8 | 9 | permissions: 10 | contents: read 11 | 12 | jobs: 13 | test: 14 | 15 | runs-on: ubuntu-latest 16 | strategy: 17 | matrix: 18 | ruby-version: ['3.3'] 19 | 20 | steps: 21 | - uses: 
actions/checkout@v4 22 | - name: Set up Ruby 23 | 24 | uses: ruby/setup-ruby@v1 25 | with: 26 | ruby-version: ${{ matrix.ruby-version }} 27 | bundler-cache: true # runs 'bundle install' and caches installed gems automatically 28 | - name: Run tests 29 | run: bundle exec rake 30 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.gem 2 | *.rbc 3 | .DS_Store 4 | /.config 5 | /coverage/ 6 | /InstalledFiles 7 | /pkg/ 8 | /spec/reports/ 9 | /spec/examples.txt 10 | /test/tmp/ 11 | /test/version_tmp/ 12 | /tmp/ 13 | 14 | # Used by dotenv library to load environment variables. 15 | # .env 16 | 17 | # Ignore Byebug command history file. 18 | .byebug_history 19 | 20 | ## Specific to RubyMotion: 21 | .dat* 22 | .repl_history 23 | build/ 24 | *.bridgesupport 25 | build-iPhoneOS/ 26 | build-iPhoneSimulator/ 27 | 28 | ## Specific to RubyMotion (use of CocoaPods): 29 | # 30 | # We recommend against adding the Pods directory to your .gitignore. However 31 | # you should judge for yourself, the pros and cons are mentioned at: 32 | # https://guides.cocoapods.org/using/using-cocoapods.html#should-i-check-the-pods-directory-into-source-control 33 | # 34 | # vendor/Pods/ 35 | 36 | ## Documentation cache and generated files: 37 | /.yardoc/ 38 | /_yardoc/ 39 | /doc/ 40 | /rdoc/ 41 | .cache/ 42 | 43 | ## Environment normalization: 44 | /.bundle/ 45 | /vendor/bundle 46 | /lib/bundler/man/ 47 | 48 | # for a library or gem, you might want to ignore these files since the code is 49 | # intended to run in multiple environments; otherwise, check them in: 50 | Gemfile.lock 51 | .ruby-version 52 | # .ruby-gemset 53 | 54 | # unless supporting rvm < 1.11.0 or doing something fancy, ignore this: 55 | .rvmrc 56 | 57 | # Used by RuboCop. Remote config files pulled in from inherit_from directive. 
58 | # .rubocop-https?--* 59 | site/ 60 | -------------------------------------------------------------------------------- /.rubocop.yml: -------------------------------------------------------------------------------- 1 | require: 2 | - rubocop-rake 3 | - rubocop-rspec 4 | 5 | AllCops: 6 | TargetRubyVersion: 3.1 7 | 8 | Metrics/BlockLength: 9 | Exclude: 10 | - 'spec/**/*' 11 | 12 | Lint/ConstantDefinitionInBlock: 13 | Exclude: 14 | - 'spec/**/*' 15 | 16 | Layout/LineLength: 17 | Exclude: 18 | - 'spec/**/*' 19 | 20 | RSpec/FilePath: 21 | SpecSuffixOnly: true 22 | 23 | RSpec/MultipleExpectations: 24 | Max: 4 25 | 26 | RSpec/ExampleLength: 27 | Max: 10 28 | Exclude: 29 | - spec/examples/* 30 | 31 | RSpec/DescribeClass: 32 | Exclude: 33 | - spec/examples/* 34 | - spec/features/* 35 | 36 | RSpec/MethodLength: 37 | Exclude: 38 | - spec/examples/* -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | ## [0.1.3] - 2024-05-22 2 | - Bumped OpenAI client version. 3 | - Laying the work for more modes. See https://python.useinstructor.com/concepts/patching/ for more information. 4 | - Allow the OpenAI client to be used normally in case you just want to use other client features. 5 | 6 | ## [0.1.2] - 2024-05-17 7 | - Improved the ability to customize the function name and the LLM function call description (instructions). 8 | 9 | ## [0.1.1] - 2024-05-07 10 | - Improved documentation in /docs folder. 11 | - Readme updates. 12 | - Upgraded EasyTalk (many improvements and bug fixes). 
13 | 14 | ## [0.1.0] - 2024-04-24 15 | - Initial release 16 | -------------------------------------------------------------------------------- /Gemfile: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | source 'https://rubygems.org' 4 | gemspec 5 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 instructor 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # instructor-rb 2 | 3 | _Structured extraction in Ruby, powered by llms, designed for simplicity, transparency, and control._ 4 | 5 | --- 6 | 7 | [![Twitter Follow](https://img.shields.io/twitter/follow/jxnlco?style=social)](https://twitter.com/jxnlco) 8 | [![Twitter Follow](https://img.shields.io/twitter/follow/sbayona?style=social)](https://twitter.com/sbayona) 9 | [![Documentation](https://img.shields.io/badge/docs-available-brightgreen)](https://jxnl.github.io/instructor-rb) 10 | [![GitHub issues](https://img.shields.io/github/issues/instructor-ai/instructor-rb.svg)](https://github.com/instructor-ai/instructor-rb/issues) 11 | [![Discord](https://img.shields.io/discord/1192334452110659664?label=discord)](https://discord.gg/CV8sPM5k5Y) 12 | 13 | Instructor-rb is a Ruby library that makes it a breeze to work with structured outputs from large language models (LLMs). Built on top of [EasyTalk](https://github.com/sergiobayona/easy_talk), it provides a simple, transparent, and user-friendly API to manage validation, retries, and streaming responses. Get ready to supercharge your LLM workflows! 14 | 15 | # Getting Started 16 | 17 | 1. Install Instructor-rb at the command prompt if you haven't yet: 18 | 19 | ```bash 20 | $ gem install instructor-rb 21 | ``` 22 | 23 | 2. In your Ruby project, require the gem: 24 | 25 | ```ruby 26 | require 'instructor' 27 | ``` 28 | 29 | 3. At the beginning of your script, initialize and patch the client: 30 | 31 | For the OpenAI client: 32 | 33 | ```ruby 34 | client = Instructor.from_openai(OpenAI::Client) 35 | ``` 36 | For the Anthropic client: 37 | 38 | ```ruby 39 | client = Instructor.from_anthropic(Anthropic::Client) 40 | ``` 41 | 42 | ## Usage 43 | 44 | export your API key: 45 | 46 | ```bash 47 | export OPENAI_API_KEY=sk-... 
48 | ``` 49 | 50 | or for Anthropic: 51 | 52 | ```bash 53 | export ANTHROPIC_API_KEY=sk-... 54 | ``` 55 | 56 | Then use Instructor by defining your schema in Ruby using the `define_schema` block and [EasyTalk](https://github.com/sergiobayona/easy_talk)'s schema definition syntax. Here's an example in: 57 | 58 | ```ruby 59 | require 'instructor' 60 | 61 | class UserDetail 62 | include EasyTalk::Model 63 | 64 | define_schema do 65 | property :name, String 66 | property :age, Integer 67 | end 68 | end 69 | 70 | client = Instructor.from_openai(OpenAI::Client).new 71 | 72 | user = client.chat( 73 | parameters: { 74 | model: 'gpt-3.5-turbo', 75 | messages: [{ role: 'user', content: 'Extract Jason is 25 years old' }] 76 | }, 77 | response_model: UserDetail 78 | ) 79 | 80 | user.name 81 | # => "Jason" 82 | user.age 83 | # => 25 84 | 85 | ``` 86 | 87 | 88 | > ℹ️ **Tip:** Support in other languages 89 | 90 | Check out ports to other languages below: 91 | 92 | - [Python](https://www.github.com/jxnl/instructor) 93 | - [TS/JS](https://github.com/instructor-ai/instructor-js/) 94 | - [Ruby](https://github.com/instructor-ai/instructor-rb) 95 | - [Elixir](https://github.com/thmsmlr/instructor_ex/) 96 | 97 | If you want to port Instructor to another language, please reach out to us on [Twitter](https://twitter.com/jxnlco) we'd love to help you get started! 98 | 99 | ## Why use Instructor? 100 | 101 | 102 | 1. **OpenAI Integration** — Integrates seamlessly with OpenAI's API, facilitating efficient data management and manipulation. 103 | 104 | 2. **Customizable** — It offers significant flexibility. Users can tailor validation processes and define unique error messages. 105 | 106 | 3. **Tested and Trusted** — Its reliability is proven by extensive real-world application. 107 | 108 | [Installing Instructor](installation.md) is a breeze. 109 | 110 | ## Contributing 111 | 112 | If you want to help out, checkout some of the issues marked as `good-first-issue` or `help-wanted`. 
Found [here](https://github.com/instructor-ai/instructor-js/labels/good%20first%20issue). They could be anything from code improvements, a guest blog post, or a new cook book. 113 | 114 | Checkout the [contribution guide]() for details on how to set things up, testing, changesets and guidelines. 115 | 116 | ## License 117 | 118 | This project is licensed under the terms of the MIT License. 119 | 120 | ## TODO 121 | - [ ] Add patch 122 | - [ ] Mode.FUNCTIONS 123 | - [ ] Mode.TOOLS 124 | - [ ] Mode.MD_JSON 125 | - [ ] Mode.JSON 126 | - [ ] Add response_model 127 | - [ ] Support async 128 | - [ ] Support stream=True, Partial[T] and iterable[T] 129 | - [ ] Support Streaming 130 | - [ ] Optional/Maybe types 131 | - [ ] Add Tutorials, include in docs 132 | - [ ] Text Classification 133 | - [ ] Search Queries 134 | - [ ] Query Decomposition 135 | - [ ] Citations 136 | - [ ] Knowledge Graph 137 | - [ ] Self Critique 138 | - [ ] Image Extracting Tables 139 | - [ ] Moderation 140 | - [ ] Entity Resolution 141 | - [ ] Action Item and Dependency Mapping 142 | - [ ] Logging for Distillation / Finetuning 143 | - [ ] Add `llm_validator` -------------------------------------------------------------------------------- /Rakefile: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require 'bundler/gem_tasks' 4 | require 'rspec/core/rake_task' 5 | require 'rubocop/rake_task' 6 | 7 | RSpec::Core::RakeTask.new(:spec) 8 | 9 | RuboCop::RakeTask.new 10 | 11 | task default: %i[spec rubocop] 12 | -------------------------------------------------------------------------------- /bin/console: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | # frozen_string_literal: true 3 | 4 | require 'bundler/setup' 5 | require 'instructor' 6 | 7 | require 'irb' 8 | IRB.start(__FILE__) 9 | -------------------------------------------------------------------------------- 
/build_mkdocs.sh: -------------------------------------------------------------------------------- 1 | pip install -r scripts/requirements-docs.txt 2 | mkdocs build -------------------------------------------------------------------------------- /docs/blog/.authors.yml: -------------------------------------------------------------------------------- 1 | authors: 2 | jxnl: 3 | name: Jason Liu 4 | description: Creator 5 | avatar: https://avatars.githubusercontent.com/u/4852235?v=4 6 | url: https://twitter.com/intent/follow?screen_name=jxnlco 7 | sergiobayona: 8 | name: Sergio Bayona 9 | description: Contributor 10 | avatar: https://avatars.githubusercontent.com/u/155783?v=4 11 | url: https://twitter.com/intent/follow?screen_name=sergiobayona 12 | -------------------------------------------------------------------------------- /docs/blog/index.md: -------------------------------------------------------------------------------- 1 | # Welcome to the Instructor Blog 2 | 3 | If you wanted to check out the main blog check us out [here](https://jxnl.github.io/instructor/blog/) where we have a bunch of posts about Instructor and OpenAI, and how to think about building with structured prompting. This blog will be more focused on the technical details of the Ruby library. 4 | -------------------------------------------------------------------------------- /docs/concepts/logging.md: -------------------------------------------------------------------------------- 1 | Work in progress, we're open to contributions -------------------------------------------------------------------------------- /docs/concepts/patching.md: -------------------------------------------------------------------------------- 1 | # Patching 2 | 3 | Instructor enhances the client functionality with three new arguments for backwards compatibility. This allows use of the enhanced client as usual, with structured output benefits. 4 | 5 | - `response_model`: Defines the response type for `chat`. 
6 | - `max_retries`: Determines retry attempts for failed `chat` validations. 7 | - `validation_context`: Provides extra context to the validation process. 8 | 9 | Instructor-rb only supports the 'tools' mode at the moment. Other modes will be added in the near future. 10 | 11 | ## Tool Calling 12 | 13 | This is the recommended method for OpenAI clients. Since tools is the default and only mode currently supported, there is no `mode:` argument available. It "just works" with the patched client. 14 | 15 | -------------------------------------------------------------------------------- /docs/concepts/philosophy.md: -------------------------------------------------------------------------------- 1 | # Philosophy 2 | 3 | The instructor values [simplicity](https://eugeneyan.com/writing/simplicity/) and flexibility in leveraging language models (LLMs). It offers a streamlined approach for structured output, avoiding unnecessary dependencies or complex abstractions. Let [EasyTalk](https://github.com/sergiobayona/easy_talk) do the heavy lifting. 4 | 5 | > “Simplicity is a great virtue but it requires hard work to achieve it and education to appreciate it. And to make matters worse: complexity sells better.” — Edsger Dijkstra 6 | 7 | ## The Bridge to Object-Oriented Programming 8 | 9 | `instructor` acts as a bridge converting text-based LLM interactions into a familiar object-oriented format. Its integration with EasyTalk provides type hints, and runtime validation. By treating LLMs as methods returning typed objects, instructor makes [language models backwards compatible with code](https://www.youtube.com/watch?v=yj-wSRJwrrc), making them practical for everyday use while being complex enough for advanced applications. 10 | 11 | ## The zen of `instructor` 12 | 13 | Maintain the flexibility and power of Ruby, without unnecessary constraints. 14 | 15 | Begin with a method and a return type hint – simplicity is key. 
With my experience maintaining a large enterprise framework at my previous job over many years I've learned that the goal of making a useful framework is minimizing regret, both for the author and hopefully for the user. 16 | 17 | 1. Define a Schema 18 | ```ruby 19 | class StructuredData 20 | include EasyTalk::Model 21 | end 22 | ``` 23 | 2. Define properties and methods on your schema. 24 | 3. Encapsulate all your LLM logic into a function `#!ruby def extract(a)` 25 | 4. Define typed computations against your data with `#!ruby def compute(data: StructuredData)` or call methods on your schema `#!ruby data.compute()` 26 | 27 | It should be that simple. 28 | 29 | ## My Goals 30 | 31 | The goal for the library, [documentation](https://instructor-ai.github.io/instructor-rb/), and [blog](https://instructor-ai.github.io/instructor-rb/blog/), is to help you be a better Ruby programmer and as a result a better AI engineer. 32 | 33 | - The library is a result of my desire for simplicity. 34 | - The library should help maintain simplicity in your codebase. 35 | - I won't try to write prompts for you, 36 | - I don't try to create indirections or abstractions that make it hard to debug in the future 37 | 38 | Please note that the library is designed to be adaptable and open-ended, allowing you to customize and extend its functionality based on your specific requirements. If you have any further questions or ideas hit me up on [twitter](https://twitter.com/jxnlco) 39 | 40 | Cheers! 41 | -------------------------------------------------------------------------------- /docs/concepts/schema.md: -------------------------------------------------------------------------------- 1 | # EasyTalk Schemas 2 | 3 | EasyTalk is a Ruby library for describing, generating and validating JSON Schema.
4 | 5 | ## Basic Usage 6 | 7 | ```Ruby 8 | 9 | class UserDetail 10 | include EasyTalk::Model 11 | 12 | define_schema do 13 | property :name, String 14 | property :age, Integer 15 | end 16 | end 17 | ``` 18 | 19 | ## Descriptions are Prompts 20 | 21 | One of the core things about instructors is that it's able to use these descriptions as part of the prompt. 22 | 23 | ```Ruby 24 | class UserDetail 25 | include EasyTalk::Model 26 | 27 | define_schema do 28 | description 'Fully extracted user detail' 29 | property :name, String, description: 'Your full name' 30 | property :age, Integer 31 | end 32 | end 33 | ``` 34 | 35 | ## Model Composition 36 | 37 | EasyTalk models can themselves be composed of other models. 38 | 39 | ```Ruby 40 | class Address 41 | include EasyTalk::Model 42 | 43 | define_schema do 44 | property :street, String 45 | property :city, String 46 | end 47 | end 48 | 49 | class UserDetail 50 | include EasyTalk::Model 51 | 52 | define_schema do 53 | property :name, String 54 | property :address, Address 55 | end 56 | end 57 | 58 | ``` 59 | 60 | ## Default Values 61 | 62 | In order to help the language model, we can also define defaults for the values. 63 | 64 | ```Ruby 65 | class UserDetail 66 | include EasyTalk::Model 67 | 68 | define_schema do 69 | property :name, String 70 | property :is_student, Boolean, default: false 71 | end 72 | end 73 | 74 | ``` 75 | ## Arrays 76 | 77 | Arrays can be defined using the `T::Array[]` method. 78 | 79 | ```Ruby 80 | class UserDetail 81 | include EasyTalk::Model 82 | 83 | define_schema do 84 | property :name, String 85 | property :friends, T::Array[String] 86 | end 87 | end 88 | 89 | ``` 90 | 91 | ## Enums 92 | 93 | Enums can be defined using the `enum` constraint. 
94 | 95 | ```Ruby 96 | class UserDetail 97 | include EasyTalk::Model 98 | 99 | define_schema do 100 | property :name, String 101 | property :role, String, enum: %w[admin user] 102 | end 103 | end 104 | 105 | ``` -------------------------------------------------------------------------------- /docs/concepts/streaming.md: -------------------------------------------------------------------------------- 1 | Work in progress, we're open to contributions -------------------------------------------------------------------------------- /docs/concepts/tips.md: -------------------------------------------------------------------------------- 1 | Work in progress, we're open to contributions. -------------------------------------------------------------------------------- /docs/contributing.md: -------------------------------------------------------------------------------- 1 | We would love for you to contribute to `Instructor-rb`. 2 | 3 | ## Migrating Docs from Python 4 | 5 | Theres a bunch of examples in the python version, including documentation here [python docs](https://useinstructor.com/examples/) 6 | 7 | If you want to contribute, please check out [issues](https://github.com/instructor-ai/instructor-rb/issues) 8 | 9 | ## Issues 10 | 11 | If you find a bug, please file an issue on [our issue tracker on GitHub](https://github.com/instructor-ai/instructor-rb/issues). 12 | 13 | To help us reproduce the bug, please provide a minimal reproducible example, including a code snippet and the full error message as well as: 14 | 15 | 1. The `response_model` you are using. 16 | 2. The `messages` you are using. 17 | 3. The `model` you are using. 18 | 19 | --- 20 | 21 | ## Environment Setup 22 | 23 | Ruby 3.2.1 is required to run the project. 24 | 25 | 26 | ### Installation 27 | 28 | 1. **Install Dependencies**: 29 | Run the following command to install the project dependencies: 30 | 31 | ```bash 32 | bundle install 33 | ``` 34 | 35 | 2. 
**Environment Variables**: 36 | setup the OpenAI API key in your environment variables. 37 | 38 | **Code Quality Tools** 39 | 40 | - This project uses rubocop. 41 | 42 | **Running Tests** 43 | 44 | - Execute tests using the following command: 45 | 46 | ```bash 47 | bundle exec rspec 48 | ``` 49 | 50 | ### Running the Rubocop 51 | 52 | ```bash 53 | bundle exec rubocop 54 | ``` 55 | 56 | ### Pull Requests 57 | 58 | We welcome pull requests! There is plenty to do, and we are happy to discuss any contributions you would like to make. 59 | 60 | If it is not a small change, please start by [filing an issue](https://github.com/instructor-ai/instructor-rb/issues) first. 61 | 62 | 63 | ## Community and Support 64 | 65 | - Join our community on Discord: [Join Discord](https://discord.gg/DWHZdqpNgz) 66 | - Reach out on Twitter: [@sergiobayona](https://twitter.com/sergiobayona) [@jxnlco](https://twitter.com/jxnlco) 67 | 68 | ## Contributors 69 | 70 | 71 | 72 | 73 | 74 | 75 | ## Additional Resources 76 | Python is required to run the documentation locally using mkdocs. 77 | 78 | To improve your understanding of the documentation, here are some useful references: 79 | 80 | - **mkdocs serve:** The `mkdocs serve` command is used to preview your documentation locally during the development phase. When you run this command in your terminal, MkDocs starts a development server, allowing you to view and interact with your documentation in a web browser. This is helpful for checking how your changes look before publishing the documentation. Learn more in the [mkdocs serve documentation](https://www.mkdocs.org/commands/serve/). 81 | 82 | - **hl_lines in Code Blocks:** The `hl_lines` feature in code blocks allows you to highlight specific lines within the code block. This is useful for drawing attention to particular lines of code when explaining examples or providing instructions. You can specify the lines to highlight using the `hl_lines` option in your code block configuration. 
For more details and examples, you can refer to the [hl_lines documentation](https://www.mkdocs.org/user-guide/writing-your-docs/#syntax-highlighting). 83 | 84 | - **Admonitions:** Admonitions are a way to visually emphasize or call attention to certain pieces of information in your documentation. They come in various styles, such as notes, warnings, tips, etc. Admonitions provide a structured and consistent way to present important content. For usage examples and details on incorporating admonitions into your documentation, you can refer to the [admonitions documentation](https://www.mkdocs.org/user-guide/writing-your-docs/#admonitions). 85 | 86 | For more details about the documentation structure and features, refer to the [MkDocs Material documentation](https://squidfunk.github.io/mkdocs-material/). 87 | 88 | Thank you for your contributions, and happy coding! -------------------------------------------------------------------------------- /docs/examples/action_items.md: -------------------------------------------------------------------------------- 1 | # Example: Extracting Action Items from Meeting Transcripts 2 | 3 | In this guide, we'll walk through how to extract action items from meeting transcripts using OpenAI's API. This use case is a good example for automating project management tasks, such as task assignment and priority setting. 4 | 5 | !!! tips "Motivation" 6 | 7 | Significant amount of time is dedicated to meetings, where action items are generated as the actionable outcomes of these discussions. Automating the extraction of action items can save time and guarantee that no critical tasks are overlooked. 8 | 9 | ## Defining the Structures 10 | 11 | We'll model a meeting transcript as a collection of **`Ticket`** objects, each representing an action item. Every **`Ticket`** can have multiple **`Subtask`** objects, representing smaller, manageable pieces of the main task. 
12 | 13 | ```Ruby 14 | class Subtask 15 | include EasyTalk::Model 16 | 17 | define_schema do 18 | property :id, Integer, description: 'Unique identifier for the subtask' 19 | property :name, String, description: 'Informative title of the subtask' 20 | end 21 | end 22 | 23 | class Ticket 24 | include EasyTalk::Model 25 | 26 | PRIORITY = %w[low medium high].freeze 27 | 28 | define_schema do 29 | property :id, Integer, description: 'Unique identifier for the ticket' 30 | property :name, String, description: 'Title of the ticket' 31 | property :description, String, description: 'Detailed description of the ticket' 32 | property :priority, String, description: 'Priority level' 33 | property :assignees, T::Array[String], description: 'List of users assigned to the ticket' 34 | property :subtasks, T.nilable(T::Array[Subtask]), description: 'List of subtasks associated with the ticket' 35 | property :dependencies, T.nilable(T::Array[Integer]), 36 | description: 'List of ticket IDs that this ticket depends on' 37 | end 38 | end 39 | 40 | class ActionItems 41 | include EasyTalk::Model 42 | 43 | define_schema do 44 | property :items, T::Array[Ticket] 45 | end 46 | end 47 | ``` 48 | 49 | ## Extracting Action Items 50 | 51 | To extract action items from a meeting transcript, we use the **`extract_action_items()`** method. It calls OpenAI's API, processes the text, and returns a set of action items modeled as **`ActionItems`**. 52 | 53 | ```Ruby 54 | 55 | def extract_action_items(data) 56 | client = Instructor.from_openai(OpenAI::Client).new 57 | 58 | client.chat( 59 | parameters: { 60 | model: 'gpt-3.5-turbo', 61 | messages: [ 62 | { 63 | role: 'system', 64 | "content": 'The following is a transcript of a meeting between a manager and their team. The manager is assigning tasks to their team members and creating action items for them to complete.' 
65 | }, 66 | { 67 | "role": 'user', 68 | "content": "Create the action items for the following transcript: #{data}" 69 | } 70 | ] 71 | }, 72 | response_model: ActionItems 73 | ) 74 | end 75 | ``` 76 | 77 | ## Evaluation and Testing 78 | 79 | To test the **`extract_action_items`** method, we provide it with a sample transcript, and then print the JSON representation of the extracted action items. 80 | 81 | ```Ruby 82 | data = <<~DATA 83 | Alice: Hey team, we have several critical tasks we need to tackle for the upcoming release. First, we need to work on improving the authentication system. It's a top priority. 84 | 85 | Bob: Got it, Alice. I can take the lead on the authentication improvements. Are there any specific areas you want me to focus on? 86 | 87 | Alice: Good question, Bob. We need both a front-end revamp and back-end optimization. So basically, two sub-tasks. 88 | 89 | Carol: I can help with the front-end part of the authentication system. 90 | 91 | Bob: Great, Carol. I'll handle the back-end optimization then. 92 | 93 | Alice: Perfect. Now, after the authentication system is improved, we have to integrate it with our new billing system. That's a medium priority task. 94 | 95 | Carol: Is the new billing system already in place? 96 | 97 | Alice: No, it's actually another task. So it's a dependency for the integration task. Bob, can you also handle the billing system? 98 | 99 | Bob: Sure, but I'll need to complete the back-end optimization of the authentication system first, so it's dependent on that. 100 | 101 | Alice: Understood. Lastly, we also need to update our user documentation to reflect all these changes. It's a low-priority task but still important. 102 | 103 | Carol: I can take that on once the front-end changes for the authentication system are done. So, it would be dependent on that. 104 | 105 | Alice: Sounds like a plan. Let's get these tasks modeled out and get started. 
106 | DATA 107 | 108 | result = extract_action_items(data) 109 | puts(result.as_json) 110 | ``` 111 | 112 | ## Visualizing the tasks 113 | 114 | In order to quickly visualize the data we used code interpreter to create a graphviz export of the json version of the ActionItems array. 115 | 116 | ![action items](action_items.png) 117 | 118 | ```json 119 | { 120 | "items": [ 121 | { 122 | "id": 1, 123 | "name": "Improve Authentication System", 124 | "description": "Revamp the front-end and optimize the back-end of the authentication system", 125 | "priority": "High", 126 | "assignees": ["Bob", "Carol"], 127 | "subtasks": [ 128 | { 129 | "id": 2, 130 | "name": "Front-end Revamp" 131 | }, 132 | { 133 | "id": 3, 134 | "name": "Back-end Optimization" 135 | } 136 | ], 137 | "dependencies": [] 138 | }, 139 | { 140 | "id": 4, 141 | "name": "Integrate Authentication System with Billing System", 142 | "description": "Integrate the improved authentication system with the new billing system", 143 | "priority": "Medium", 144 | "assignees": ["Bob"], 145 | "subtasks": [], 146 | "dependencies": [1] 147 | }, 148 | { 149 | "id": 5, 150 | "name": "Update User Documentation", 151 | "description": "Update the user documentation to reflect the changes in the authentication system", 152 | "priority": "Low", 153 | "assignees": ["Carol"], 154 | "subtasks": [], 155 | "dependencies": [2] 156 | } 157 | ] 158 | } 159 | ``` 160 | 161 | In this example, the **`extract_action_items`** method successfully identifies and segments the action items, assigning them priorities, assignees, subtasks, and dependencies as discussed in the meeting. 162 | 163 | By automating this process, you can ensure that important tasks and details are not lost in the sea of meeting minutes, making project management more efficient and effective.
-------------------------------------------------------------------------------- /docs/examples/action_items.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/567-labs/instructor-rb/e96ab3b4e9c8c1d08c8f784c0ac091f33a12a916/docs/examples/action_items.png -------------------------------------------------------------------------------- /docs/examples/classification.md: -------------------------------------------------------------------------------- 1 | # Classification 2 | Pending update 3 | -------------------------------------------------------------------------------- /docs/examples/content_moderation.md: -------------------------------------------------------------------------------- 1 | # Content Moderation 2 | pending update 3 | -------------------------------------------------------------------------------- /docs/examples/index.md: -------------------------------------------------------------------------------- 1 | # Cookbook 2 | 3 | !!! warning "Page under construction" 4 | 5 | This page is under construction. Please check back later. Consider contributing to this page by opening a PR! 
Theres a bunch of examples in the python version, including documentation here [python docs](https://jxnl.github.io/instructor/examples/) 6 | 7 | If you want to contribute, please check out [issues](https://github.com/instructor-ai/instructor-js/issues/8) 8 | 9 | 10 | 11 | ## Table of Contents 12 | 13 | - [How do I do classification?](./classification.md) 14 | - [How are complex queries decomposed into subqueries for a single request?](./query_decomposition.md) 15 | - [How are action items and dependencies generated from transcripts?](./action_items.md) 16 | - [How is AI self-assessment implemented with llm_validator?](./self_correction.md) 17 | - [How are exact citations retrieved using regular expressions and smart prompting?](./validated_citations.md) 18 | - [How to enable OpenAI's moderation](./content_moderation.md) 19 | -------------------------------------------------------------------------------- /docs/examples/query_decomposition.md: -------------------------------------------------------------------------------- 1 | # Query Decomposition 2 | Pending update 3 | -------------------------------------------------------------------------------- /docs/examples/self_correction.md: -------------------------------------------------------------------------------- 1 | # Self Correction 2 | Pending update 3 | -------------------------------------------------------------------------------- /docs/examples/validated_citations.md: -------------------------------------------------------------------------------- 1 | # Validated Citations 2 | Pending update 3 | -------------------------------------------------------------------------------- /docs/help.md: -------------------------------------------------------------------------------- 1 | !!! warning "Page under construction" 2 | 3 | This page is under construction. Please check back later. Consider contributing to this page by opening a PR! 
4 | 5 | 6 | # Getting help with Instructor 7 | 8 | If you need help getting started with Instructor or with advanced usage, the following sources may be useful. 9 | 10 | ## :fontawesome-brands-discord: Discord 11 | 12 | The [Discord](https://discord.gg/DWHZdqpNgz) is the best place to get help. You can ask questions, get help with debugging, and discuss Instructor with other users. 13 | 14 | ## :material-creation: Concepts 15 | 16 | The [concepts](concepts/prompting.md) section explains the core concepts of Instructor and how to prompt with models. 17 | 18 | ## :material-chef-hat: Cookbooks 19 | 20 | The [cookbooks](examples/index.md) are a great place to start. They contain a variety of examples that demonstrate how to use Instructor in different scenarios. 21 | 22 | ## :material-github: GitHub Discussions 23 | 24 | [GitHub discussions](https://github.com/instructor-ai/instructor-rb/discussions) are useful for asking questions, your question and the answer will help everyone. 25 | 26 | ## :material-github: GitHub Issues 27 | 28 | [GitHub issues](https://github.com/instructor-ai/instructor-rb/issues) are useful for reporting bugs or requesting new features. 29 | 30 | ## :material-twitter: Twitter 31 | 32 | You can also reach out to me on [Twitter](https://twitter.com/jxnlco) if you have any questions or ideas. 
33 | -------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 | # instructor-rb 2 | 3 | _Structured extraction in Ruby, powered by llms, designed for simplicity, transparency, and control._ 4 | 5 | --- 6 | 7 | [![Twitter Follow](https://img.shields.io/twitter/follow/jxnlco?style=social)](https://twitter.com/jxnlco) 8 | [![Twitter Follow](https://img.shields.io/twitter/follow/sergiobayona?style=social)](https://twitter.com/sergiobayona) 9 | [![Documentation](https://img.shields.io/badge/docs-available-brightgreen)](https://jxnl.github.io/instructor-rb) 10 | [![GitHub issues](https://img.shields.io/github/issues/instructor-ai/instructor-js.svg)](https://github.com/instructor-ai/instructor-rb/issues) 11 | [![Discord](https://img.shields.io/discord/1192334452110659664?label=discord)](https://discord.gg/DWHZdqpNgz) 12 | 13 | Dive into the world of Ruby-based structured extraction, by OpenAI's function calling API, Ruby schema validation with type hinting. Instructor stands out for its simplicity, transparency, and user-centric design. Whether you're a seasoned developer or just starting out, you'll find Instructor's approach intuitive and steerable. 14 | 15 | Check us out in [Python](https://jxnl.github.io/instructor/), [Elixir](https://github.com/thmsmlr/instructor_ex/), [PHP](https://github.com/cognesy/instructor-php/) and [Ruby](https://github.com/instructor-ai/instructor-rb). 16 | 17 | If you want to port Instructor to another language, please reach out to us on [Twitter](https://twitter.com/jxnlco) we'd love to help you get started! 18 | 19 | ## Usage 20 | 21 | To check out all the tips and tricks to prompt and extract data, check out the [documentation](https://instructor-ai.github.io/instructor-rb/tips/prompting/). 
22 | 23 | Installation is as simple as: 24 | 25 | ```bash 26 | gem install intructor-rb 27 | ``` 28 | 29 | 30 | ```Ruby 31 | require 'instructor-rb' 32 | 33 | OpenAI.configure do |config| 34 | config.access_token = ENV.fetch("OPENAI_ACCESS_TOKEN") 35 | config.organization_id = ENV.fetch("OPENAI_ORGANIZATION_ID") # Optional. 36 | end 37 | 38 | class UserDetail 39 | include EasyTalk::Model 40 | 41 | define_schema do 42 | property :name, String 43 | property :age, Integer 44 | end 45 | end 46 | 47 | client = Instructor.from_openai(OpenAI::Client).new 48 | 49 | user = client.chat( 50 | parameters: { 51 | model: 'gpt-3.5-turbo', 52 | messages: [{ role: 'user', content: 'Extract Jason is 25 years old' }] 53 | }, 54 | response_model: UserDetail 55 | ) 56 | 57 | user.name 58 | # => "Jason" 59 | user.age 60 | # => 25 61 | ``` 62 | 63 | ## Why use Instructor? 64 | 65 | The question of using Instructor is fundamentally a question of why to use zod. 66 | 67 | 1. **Powered by OpenAI** — Instructor is powered by OpenAI's function calling API. This means you can use the same API for both prompting and extraction. 68 | 69 | 2. **Ruby Schema Validation** — Instructor uses Ruby schema validation with type hinting. This means you can validate your data before using it. 70 | 71 | ## More Examples 72 | 73 | If you'd like to see more check out our [cookbook](examples/index.md). 74 | 75 | ## Contributing 76 | 77 | If you want to help out, checkout some of the issues marked as `good-first-issue` or `help-wanted`. Found [here](https://github.com/instructor-ai/instructor-rb/labels/good%20first%20issue). They could be anything from code improvements, a guest blog post, or a new cook book. 78 | 79 | ## License 80 | 81 | This project is licensed under the terms of the MIT License. 
82 | -------------------------------------------------------------------------------- /docs/overrides/main.html: -------------------------------------------------------------------------------- 1 | {% extends "base.html" %} 2 | 6 | 7 | {% block announce %} For updates follow 8 | @jxnlco on 9 | 10 | 11 | {% include ".icons/fontawesome/brands/twitter.svg" %} 12 | 13 | Twitter 14 | 15 | and 16 | 17 | {% include ".icons/fontawesome/solid/star.svg" %} 18 | 19 | us on 20 | 21 | 22 | {% include ".icons/fontawesome/brands/github.svg" %} 23 | 24 | GitHub . If you don't like Ruby, check out the 26 | Python, 27 | 28 | Elixir 29 | and 30 | JavaScript 31 | ports. {% endblock %} 32 | -------------------------------------------------------------------------------- /ellipsis.Dockerfile: -------------------------------------------------------------------------------- 1 | #====================================== 2 | # This Dockerfile was generated by Ellipsis. 3 | # It should be referenced from your `ellipsis.yaml` config file. 4 | # For more details, see documentation: https://docs.ellipsis.dev 5 | # Test with: $ docker build -f ellipsis.Dockerfile . 6 | #====================================== 7 | 8 | FROM ubuntu:20.04 9 | RUN apt-get update && apt-get install -y git build-essential 10 | 11 | WORKDIR /app 12 | COPY . . 13 | -------------------------------------------------------------------------------- /ellipsis.yaml: -------------------------------------------------------------------------------- 1 | # See https://docs.ellipsis.dev for all available configurations. 
2 | 3 | version: 1.1 4 | pr_review: 5 | auto_review_enabled: true # enable auto-review of PRs 6 | auto_summarize_pr: true # enable auto-summary of PRs 7 | confidence_threshold: 0.5 # Threshold for how confident Ellipsis needs to be in order to leave a comment, in range [0.0-1.0] 8 | rules: # customize behavior 9 | - "Code should be DRY (Don't Repeat Yourself)" 10 | - "There should no secrets or credentials in the code" 11 | 12 | # users can customize their own behavior 13 | user_overrides: 14 | # @hbrooks has disabled auto-summary and added a custom rule 15 | - usernames: ["hbrooks"] 16 | auto_summarize_pr: false 17 | rules: 18 | - "Code should be DRY (Don't Repeat Yourself)" 19 | 20 | 21 | # Below is an example of how to configure Ellipsis to build and run your repo. 22 | # Uncomment and replace with your own Dockerfile and commands. 23 | 24 | dockerfile: "ellipsis.Dockerfile" # this will be used to build your repo 25 | 26 | #======================= 27 | # commands: 28 | # - name: "build" 29 | # description: "This command compiles the code and builds the project" 30 | # command: "yarn build" 31 | # return_output_on_success: false # If output isn't useful when the command succeeds 32 | # auto_repair: true # Run this after every code change 33 | # - name: "lint_fix" 34 | # description: "Lints the code in fix mode, which will fix some errors." 
35 | # command: "yarn lint:fix" 36 | # return_output_on_success: false 37 | # auto_repair: true 38 | #======================= 39 | -------------------------------------------------------------------------------- /instructor-rb.gemspec: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require_relative 'lib/instructor/version' 4 | 5 | Gem::Specification.new do |spec| 6 | spec.name = 'instructor-rb' 7 | spec.version = Instructor::VERSION 8 | spec.authors = ['Sergio Bayona', 'Jason Liu'] 9 | spec.email = ['bayona.sergio@gmail.com', 'jason@jxnl.co'] 10 | 11 | spec.summary = 'Structured extraction in Ruby, powered by llms.' 12 | spec.description = 'Explore the power of LLM structured extraction in Ruby with the Instructor gem.' 13 | spec.homepage = 'https://github.com/instructor-ai/instructor-rb' 14 | spec.license = 'MIT' 15 | spec.required_ruby_version = '>= 3.1.0' 16 | 17 | spec.metadata['allowed_push_host'] = 'https://rubygems.org' 18 | 19 | spec.metadata['homepage_uri'] = spec.homepage 20 | spec.metadata['source_code_uri'] = 'https://github.com/instructor-ai/instructor-rb' 21 | spec.metadata['changelog_uri'] = 'https://github.com/instructor-ai/instructor-rb/blob/main/CHANGELOG.md' 22 | 23 | # Specify which files should be added to the gem when it is released. 24 | # The `git ls-files -z` loads the files in the RubyGem that have been added into git. 
25 | spec.files = Dir.chdir(__dir__) do 26 | `git ls-files -z`.split("\x0").reject do |f| 27 | (File.expand_path(f) == __FILE__) || 28 | f.start_with?(*%w[spec/ .git .github Gemfile]) 29 | end 30 | end 31 | 32 | spec.require_paths = ['lib'] 33 | 34 | spec.add_dependency 'activesupport', '~> 7.0' 35 | spec.add_dependency 'anthropic', '~> 0.2' 36 | spec.add_dependency 'easy_talk', '~> 0.2' 37 | spec.add_dependency 'ruby-openai', '~> 7' 38 | spec.add_development_dependency 'pry-byebug', '~> 3.10' 39 | spec.add_development_dependency 'rake', '~> 13.1' 40 | spec.add_development_dependency 'rspec', '~> 3.0' 41 | spec.add_development_dependency 'rspec-json_expectations', '~> 2.0' 42 | spec.add_development_dependency 'rubocop', '~> 1.21' 43 | spec.add_development_dependency 'rubocop-rake', '~> 0.6' 44 | spec.add_development_dependency 'rubocop-rspec', '~> 2.29' 45 | spec.add_development_dependency 'vcr', '~> 6.0' 46 | spec.add_development_dependency 'webmock', '~> 3.13' 47 | end 48 | -------------------------------------------------------------------------------- /lib/instructor.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require 'openai' 4 | require 'anthropic' 5 | require 'easy_talk' 6 | require 'active_support/all' 7 | require_relative 'instructor/version' 8 | require_relative 'instructor/openai/patch' 9 | require_relative 'instructor/openai/response' 10 | require_relative 'instructor/anthropic/patch' 11 | require_relative 'instructor/anthropic/response' 12 | require_relative 'instructor/mode' 13 | 14 | # Instructor makes it easy to reliably get structured data like JSON from Large Language Models (LLMs) 15 | # like GPT-3.5, GPT-4, GPT-4-Vision 16 | module Instructor 17 | @mode = nil 18 | 19 | class Error < ::StandardError; end 20 | 21 | # The ValidationError class represents an error that occurs during validation. 
22 | class ValidationError < ::StandardError; end 23 | 24 | def self.mode 25 | @mode 26 | end 27 | 28 | # Patches the OpenAI client to add the following functionality: 29 | # - Retries on exceptions 30 | # - Accepts and validates a response model 31 | # - Accepts a validation_context argument 32 | # 33 | # @param openai_client [OpenAI::Client] The OpenAI client to be patched. 34 | # @param mode [Symbol] The mode to be used. Default is `Instructor::Mode::TOOLS.function`. 35 | # @return [OpenAI::Client] The patched OpenAI client. 36 | def self.from_openai(openai_client, mode: Instructor::Mode::TOOLS.function) 37 | @mode = mode 38 | openai_client.prepend(Instructor::OpenAI::Patch) 39 | end 40 | 41 | # @param anthropic_client [Anthropic::Client] The Anthropic client to be patched. 42 | # @return [Anthropic::Client] The patched Anthropic client. 43 | def self.from_anthropic(anthropic_client) 44 | anthropic_client.prepend(Instructor::Anthropic::Patch) 45 | end 46 | end 47 | -------------------------------------------------------------------------------- /lib/instructor/anthropic/patch.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require 'anthropic' 4 | require 'instructor/base/patch' 5 | 6 | # The Instructor module provides functionality for interacting with Anthropic's messages API. 7 | module Instructor 8 | module Anthropic 9 | # The `Patch` module provides methods for patching and modifying the Anthropic client behavior. 10 | module Patch 11 | include Instructor::Base::Patch 12 | 13 | # Sends a message request to the API and processes the response. 14 | # 15 | # @param parameters [Hash] The parameters for the chat request as expected by the OpenAI client. 16 | # @param response_model [Class] The response model class. 17 | # @param max_retries [Integer] The maximum number of retries. Default is 0. 18 | # @param validation_context [Hash] The validation context for the parameters. Optional. 
19 | # @return [Object] The processed response. 20 | def messages(parameters:, response_model: nil, max_retries: 0, validation_context: nil) 21 | with_retries(max_retries, [JSON::ParserError, Instructor::ValidationError, Faraday::ParsingError]) do 22 | model = determine_model(response_model) 23 | function = build_function(model) 24 | parameters[:max_tokens] = 1024 unless parameters.key?(:max_tokens) 25 | parameters = prepare_parameters(parameters, validation_context, function) 26 | ::Anthropic.configuration.extra_headers = { 'anthropic-beta' => 'tools-2024-04-04' } 27 | response = ::Anthropic::Client.json_post(path: '/messages', parameters:) 28 | process_response(response, model) 29 | end 30 | end 31 | 32 | # Processes the API response. 33 | # 34 | # @param response [Hash] The API response. 35 | # @param model [Class] The response model class. 36 | # @return [Object] The processed response. 37 | def process_response(response, model) 38 | parsed_response = Response.new(response).parse 39 | iterable? ? process_multiple_responses(parsed_response, model) : process_single_response(parsed_response, model) 40 | end 41 | 42 | # Builds the function details for the API request. 43 | # 44 | # @param model [Class] The response model class. 45 | # @return [Hash] The function details. 46 | def build_function(model) 47 | { 48 | name: generate_function_name(model), 49 | description: generate_description(model), 50 | input_schema: model.json_schema 51 | } 52 | end 53 | end 54 | end 55 | end 56 | -------------------------------------------------------------------------------- /lib/instructor/anthropic/response.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | module Instructor 4 | module Anthropic 5 | # The Response class represents the response received from the OpenAI API. 
6 | # It takes the raw response and provides convenience methods to access the chat completions, 7 | # tool calls, function responses, and parsed arguments. 8 | class Response 9 | # Initializes a new instance of the Response class. 10 | # 11 | # @param response [Hash] The response received from the OpenAI API. 12 | def initialize(response) 13 | @response = response 14 | end 15 | 16 | # Parses the function response(s) and returns the parsed arguments. 17 | # 18 | # @return [Array, Hash] The parsed arguments. 19 | # @raise [StandardError] if the api response contains an error. 20 | def parse 21 | raise StandardError, error_message if error? 22 | 23 | if single_response? 24 | arguments.first 25 | else 26 | arguments 27 | end 28 | end 29 | 30 | private 31 | 32 | def content 33 | @response['content'] 34 | end 35 | 36 | def tool_calls 37 | content.is_a?(Array) && content.select { |c| c['type'] == 'tool_use' } 38 | end 39 | 40 | def single_response? 41 | tool_calls&.size == 1 42 | end 43 | 44 | def arguments 45 | tool_calls.map { |tc| tc['input'] } 46 | end 47 | 48 | def error? 49 | @response['type'] == 'error' 50 | end 51 | 52 | def error_message 53 | "#{@response.dig('error', 'type')} - #{@response.dig('error', 'message')}" 54 | end 55 | end 56 | end 57 | end 58 | -------------------------------------------------------------------------------- /lib/instructor/base/patch.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | module Instructor 4 | module Base 5 | # The `Patch` module provides common methods for patching and modifying the client behavior. 6 | module Patch 7 | # Generates the function name for the API request. 8 | # You can customize the function name for the LLM by adding a `title` key to the schema. 
# Example:
# ```ruby
# class User
#   include EasyTalk::Model
#   define_schema do
#     title 'User'
#     property :name, String
#     property :age, Integer
#   end
# end
# ```
# The function name will be `User`.
# If the `title` key is not present, the function name will be the model's name.
# @param model [Class] The response model class.
# @return [String] The generated function name.
def generate_function_name(model)
  # An explicit schema title wins; otherwise fall back to the class name.
  model.schema.fetch(:title, model.name)
end

# Generates the description for the function.
# You can customize the instructions for the LLM by adding an `instructions`
# class method to the response model:
# ```ruby
# class User
#   include EasyTalk::Model
#   def self.instructions
#     'Extract the user name and age from the response'
#   end
#
#   define_schema do ... end
# end
# ```
#
# @param model [Class] The response model class.
# @return [String] The generated description.
# @raise [Instructor::Error] when `instructions` is defined but is not a String.
def generate_description(model)
  unless model.respond_to?(:instructions)
    # Generic default used when the model declares no custom instructions.
    return "Correctly extracted `#{model.name}` with all the required parameters with correct types"
  end

  raise Instructor::Error, 'The instructions must be a string' unless model.instructions.is_a?(String)

  model.instructions
end

private

# Executes a block of code with retries in case of specific exceptions.
#
# @param max_retries [Integer] The maximum number of retries.
# @param exceptions [Array] The exception classes to catch and retry on.
# @yield The block of code to execute; re-raises once retries are exhausted.
def with_retries(max_retries, exceptions)
  attempts = 0
  begin
    yield
  rescue *exceptions
    attempts += 1
    retry if attempts < max_retries
    raise
  end
end

# Prepares the parameters for the chat request.
#
# @param parameters [Hash] The original parameters.
# @param validation_context [Hash] The validation context for the parameters.
# @param function [Hash] The function details.
# @return [Hash] The prepared parameters (a new Hash; the input is not mutated here).
def prepare_parameters(parameters, validation_context, function)
  parameters = apply_validation_context(parameters, validation_context)
  # Expose the response model to the LLM as a callable tool.
  parameters.merge(tools: [function])
end

# Processes multiple responses from the API.
#
# @param parsed_response [Array] The parsed API responses.
# @param model [Class] The response model class.
# @return [Array] The processed responses.
# @raise [Instructor::ValidationError] when any element fails validation.
def process_multiple_responses(parsed_response, model)
  # Each element is validated exactly like a single response.
  parsed_response.map { |response| process_single_response(response, model) }
end

# Processes a single response from the API.
#
# @param parsed_response [Hash] The parsed API response.
# @param model [Class] The response model class.
# @return [Object] The validated model instance.
# @raise [Instructor::ValidationError] when the instance fails validation.
def process_single_response(parsed_response, model)
  instance = model.new(parsed_response)
  raise Instructor::ValidationError unless instance.valid?

  instance
end

# Determines the response model based on the provided value.
#
# @param response_model [Class] The response model class or a Sorbet typed
#   array (e.g. T::Array[Model]) requesting a collection of instances.
# @return [Class] The determined response model class.
def determine_model(response_model)
  if response_model.is_a?(T::Types::TypedArray)
    # A typed array means the caller expects an iterable of model instances.
    @iterable = true
    response_model.type.raw_type
  else
    @iterable = false
    response_model
  end
end

# Applies the validation context to the parameters.
#
# @param parameters [Hash] The original parameters.
123 | # @param validation_context [Hash] The validation context. 124 | # @return [Hash] The parameters with applied validation context. 125 | def apply_validation_context(parameters, validation_context) 126 | return parameters unless validation_context.is_a?(Hash) 127 | 128 | Array[validation_context].each_with_index do |message, index| 129 | parameters[:messages][index][:content] = parameters[:messages][index][:content] % message 130 | end 131 | 132 | parameters 133 | end 134 | 135 | # Checks if the response is iterable. 136 | # 137 | # @return [Boolean] `true` if the response is iterable, `false` otherwise. 138 | def iterable? 139 | @iterable 140 | end 141 | end 142 | end 143 | end 144 | -------------------------------------------------------------------------------- /lib/instructor/mode.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require 'ostruct' 4 | 5 | module Instructor 6 | # This module defines constants related to different modes of operation. 7 | # It provides options for tool behavior, function types, and JSON modes. 8 | # Currently supported modes are: 9 | # - tools: select between function, auto, required, and none. 10 | # more modes will be added in the near future. 
11 | module Mode 12 | tool_options = %w[function auto required none].index_by(&:itself) 13 | TOOL_BEHAVIOR = OpenStruct.new(tool_options) 14 | 15 | FUNCTIONS = 'function_call' 16 | PARALLEL_TOOLS = 'parallel_tool_call' 17 | TOOLS = TOOL_BEHAVIOR 18 | JSON = 'json_mode' 19 | MD_JSON = 'markdown_json_mode' 20 | JSON_SCHEMA = 'json_schema_mode' 21 | end 22 | end 23 | -------------------------------------------------------------------------------- /lib/instructor/openai/patch.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require 'instructor/base/patch' 4 | 5 | # The Instructor module provides functionality for interacting with OpenAI's chat API. 6 | module Instructor 7 | module OpenAI 8 | # The `Patch` module provides methods for patching and modifying the OpenAI client behavior. 9 | module Patch 10 | include Instructor::Base::Patch 11 | 12 | # Sends a chat request to the API and processes the response. 13 | # 14 | # @param parameters [Hash] The parameters for the chat request as expected by the OpenAI client. 15 | # @param response_model [Class] The response model class. 16 | # @param max_retries [Integer] The maximum number of retries. Default is 0. 17 | # @param validation_context [Hash] The validation context for the parameters. Optional. 18 | # @return [Object] The processed response. 19 | def chat(parameters:, response_model: nil, max_retries: 0, validation_context: nil) 20 | return json_post(path: '/chat/completions', parameters:) if response_model.nil? 
21 | 22 | with_retries(max_retries, [JSON::ParserError, Instructor::ValidationError, Faraday::ParsingError]) do 23 | model = determine_model(response_model) 24 | function = build_function(model) 25 | parameters = prepare_parameters(parameters, validation_context, function) 26 | tool_choice = resolve_tool_choice(function[:function][:name]) 27 | parameters.merge!(tool_choice:) 28 | response = json_post(path: '/chat/completions', parameters:) 29 | process_response(response, model) 30 | end 31 | end 32 | 33 | # Processes the API response. 34 | # 35 | # @param response [Hash] The API response. 36 | # @param model [Class] The response model class. 37 | # @return [Object] The processed response. 38 | def process_response(response, model) 39 | parsed_response = Response.new(response).parse 40 | iterable? ? process_multiple_responses(parsed_response, model) : process_single_response(parsed_response, model) 41 | end 42 | 43 | private 44 | 45 | def resolve_tool_choice(function_name) 46 | case Instructor.mode 47 | when Instructor::Mode::TOOLS.function 48 | { type: 'function', function: { name: function_name } } 49 | when Instructor::Mode::TOOLS.auto 50 | 'auto' 51 | when Instructor::Mode::TOOLS.required 52 | 'required' 53 | when Instructor::Mode::TOOLS.none 54 | 'none' 55 | end 56 | end 57 | 58 | # Builds the function details for the API request. 59 | # 60 | # @param model [Class] The response model class. 61 | # @return [Hash] The function details. 
62 | def build_function(model) 63 | { 64 | type: 'function', 65 | function: { 66 | name: generate_function_name(model), 67 | description: generate_description(model), 68 | parameters: model.json_schema 69 | } 70 | } 71 | end 72 | end 73 | end 74 | end 75 | -------------------------------------------------------------------------------- /lib/instructor/openai/response.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | module Instructor 4 | module OpenAI 5 | # The Response class represents the response received from the OpenAI API. 6 | # It takes the raw response and provides convenience methods to access the chat completions, 7 | # tool calls, function responses, and parsed arguments. 8 | class Response 9 | # Initializes a new instance of the Response class. 10 | # 11 | # @param response [Hash] The response received from the OpenAI API. 12 | def initialize(response) 13 | @response = response 14 | end 15 | 16 | # Returns the chat completions from the response. 17 | # 18 | # @return [Array] An array of chat completions. 19 | def chat_completions 20 | @response['choices'] 21 | end 22 | 23 | # Returns the tool calls from the chat completions. 24 | # 25 | # @return [Hash, nil] The tool calls or nil if not found. 26 | def tool_calls 27 | chat_completions&.dig(0, 'message', 'tool_calls') 28 | end 29 | 30 | # Returns the function responses from the tool calls. 31 | # 32 | # @return [Array, nil] An array of function responses or nil if not found. 33 | def function_responses 34 | tool_calls&.map { |tool_call| tool_call['function'] } 35 | end 36 | 37 | # Returns the first function response. 38 | # 39 | # @return [Hash, nil] The first function response or nil if not found. 40 | def function_response 41 | function_responses&.first 42 | end 43 | 44 | # Checks if there is only a single function response. 45 | # 46 | # @return [Boolean] True if there is only a single function response, false otherwise. 
47 | def single_response? 48 | function_responses&.size == 1 49 | end 50 | 51 | # Parses the function response(s) and returns the parsed arguments. 52 | # 53 | # @return [Array, Hash] The parsed arguments. 54 | def parse 55 | if single_response? 56 | JSON.parse(function_response['arguments']) 57 | else 58 | function_responses.map { |res| JSON.parse(res['arguments']) } 59 | end 60 | end 61 | 62 | # Returns the arguments of the function with the specified name. 63 | # 64 | # @param function_name [String] The name of the function. 65 | # @return [Hash, nil] The arguments of the function or nil if not found. 66 | def by_function_name(function_name) 67 | function_responses&.find { |res| res['name'] == function_name }&.dig('arguments') 68 | end 69 | end 70 | end 71 | end 72 | -------------------------------------------------------------------------------- /lib/instructor/version.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | module Instructor 4 | VERSION = '0.1.3' 5 | end 6 | -------------------------------------------------------------------------------- /mkdocs.yml: -------------------------------------------------------------------------------- 1 | site_name: Instructor (Ruby) 2 | site_author: Jason Liu 3 | site_description: Structured LLM outputs with EasyTalk 4 | repo_name: instructor 5 | repo_url: https://github.com/instructor-ai/instructor-rb 6 | site_url: https://ruby.useinstructor.com/ 7 | edit_uri: edit/main/docs/ 8 | copyright: Copyright © 2024 Jason Liu 9 | theme: 10 | name: material 11 | icon: 12 | repo: fontawesome/brands/github 13 | edit: material/pencil 14 | view: material/eye 15 | theme: 16 | admonition: 17 | note: octicons/tag-16 18 | abstract: octicons/checklist-16 19 | info: octicons/info-16 20 | tip: octicons/squirrel-16 21 | success: octicons/check-16 22 | question: octicons/question-16 23 | warning: octicons/alert-16 24 | failure: octicons/x-circle-16 25 | danger: 
octicons/zap-16 26 | bug: octicons/bug-16 27 | example: octicons/beaker-16 28 | quote: octicons/quote-16 29 | features: 30 | - announce.dismiss 31 | - content.action.edit 32 | - content.action.view 33 | - content.code.annotate 34 | - content.code.copy 35 | - content.code.select 36 | - content.tabs.link 37 | - content.tooltips 38 | - header.autohide 39 | - navigation.expand 40 | - navigation.footer 41 | - navigation.indexes 42 | - navigation.instant 43 | - navigation.instant.prefetch 44 | - navigation.instant.progress 45 | - navigation.prune 46 | - navigation.sections 47 | - navigation.tabs 48 | # - navigation.tabs.sticky 49 | - navigation.top 50 | - navigation.tracking 51 | - search.highlight 52 | - search.share 53 | - search.suggest 54 | - toc.follow 55 | # - toc.integrate 56 | palette: 57 | - scheme: default 58 | primary: black 59 | accent: indigo 60 | toggle: 61 | icon: material/brightness-7 62 | name: Switch to dark mode 63 | - scheme: slate 64 | primary: black 65 | accent: indigo 66 | toggle: 67 | icon: material/brightness-4 68 | name: Switch to light mode 69 | font: 70 | text: Roboto 71 | code: Roboto Mono 72 | custom_dir: docs/overrides 73 | # Extensions 74 | markdown_extensions: 75 | - abbr 76 | - admonition 77 | - pymdownx.details 78 | - attr_list 79 | - def_list 80 | - footnotes 81 | - md_in_html 82 | - toc: 83 | permalink: true 84 | - pymdownx.arithmatex: 85 | generic: true 86 | - pymdownx.betterem: 87 | smart_enable: all 88 | - pymdownx.caret 89 | - pymdownx.details 90 | - pymdownx.emoji: 91 | emoji_generator: !!python/name:material.extensions.emoji.to_svg 92 | emoji_index: !!python/name:material.extensions.emoji.twemoji 93 | - pymdownx.highlight: 94 | anchor_linenums: true 95 | line_spans: __span 96 | pygments_lang_class: true 97 | - pymdownx.inlinehilite 98 | - pymdownx.keys 99 | - pymdownx.magiclink: 100 | normalize_issue_symbols: true 101 | repo_url_shorthand: true 102 | user: jxnl 103 | repo: instructor 104 | - pymdownx.mark 105 | - 
pymdownx.smartsymbols 106 | - pymdownx.snippets: 107 | auto_append: 108 | - includes/mkdocs.md 109 | - pymdownx.superfences: 110 | custom_fences: 111 | - name: mermaid 112 | class: mermaid 113 | format: !!python/name:pymdownx.superfences.fence_code_format 114 | - pymdownx.tabbed: 115 | alternate_style: true 116 | combine_header_slug: true 117 | slugify: !!python/object/apply:pymdownx.slugs.slugify 118 | kwds: 119 | case: lower 120 | - pymdownx.tasklist: 121 | custom_checkbox: true 122 | - pymdownx.tilde 123 | nav: 124 | - Introduction: 125 | - Welcome To Instructor: 'index.md' 126 | - Why use Instructor?: 'why.md' 127 | - Schema: 'concepts/schema.md' 128 | - Patching: 'concepts/patching.md' 129 | - Streaming: 'concepts/streaming.md' 130 | - Logging: 'concepts/logging.md' 131 | - Prompting Tips: 'concepts/prompting.md' 132 | - Philosophy: 'concepts/philosophy.md' 133 | - Help with Instructor: 'help.md' 134 | - Installation: 'installation.md' 135 | - Contributing: 'contributing.md' 136 | - Cookbook: 137 | - Overview: 'examples/index.md' 138 | - Classification: 'examples/classification.md' 139 | - Query Decomposition: 'examples/query_decomposition.md' 140 | - Action Item and Dependency Mapping: 'examples/action_items.md' 141 | - Self Correction: 'examples/self_correction.md' 142 | - Citing Sources: 'examples/validated_citations.md' 143 | - Content Moderation: 'examples/content_moderation.md' 144 | - Blog: 145 | - "blog/index.md" 146 | 147 | plugins: 148 | - social 149 | - search: 150 | separator: '[\s\u200b\-_,:!=\[\]()"`/]+|\.(?!\d)|&[lg]t;|(?!\b)(?=[A-Z][a-z])' 151 | - minify: 152 | minify_html: true 153 | - mkdocstrings: 154 | handlers: 155 | python: 156 | options: 157 | members_order: alphabetical 158 | allow_inspection: true 159 | show_bases: true 160 | - blog: 161 | enabled: !ENV CI 162 | blog_dir: "blog" 163 | blog_toc: true 164 | post_dir: blog/posts 165 | post_date_format: yyyy/MM/dd 166 | post_url_format: "{date}/{slug}" 167 | authors_file: 
"{blog}/.authors.yml" 168 | extra: 169 | analytics: 170 | provider: google 171 | property: G-3JFF533YVZ 172 | feedback: 173 | title: Was this page helpful? 174 | ratings: 175 | - icon: material/emoticon-happy-outline 176 | name: This page was helpful 177 | data: 1 178 | note: >- 179 | Thanks for your feedback! 180 | - icon: material/emoticon-sad-outline 181 | name: This page could be improved 182 | data: 0 183 | note: >- 184 | Thanks for your feedback! Help us improve this page by 185 | using our feedback form. 186 | social: 187 | - icon: fontawesome/brands/twitter 188 | link: https://twitter.com/jxnlco 189 | - icon: fontawesome/brands/github 190 | link: https://github.com/jxnl 191 | copyright: Copyright © 2024 Jason Liu 192 | -------------------------------------------------------------------------------- /scripts/requirements-docs.txt: -------------------------------------------------------------------------------- 1 | mkdocs 2 | mkdocstrings 3 | mkdocs-material 4 | mkdocs-material[imaging] 5 | mkdocs-minify-plugin 6 | mkdocs-rss-plugin -------------------------------------------------------------------------------- /spec/anthropic/patch_spec.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require 'spec_helper' 4 | 5 | RSpec.describe Instructor::Anthropic::Patch do 6 | subject(:patched_client) { Instructor.from_anthropic(Anthropic::Client) } 7 | 8 | let(:user_model) do 9 | Class.new do 10 | include EasyTalk::Model 11 | 12 | def self.name 13 | 'User' 14 | end 15 | 16 | define_schema do 17 | property :name, String 18 | property :age, Integer 19 | end 20 | end 21 | end 22 | 23 | it 'returns the patched client' do 24 | expect(patched_client).to eq(Anthropic::Client) 25 | end 26 | 27 | context 'with a new instance of the patched client' do 28 | it 'returns an instance of the patched client' do 29 | expect(patched_client.new).to be_an_instance_of(Anthropic::Client) 30 | end 31 | 32 | it 
'does not require the response model argument' do 33 | client = patched_client.new 34 | expect { client.messages(parameters: {}) }.not_to raise_error(ArgumentError) 35 | end 36 | 37 | it 'does require the parameters argument' do 38 | client = patched_client.new 39 | expect { client.messages }.to raise_error(ArgumentError, 'missing keyword: :parameters') 40 | end 41 | 42 | it 'returns an object with the expected valid attribute values', vcr: 'anthropic_patch/valid_response' do 43 | client = patched_client.new 44 | 45 | user = client.messages( 46 | parameters: { 47 | model: 'claude-3-opus-20240229', 48 | messages: [{ role: 'user', content: 'Extract Jason is 25 years old' }] 49 | }, 50 | response_model: user_model 51 | ) 52 | 53 | expect(user.name).to eq('Jason') 54 | expect(user.age).to eq(25) 55 | end 56 | end 57 | 58 | context 'when an exception occurs' do 59 | let(:client) { patched_client.new } 60 | let(:max_retries) { 3 } 61 | let(:parameters) { {} } 62 | let(:response_model) { double } 63 | 64 | before do 65 | allow(client).to receive(:determine_model).and_return(double) 66 | allow(client).to receive(:build_function).and_return(double) 67 | allow(client).to receive(:prepare_parameters).and_return({}) 68 | allow(client).to receive(:process_response).and_return(double) 69 | allow(::Anthropic::Client).to receive(:json_post).and_raise(JSON::ParserError) 70 | end 71 | 72 | it 'retries the specified number of times' do 73 | expect { client.messages(parameters:, response_model:, max_retries:) }.to raise_error(JSON::ParserError) 74 | expect(::Anthropic::Client).to have_received(:json_post).exactly(max_retries).times 75 | end 76 | end 77 | 78 | context 'with validation context' do 79 | let(:client) { patched_client.new } 80 | let(:parameters) do 81 | { 82 | model: 'claude-3-opus-20240229', 83 | messages: [ 84 | { 85 | role: 'user', 86 | content: 'Answer the question: %s with the text chunk: %s' 87 | } 88 | ] 89 | } 90 | end 91 | 92 | it 'returns an object with the 
expected valid attribute values', vcr: 'anthropic_patch/with_validation_context' do 93 | user = client.messages( 94 | parameters:, 95 | response_model: user_model, 96 | validation_context: { question: 'What is your name and age?', 97 | text_chunk: 'my name is Jason and I turned 25 years old yesterday' } 98 | ) 99 | 100 | expect(user.name).to eq('Jason') 101 | expect(user.age).to eq(25) 102 | end 103 | end 104 | 105 | context 'with an invalid response model' do 106 | let(:invalid_model) do 107 | Class.new do 108 | include EasyTalk::Model 109 | 110 | def self.name 111 | 'InvalidModel' 112 | end 113 | 114 | define_schema do 115 | property :name, String 116 | property :age, Integer 117 | end 118 | end 119 | end 120 | 121 | let(:client) { patched_client.new } 122 | let(:parameters) do 123 | { 124 | model: 'claude-3-opus-20240229', 125 | messages: [{ role: 'user', content: 'Extract Jason is 25 years old' }] 126 | } 127 | end 128 | 129 | it 'raises an error when the response model is invalid', vcr: 'anthropic_patch/invalid_response' do 130 | expect do 131 | client.messages(parameters:, response_model: invalid_model) 132 | end.to raise_error(Instructor::ValidationError) 133 | end 134 | end 135 | end 136 | -------------------------------------------------------------------------------- /spec/examples/autoticketer_spec.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require 'spec_helper' 4 | require_relative '../helpers/autoticketer_models' 5 | 6 | RSpec.describe 'Auto-ticketer' do 7 | RSpec.configure do |c| 8 | c.include AutoticketerModels 9 | end 10 | 11 | let(:client) { Instructor.from_openai(OpenAI::Client).new } 12 | 13 | let(:data) do 14 | <<~DATA 15 | Alice: Hey team, we have several critical tasks we need to tackle for the upcoming release. First, we need to work on improving the authentication system. It's a top priority. 16 | 17 | Bob: Got it, Alice. 
I can take the lead on the authentication improvements. Are there any specific areas you want me to focus on? 18 | 19 | Alice: Good question, Bob. We need both a front-end revamp and back-end optimization. So basically, two sub-tasks. 20 | 21 | Carol: I can help with the front-end part of the authentication system. 22 | 23 | Bob: Great, Carol. I'll handle the back-end optimization then. 24 | 25 | Alice: Perfect. Now, after the authentication system is improved, we have to integrate it with our new billing system. That's a medium priority task. 26 | 27 | Carol: Is the new billing system already in place? 28 | 29 | Alice: No, it's actually another task. So it's a dependency for the integration task. Bob, can you also handle the billing system? 30 | 31 | Bob: Sure, but I'll need to complete the back-end optimization of the authentication system first, so it's dependent on that. 32 | 33 | Alice: Understood. Lastly, we also need to update our user documentation to reflect all these changes. It's a low-priority task but still important. 34 | 35 | Carol: I can take that on once the front-end changes for the authentication system are done. So, it would be dependent on that. 36 | 37 | Alice: Sounds like a plan. Let's get these tasks modeled out and get started. 38 | DATA 39 | end 40 | 41 | def generate(data) 42 | client.chat( 43 | parameters: { 44 | model: 'gpt-3.5-turbo', 45 | messages: [ 46 | { 47 | role: 'system', 48 | "content": 'The following is a transcript of a meeting between a manager and their team. The manager is assigning tasks to their team members and creating action items for them to complete.' 
49 | }, 50 | { 51 | "role": 'user', 52 | "content": "Create the action items for the following transcript: #{data}" 53 | } 54 | ] 55 | }, 56 | response_model: AutoticketerModels::ActionItems 57 | ) 58 | end 59 | 60 | it 'generates the proper json-schema', vcr: 'autoticketer/generate' do 61 | result = generate(data) 62 | 63 | expect(result.as_json).to include_json( 64 | { 65 | "items": [ 66 | { 67 | "id": 1, 68 | "name": 'Improve Authentication System', 69 | "description": 'Work on front-end revamp and back-end optimization', 70 | "priority": 'High', 71 | "assignees": %w[ 72 | Bob 73 | Carol 74 | ], 75 | "subtasks": [ 76 | { 77 | "id": 2, 78 | "name": 'Front-end Revamp' 79 | }, 80 | { 81 | "id": 3, 82 | "name": 'Back-end Optimization' 83 | } 84 | ] 85 | }, 86 | { 87 | "id": 4, 88 | "name": 'Integrate Authentication System with New Billing System', 89 | "description": 'Integrate authentication system with the new billing system', 90 | "priority": 'Medium', 91 | "assignees": [ 92 | 'Bob' 93 | ], 94 | "dependencies": [ 95 | 1 96 | ] 97 | }, 98 | { 99 | "id": 5, 100 | "name": 'Update User Documentation', 101 | "description": 'Update user documentation to reflect changes', 102 | "priority": 'Low', 103 | "assignees": [ 104 | 'Carol' 105 | ], 106 | "dependencies": [ 107 | 2 108 | ] 109 | } 110 | ] 111 | } 112 | ) 113 | end 114 | end 115 | -------------------------------------------------------------------------------- /spec/features/basic_use_spec.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require 'spec_helper' 4 | 5 | RSpec.describe 'running an OpenAI function call' do 6 | let(:user_model) do 7 | Class.new do 8 | include EasyTalk::Model 9 | 10 | def self.name 11 | 'User' 12 | end 13 | 14 | define_schema do 15 | property :name, String 16 | property :age, Integer 17 | end 18 | end 19 | end 20 | 21 | let(:client) { Instructor.from_openai(OpenAI::Client).new } 22 | 23 | let(:parameters) do 24 
| { 25 | model: 'gpt-3.5-turbo', 26 | messages: [{ role: 'user', content: 'Extract Jason is 25 years old' }] 27 | } 28 | end 29 | 30 | let(:response_model) { user_model } 31 | 32 | it 'returns a single object with the expected valid attribute values', vcr: 'basic_spec/valid_response' do 33 | user = client.chat(parameters:, response_model:) 34 | 35 | expect(user.name).to eq('Jason') 36 | expect(user.age).to eq(25) 37 | end 38 | end 39 | -------------------------------------------------------------------------------- /spec/features/iterable_spec.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require 'spec_helper' 4 | 5 | RSpec.describe 'running an OpenAI function with a multiple object response' do 6 | let(:user_model) do 7 | Class.new do 8 | include EasyTalk::Model 9 | 10 | def self.name 11 | 'Users' 12 | end 13 | 14 | define_schema do 15 | property :name, String 16 | property :age, Integer 17 | end 18 | end 19 | end 20 | 21 | let(:client) { Instructor.from_openai(OpenAI::Client, mode: Instructor::Mode::TOOLS.required).new } 22 | 23 | let(:parameters) do 24 | { 25 | model: 'gpt-3.5-turbo', 26 | messages: [ 27 | { role: 'system', content: 'Extract the names and ages of all the users' }, 28 | { role: 'user', content: 'Extract `Jason is 25 and Peter is 32`' } 29 | ] 30 | } 31 | end 32 | 33 | let(:response_model) { T::Array[user_model] } 34 | 35 | let(:users) { client.chat(parameters:, response_model:) } 36 | 37 | it 'returns valid objects', vcr: 'iterable_spec/valid_response' do 38 | users.each do |user| 39 | expect(user.valid?).to eq(true) 40 | end 41 | end 42 | 43 | it 'returns objects with valid types', vcr: 'iterable_spec/valid_response' do 44 | users.each do |user| 45 | expect(user.name).to be_a(String) 46 | expect(user.age).to be_a(Integer) 47 | end 48 | end 49 | 50 | it 'returns objects with the expected attribute values', vcr: 'iterable_spec/valid_response' do 51 | 
expect(users[0].name).to eq('Jason') 52 | expect(users[0].age).to eq(25) 53 | expect(users[1].name).to eq('Peter') 54 | expect(users[1].age).to eq(32) 55 | end 56 | end 57 | -------------------------------------------------------------------------------- /spec/helpers/autoticketer_models.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | module AutoticketerModels 4 | class Subtask 5 | include EasyTalk::Model 6 | 7 | define_schema do 8 | property :id, Integer, description: 'Unique identifier for the subtask' 9 | property :name, String, description: 'Informative title of the subtask' 10 | end 11 | end 12 | 13 | class Ticket 14 | include EasyTalk::Model 15 | 16 | PRIORITY = %w[low medium high].freeze 17 | 18 | define_schema do 19 | property :id, Integer, description: 'Unique identifier for the ticket' 20 | property :name, String, description: 'Title of the ticket' 21 | property :description, String, description: 'Detailed description of the ticket' 22 | property :priority, String, description: 'Priority level' 23 | property :assignees, T::Array[String], description: 'List of users assigned to the ticket' 24 | property :subtasks, T.nilable(T::Array[Subtask]), description: 'List of subtasks associated with the ticket' 25 | property :dependencies, T.nilable(T::Array[Integer]), 26 | description: 'List of ticket IDs that this ticket depends on' 27 | end 28 | end 29 | 30 | class ActionItems 31 | include EasyTalk::Model 32 | 33 | define_schema do 34 | property :items, T::Array[Ticket] 35 | end 36 | end 37 | end 38 | -------------------------------------------------------------------------------- /spec/instructor_spec.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require 'spec_helper' 4 | 5 | RSpec.describe Instructor, '.class' do 6 | it 'returns the default mode after patching' do 7 | 
described_class.from_openai(OpenAI::Client) 8 | expect(described_class.mode).to eq(Instructor::Mode::TOOLS.function) 9 | end 10 | 11 | it 'changes the the mode' do 12 | described_class.from_openai(OpenAI::Client, mode: Instructor::Mode::TOOLS.auto) 13 | expect(described_class.mode).to eq(Instructor::Mode::TOOLS.auto) 14 | end 15 | end 16 | -------------------------------------------------------------------------------- /spec/openai/patch_spec.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require 'spec_helper' 4 | 5 | RSpec.describe Instructor::OpenAI::Patch do 6 | subject(:patched_client) { Instructor.from_openai(OpenAI::Client) } 7 | 8 | let(:user_model) do 9 | Class.new do 10 | include EasyTalk::Model 11 | 12 | def self.name 13 | 'User' 14 | end 15 | 16 | def self.instructions 17 | "Extract the user's name and age." 18 | end 19 | 20 | define_schema do 21 | title 'SomeUser' 22 | property :name, String 23 | property :age, Integer 24 | end 25 | end 26 | end 27 | 28 | it 'returns the patched client' do 29 | expect(patched_client).to eq(OpenAI::Client) 30 | end 31 | 32 | context 'when generating description' do 33 | let(:client) { patched_client.new } 34 | 35 | it "returns the model's instructions" do 36 | expect(client.generate_description(user_model)).to eq("Extract the user's name and age.") 37 | end 38 | 39 | it 'returns the default description when the model does not have instructions' do 40 | model = Class.new do 41 | include EasyTalk::Model 42 | 43 | def self.name 44 | 'User' 45 | end 46 | 47 | define_schema {} 48 | end 49 | 50 | expect(client.generate_description(model)).to eq('Correctly extracted `User` with all the required parameters with correct types') 51 | end 52 | end 53 | 54 | context 'with a new instance of the patched client' do 55 | it 'returns an instance of the patched client' do 56 | expect(patched_client.new).to be_an_instance_of(OpenAI::Client) 57 | end 58 | 59 | 
pending 'receives the chat method with the expected arguments' do 60 | client.chat(parameters: {}, response_model: nil) 61 | expect(client).to have_received(:chat).with(parameters: {}, response_model: nil) 62 | end 63 | 64 | it 'does not require the response model argument' do 65 | expect { client.chat(parameters: {}) }.not_to raise_error(ArgumentError) 66 | end 67 | 68 | it 'does require the parameters argument' do 69 | client = patched_client.new 70 | expect { client.chat }.to raise_error(ArgumentError, 'missing keyword: :parameters') 71 | end 72 | 73 | describe 'when setting the function_name' do 74 | it 'returns the function_name based on the schema title' do 75 | client = patched_client.new 76 | expect(client.generate_function_name(user_model)).to eq('SomeUser') 77 | end 78 | 79 | it 'returns the class name when the schema title is not defined' do 80 | model = Class.new do 81 | include EasyTalk::Model 82 | 83 | def self.name 84 | 'User' 85 | end 86 | 87 | define_schema {} 88 | end 89 | 90 | client = patched_client.new 91 | expect(client.generate_function_name(model)).to eq('User') 92 | end 93 | end 94 | 95 | it 'returns an object with the expected valid attribute values', vcr: 'patching_spec/valid_response' do 96 | client = patched_client.new 97 | 98 | user = client.chat( 99 | parameters: { 100 | model: 'gpt-3.5-turbo', 101 | messages: [{ role: 'user', content: 'Extract Jason is 25 years old' }] 102 | }, 103 | response_model: user_model 104 | ) 105 | 106 | expect(user.name).to eq('Jason') 107 | expect(user.age).to eq(25) 108 | end 109 | end 110 | 111 | context 'with retry mechanism' do 112 | let(:client) { patched_client.new } 113 | let(:parameters) { { key: 'value' } } 114 | let(:max_retries) { 3 } 115 | 116 | before do 117 | allow(client).to receive(:json_post).and_return('choices' => 118 | [{ 'index' => 0, 119 | 'message' => 120 | { 'role' => 'assistant', 121 | 'tool_calls' => [{ 'id' => 'call_85vQq30Nt8xU1mly2Y2Y1tL2', 'type' => 'function', 122 | 
'function' => { 'name' => 'User', 'arguments' => '\"bad:json\"' } }] } }]) 123 | end 124 | 125 | it 'retries the chat method when parsing fails' do 126 | expect do 127 | client.chat(parameters:, response_model: user_model, max_retries:) 128 | end.to raise_error(JSON::ParserError) 129 | 130 | expect(client).to have_received(:json_post).exactly(max_retries).times 131 | end 132 | end 133 | 134 | context 'with validation context' do 135 | let(:client) { patched_client.new } 136 | let(:parameters) do 137 | { 138 | model: 'gpt-3.5-turbo', 139 | messages: [ 140 | { 141 | role: 'user', 142 | content: 'Answer the question: %s with the text chunk: %s' 143 | } 144 | ] 145 | } 146 | end 147 | 148 | it 'returns an object with the expected valid attribute values', vcr: 'patching_spec/with_validation_context' do 149 | user = client.chat( 150 | parameters:, 151 | response_model: user_model, 152 | validation_context: { question: 'What is your name and age?', 153 | text_chunk: 'my name is Jason and I turned 25 years old yesterday' } 154 | ) 155 | 156 | expect(user.name).to eq('Jason') 157 | expect(user.age).to eq(25) 158 | end 159 | end 160 | 161 | context 'with an invalid response model' do 162 | let(:invalid_model) do 163 | Class.new do 164 | include EasyTalk::Model 165 | 166 | def self.name 167 | 'InvalidModel' 168 | end 169 | 170 | define_schema do 171 | property :name, String 172 | property :age, String 173 | end 174 | end 175 | end 176 | 177 | let(:client) { patched_client.new } 178 | let(:parameters) do 179 | { 180 | model: 'gpt-3.5-turbo', 181 | messages: [{ role: 'user', content: 'Extract Jason is 25 years old' }] 182 | } 183 | end 184 | 185 | it 'raises an error when the response model is invalid', vcr: 'patching_spec/invalid_response' do 186 | expect do 187 | client.chat(parameters:, response_model: invalid_model) 188 | end.to raise_error(Instructor::ValidationError) 189 | end 190 | end 191 | 192 | describe 'when the client is used in a standard manner' do 193 | it 'does
not raise an error when the client is used in a standard manner', vcr: 'patching_spec/standard_usage' do 194 | response = patched_client.new.chat( 195 | parameters: { 196 | model: 'gpt-3.5-turbo', 197 | messages: [{ role: 'user', content: 'How is the weather today in New York?' }] 198 | } 199 | ) 200 | 201 | expect(response).to be_a(Hash) 202 | expect(response.dig('choices', 0, 'message', 'content')).to be_a(String) 203 | end 204 | end 205 | end 206 | -------------------------------------------------------------------------------- /spec/openai/response_spec.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require 'spec_helper' 4 | 5 | RSpec.describe Instructor::OpenAI::Response do 6 | subject(:response_object) { described_class.new(response) } 7 | 8 | let(:response) do 9 | { 'id' => 'chatcmpl-9DEGpBfHqcS17uJtx1vxpRMEb4DtK', 10 | 'object' => 'chat.completion', 11 | 'created' => 1_712_940_147, 12 | 'model' => 'gpt-3.5-turbo-0125', 13 | 'choices' => [ 14 | { 'index' => 0, 15 | 'message' => 16 | { 'role' => 'assistant', 17 | 'content' => nil, 18 | 'tool_calls' => [ 19 | { 20 | 'id' => 'call_ljjAxRNujNWmDhrlJW2DLprK', 21 | 'type' => 'function', 22 | 'function' => { 'name' => 'User', 'arguments' => '{"name": "Jason", "age": 25}' } 23 | } 24 | ] }, 25 | 'logprobs' => nil, 26 | 'finish_reason' => 'tool_calls' } 27 | ], 28 | 'usage' => { 29 | 'prompt_tokens' => 63, 30 | 'completion_tokens' => 32, 31 | 'total_tokens' => 95 32 | }, 33 | 'system_fingerprint' => 'fp_c2295e73ad' } 34 | end 35 | 36 | it 'returns a chat completion' do 37 | expect(response_object.chat_completions).to eq(response['choices']) 38 | end 39 | 40 | it 'returns the tool calls' do 41 | expect(response_object.tool_calls).to eq(response['choices'][0]['message']['tool_calls']) 42 | end 43 | 44 | it 'returns the function responses' do 45 | expect(response_object.function_responses).to 
eq([response['choices'][0]['message']['tool_calls'][0]['function']]) 46 | end 47 | 48 | it 'returns the function arguments by function name' do 49 | expect(response_object.by_function_name('User')).to eq('{"name": "Jason", "age": 25}') 50 | end 51 | 52 | it 'single response' do 53 | expect(response_object.single_response?).to eq(true) 54 | end 55 | 56 | it 'parses the response' do 57 | expect(response_object.parse).to eq('name' => 'Jason', 'age' => 25) 58 | end 59 | 60 | it 'returns the first function response' do 61 | expect(response_object.function_response).to eq(response['choices'][0]['message']['tool_calls'][0]['function']) 62 | end 63 | end 64 | -------------------------------------------------------------------------------- /spec/spec_helper.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require 'instructor' 4 | require 'pry-byebug' 5 | require 'vcr' 6 | require 'rspec/json_expectations' 7 | 8 | VCR.configure do |config| 9 | config.cassette_library_dir = 'spec/vcr_cassettes' 10 | config.hook_into :webmock 11 | config.configure_rspec_metadata! 
12 | config.filter_sensitive_data('') { ENV.fetch('OPENAI_API_KEY', 'XXXXX') } 13 | config.filter_sensitive_data('') { ENV.fetch('ANTHROPIC_API_KEY', 'XXXXX') } 14 | end 15 | 16 | RSpec.configure do |config| 17 | config.expect_with :rspec do |c| 18 | c.syntax = :expect 19 | end 20 | end 21 | 22 | OpenAI.configure do |config| 23 | config.access_token = ENV.fetch('OPENAI_API_KEY', 'XXXXX') 24 | end 25 | 26 | Anthropic.configure do |config| 27 | config.access_token = ENV.fetch('ANTHROPIC_API_KEY', 'XXXXX') 28 | end 29 | -------------------------------------------------------------------------------- /spec/vcr_cassettes/anthropic_patch/invalid_response.yml: -------------------------------------------------------------------------------- 1 | --- 2 | http_interactions: 3 | - request: 4 | method: post 5 | uri: https://api.anthropic.com/v1/messages 6 | body: 7 | encoding: UTF-8 8 | string: '{"model":"claude-3-opus-20240229","messages":[{"role":"user","content":"Extract 9 | Jason is 25 years old"}],"max_tokens":1024,"tools":[{"name":"Invalidmodel","description":"Correctly 10 | extracted `InvalidModel` with all the required parameters with correct types","input_schema":{"type":"object","properties":{"name":{"type":"string"},"age":{"type":"string"}},"required":["name","age"]}}]}' 11 | headers: 12 | Content-Type: 13 | - application/json 14 | X-Api-Key: 15 | - 16 | Anthropic-Version: 17 | - '2023-06-01' 18 | Anthropic-Beta: 19 | - tools-2024-04-04 20 | Accept-Encoding: 21 | - gzip;q=1.0,deflate;q=0.6,identity;q=0.3 22 | Accept: 23 | - "*/*" 24 | User-Agent: 25 | - Ruby 26 | response: 27 | status: 28 | code: 200 29 | message: OK 30 | headers: 31 | Date: 32 | - Thu, 09 May 2024 20:20:11 GMT 33 | Content-Type: 34 | - application/json 35 | Transfer-Encoding: 36 | - chunked 37 | Connection: 38 | - keep-alive 39 | Anthropic-Ratelimit-Requests-Limit: 40 | - '5' 41 | Anthropic-Ratelimit-Requests-Remaining: 42 | - '4' 43 | Anthropic-Ratelimit-Requests-Reset: 44 | - 
'2024-05-09T20:21:30Z' 45 | Anthropic-Ratelimit-Tokens-Limit: 46 | - '10000' 47 | Anthropic-Ratelimit-Tokens-Remaining: 48 | - '9000' 49 | Anthropic-Ratelimit-Tokens-Reset: 50 | - '2024-05-09T20:21:30Z' 51 | Request-Id: 52 | - req_015hdwz7zx57NRsSTpN2HTV9 53 | X-Cloud-Trace-Context: 54 | - 0bd4780909a278c548f8cc6bf3bb8178 55 | Via: 56 | - 1.1 google 57 | Cf-Cache-Status: 58 | - DYNAMIC 59 | Server: 60 | - cloudflare 61 | Cf-Ray: 62 | - 881463470a0c8dd8-MIA 63 | body: 64 | encoding: ASCII-8BIT 65 | string: '{"id":"msg_01EKfajCRuz7bheKbQE5Dkqr","type":"message","role":"assistant","model":"claude-3-opus-20240229","stop_sequence":null,"usage":{"input_tokens":489,"output_tokens":206},"content":[{"type":"text","text":"\nThe 66 | key parts of the request are:\n- Extract information from text\n- The text 67 | contains a name \"Jason\" and age \"25 years old\"\n\nThe Invalidmodel tool 68 | looks relevant because it can extract a name and age. Let''s check if the 69 | required parameters are provided:\n- name: The text contains the name \"Jason\", 70 | so this can be provided \n- age: The text specifies Jason is \"25 years old\", 71 | so the age can be provided as \"25 years old\"\n\nAll the required parameters 72 | are present in the input text, so we can proceed with calling the tool.\n"},{"type":"tool_use","id":"toolu_01B6t1vi4tF9qEAx6RMH1sPU","name":"Invalidmodel","input":{"name":"Jason","age":"25 73 | years old"}}],"stop_reason":"tool_use"}' 74 | recorded_at: Thu, 09 May 2024 20:20:11 GMT 75 | recorded_with: VCR 6.2.0 76 | -------------------------------------------------------------------------------- /spec/vcr_cassettes/anthropic_patch/valid_response.yml: -------------------------------------------------------------------------------- 1 | --- 2 | http_interactions: 3 | - request: 4 | method: post 5 | uri: https://api.anthropic.com/v1/messages 6 | body: 7 | encoding: UTF-8 8 | string: '{"model":"claude-3-opus-20240229","messages":[{"role":"user","content":"Extract 9 | 
Jason is 25 years old"}],"max_tokens":1024,"tools":[{"name":"User","description":"Correctly 10 | extracted `User` with all the required parameters with correct types","input_schema":{"type":"object","properties":{"name":{"type":"string"},"age":{"type":"integer"}},"required":["name","age"]}}]}' 11 | headers: 12 | Content-Type: 13 | - application/json 14 | X-Api-Key: 15 | - 16 | Anthropic-Version: 17 | - '2023-06-01' 18 | Anthropic-Beta: 19 | - tools-2024-04-04 20 | Accept-Encoding: 21 | - gzip;q=1.0,deflate;q=0.6,identity;q=0.3 22 | Accept: 23 | - "*/*" 24 | User-Agent: 25 | - Ruby 26 | response: 27 | status: 28 | code: 200 29 | message: OK 30 | headers: 31 | Date: 32 | - Thu, 09 May 2024 16:30:05 GMT 33 | Content-Type: 34 | - application/json 35 | Transfer-Encoding: 36 | - chunked 37 | Connection: 38 | - keep-alive 39 | Anthropic-Ratelimit-Requests-Limit: 40 | - '5' 41 | Anthropic-Ratelimit-Requests-Remaining: 42 | - '4' 43 | Anthropic-Ratelimit-Requests-Reset: 44 | - '2024-05-09T16:31:30Z' 45 | Anthropic-Ratelimit-Tokens-Limit: 46 | - '10000' 47 | Anthropic-Ratelimit-Tokens-Remaining: 48 | - '9000' 49 | Anthropic-Ratelimit-Tokens-Reset: 50 | - '2024-05-09T16:31:30Z' 51 | Request-Id: 52 | - req_01LRMuxYDXrJtQa7EBuwcwrJ 53 | X-Cloud-Trace-Context: 54 | - 12b1b2d6fb75df0730777980cbf8e87a 55 | Via: 56 | - 1.1 google 57 | Cf-Cache-Status: 58 | - DYNAMIC 59 | Server: 60 | - cloudflare 61 | Cf-Ray: 62 | - 881312395967a677-MIA 63 | body: 64 | encoding: ASCII-8BIT 65 | string: '{"id":"msg_01GmF8irRChwxKSGNXWB64YA","type":"message","role":"assistant","model":"claude-3-opus-20240229","stop_sequence":null,"usage":{"input_tokens":486,"output_tokens":171},"content":[{"type":"text","text":"\nThe 66 | user has provided a sentence that appears to contain information in the format 67 | needed for the User function. 
Let''s break it down:\n\nName: The name \"Jason\" 68 | is provided directly in the input.\nAge: The age of 25 years old is provided 69 | directly after the name.\n\nBoth of the required parameters for the User function 70 | (name and age) are present in the input. Since we have all the necessary information, 71 | we can proceed with calling the User function.\n"},{"type":"tool_use","id":"toolu_01Um2D6sgPCrZmh7gNYn5Luu","name":"User","input":{"name":"Jason","age":25}}],"stop_reason":"tool_use"}' 72 | recorded_at: Thu, 09 May 2024 16:30:05 GMT 73 | recorded_with: VCR 6.2.0 74 | -------------------------------------------------------------------------------- /spec/vcr_cassettes/anthropic_patch/with_validation_context.yml: -------------------------------------------------------------------------------- 1 | --- 2 | http_interactions: 3 | - request: 4 | method: post 5 | uri: https://api.anthropic.com/v1/messages 6 | body: 7 | encoding: UTF-8 8 | string: '{"model":"claude-3-opus-20240229","messages":[{"role":"user","content":"Answer 9 | the question: What is your name and age? 
with the text chunk: my name is Jason 10 | and I turned 25 years old yesterday"}],"max_tokens":1024,"tools":[{"name":"User","description":"Correctly 11 | extracted `User` with all the required parameters with correct types","input_schema":{"type":"object","properties":{"name":{"type":"string"},"age":{"type":"integer"}},"required":["name","age"]}}]}' 12 | headers: 13 | Content-Type: 14 | - application/json 15 | X-Api-Key: 16 | - 17 | Anthropic-Version: 18 | - '2023-06-01' 19 | Anthropic-Beta: 20 | - tools-2024-04-04 21 | Accept-Encoding: 22 | - gzip;q=1.0,deflate;q=0.6,identity;q=0.3 23 | Accept: 24 | - "*/*" 25 | User-Agent: 26 | - Ruby 27 | response: 28 | status: 29 | code: 200 30 | message: OK 31 | headers: 32 | Date: 33 | - Thu, 09 May 2024 20:17:49 GMT 34 | Content-Type: 35 | - application/json 36 | Transfer-Encoding: 37 | - chunked 38 | Connection: 39 | - keep-alive 40 | Anthropic-Ratelimit-Requests-Limit: 41 | - '5' 42 | Anthropic-Ratelimit-Requests-Remaining: 43 | - '4' 44 | Anthropic-Ratelimit-Requests-Reset: 45 | - '2024-05-09T20:18:30Z' 46 | Anthropic-Ratelimit-Tokens-Limit: 47 | - '10000' 48 | Anthropic-Ratelimit-Tokens-Remaining: 49 | - '9000' 50 | Anthropic-Ratelimit-Tokens-Reset: 51 | - '2024-05-09T20:18:30Z' 52 | Request-Id: 53 | - req_01X62QzcyrjKKb1pHKVrvFj8 54 | X-Cloud-Trace-Context: 55 | - dba7eed0f154afe4bd161be4c4f86725 56 | Via: 57 | - 1.1 google 58 | Cf-Cache-Status: 59 | - DYNAMIC 60 | Server: 61 | - cloudflare 62 | Cf-Ray: 63 | - 88145fda2ef03716-MIA 64 | body: 65 | encoding: ASCII-8BIT 66 | string: '{"id":"msg_01NSQ3xhUyjHFLhFd6TMVx42","type":"message","role":"assistant","model":"claude-3-opus-20240229","stop_sequence":null,"usage":{"input_tokens":507,"output_tokens":180},"content":[{"type":"text","text":"\nThe 67 | relevant tool to use here is the User function, since the question is asking 68 | for a name and age, which matches the parameters of that function.\n\nAnalyzing 69 | the provided text for the required parameters:\nname: The 
text says \"my name 70 | is Jason\", so the name is provided and is \"Jason\"\nage: The text says \"I 71 | turned 25 years old yesterday\". So the age is provided and is 25.\n\nSince 72 | both required parameters are provided, I can proceed with calling the User 73 | function.\n"},{"type":"tool_use","id":"toolu_01V2yurTTibtau42B4QHBuCJ","name":"User","input":{"name":"Jason","age":25}}],"stop_reason":"tool_use"}' 74 | recorded_at: Thu, 09 May 2024 20:17:49 GMT 75 | recorded_with: VCR 6.2.0 76 | -------------------------------------------------------------------------------- /spec/vcr_cassettes/autoticketer/generate.yml: -------------------------------------------------------------------------------- 1 | --- 2 | http_interactions: 3 | - request: 4 | method: post 5 | uri: https://api.openai.com/v1/chat/completions 6 | body: 7 | encoding: UTF-8 8 | string: '{"model":"gpt-3.5-turbo","messages":[{"role":"system","content":"The 9 | following is a transcript of a meeting between a manager and their team. The 10 | manager is assigning tasks to their team members and creating action items 11 | for them to complete."},{"role":"user","content":"Create the action items 12 | for the following transcript: Alice: Hey team, we have several critical tasks 13 | we need to tackle for the upcoming release. First, we need to work on improving 14 | the authentication system. It''s a top priority.\n\nBob: Got it, Alice. I 15 | can take the lead on the authentication improvements. Are there any specific 16 | areas you want me to focus on?\n\nAlice: Good question, Bob. We need both 17 | a front-end revamp and back-end optimization. So basically, two sub-tasks.\n\nCarol: 18 | I can help with the front-end part of the authentication system.\n\nBob: Great, 19 | Carol. I''ll handle the back-end optimization then.\n\nAlice: Perfect. Now, 20 | after the authentication system is improved, we have to integrate it with 21 | our new billing system. 
That''s a medium priority task.\n\nCarol: Is the new 22 | billing system already in place?\n\nAlice: No, it''s actually another task. 23 | So it''s a dependency for the integration task. Bob, can you also handle the 24 | billing system?\n\nBob: Sure, but I''ll need to complete the back-end optimization 25 | of the authentication system first, so it''s dependent on that.\n\nAlice: 26 | Understood. Lastly, we also need to update our user documentation to reflect 27 | all these changes. It''s a low-priority task but still important.\n\nCarol: 28 | I can take that on once the front-end changes for the authentication system 29 | are done. So, it would be dependent on that.\n\nAlice: Sounds like a plan. 30 | Let''s get these tasks modeled out and get started.\n"}],"tools":[{"type":"function","function":{"name":"Actionitems","description":"Correctly 31 | extracted `ActionItems` with all the required parameters with correct types","parameters":{"type":"object","properties":{"items":{"type":"array","items":{"type":"object","properties":{"id":{"type":"integer","description":"Unique 32 | identifier for the ticket"},"name":{"type":"string","description":"Title of 33 | the ticket"},"description":{"type":"string","description":"Detailed description 34 | of the ticket"},"priority":{"type":"string","description":"Priority level"},"assignees":{"type":"array","items":{"type":"string"},"description":"List 35 | of users assigned to the ticket"},"subtasks":{"anyOf":[{"type":"array","items":{"type":"object","properties":{"id":{"type":"integer","description":"Unique 36 | identifier for the subtask"},"name":{"type":"string","description":"Informative 37 | title of the subtask"}},"required":["id","name"]},"description":"List of subtasks 38 | associated with the ticket"},{"type":"null"}]},"dependencies":{"anyOf":[{"type":"array","items":{"type":"integer"},"description":"List 39 | of ticket IDs that this ticket depends 
on"},{"type":"null"}]}},"required":["id","name","description","priority","assignees"]}}},"required":["items"]}}}]}' 40 | headers: 41 | Content-Type: 42 | - application/json 43 | Authorization: 44 | - Bearer 45 | Accept-Encoding: 46 | - gzip;q=1.0,deflate;q=0.6,identity;q=0.3 47 | Accept: 48 | - "*/*" 49 | User-Agent: 50 | - Ruby 51 | response: 52 | status: 53 | code: 200 54 | message: OK 55 | headers: 56 | Date: 57 | - Fri, 12 Apr 2024 18:42:05 GMT 58 | Content-Type: 59 | - application/json 60 | Transfer-Encoding: 61 | - chunked 62 | Connection: 63 | - keep-alive 64 | Access-Control-Allow-Origin: 65 | - "*" 66 | Cache-Control: 67 | - no-cache, must-revalidate 68 | Openai-Model: 69 | - gpt-3.5-turbo-0125 70 | Openai-Organization: 71 | - user-jtftkqrbreteg5pmdrfzchv6 72 | Openai-Processing-Ms: 73 | - '2596' 74 | Openai-Version: 75 | - '2020-10-01' 76 | Strict-Transport-Security: 77 | - max-age=15724800; includeSubDomains 78 | X-Ratelimit-Limit-Requests: 79 | - '10000' 80 | X-Ratelimit-Limit-Tokens: 81 | - '60000' 82 | X-Ratelimit-Remaining-Requests: 83 | - '9999' 84 | X-Ratelimit-Remaining-Tokens: 85 | - '59579' 86 | X-Ratelimit-Reset-Requests: 87 | - 8.64s 88 | X-Ratelimit-Reset-Tokens: 89 | - 421ms 90 | X-Request-Id: 91 | - req_5585e8e921a606e6ff650b185bd50af9 92 | Cf-Cache-Status: 93 | - DYNAMIC 94 | Set-Cookie: 95 | - __cf_bm=KyXMepIPH_DkWI0HlByAgyNFg0LiLphryKrP44161j0-1712947325-1.0.1.1-Xp1q6Yv31R6tOR_H91DXSzza.2NNen_DgNFpYGetiQ_IWnTPv099_aS20TkatHa3NSBOTMjQNOopWTyvShjcBg; 96 | path=/; expires=Fri, 12-Apr-24 19:12:05 GMT; domain=.api.openai.com; HttpOnly; 97 | Secure; SameSite=None 98 | - _cfuvid=al3HkpS19.N3AhU0yXzSU9vvJjrStn7bzkM3TbjPvnQ-1712947325495-0.0.1.1-604800000; 99 | path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None 100 | Server: 101 | - cloudflare 102 | Cf-Ray: 103 | - 87355a9f884d09ce-MIA 104 | Alt-Svc: 105 | - h3=":443"; ma=86400 106 | body: 107 | encoding: ASCII-8BIT 108 | string: | 109 | { 110 | "id": 
"chatcmpl-9DG8YYiZR7tfZpLP5xtm6hnOMak9J", 111 | "object": "chat.completion", 112 | "created": 1712947322, 113 | "model": "gpt-3.5-turbo-0125", 114 | "choices": [ 115 | { 116 | "index": 0, 117 | "message": { 118 | "role": "assistant", 119 | "content": null, 120 | "tool_calls": [ 121 | { 122 | "id": "call_Tw9YOum9ssHUSXOndPSYsAn6", 123 | "type": "function", 124 | "function": { 125 | "name": "Actionitems", 126 | "arguments": "{\"items\":[{\"id\":1,\"name\":\"Improve Authentication System\",\"description\":\"Work on front-end revamp and back-end optimization\",\"priority\":\"High\",\"assignees\":[\"Bob\",\"Carol\"],\"subtasks\":[{\"id\":2,\"name\":\"Front-end Revamp\"},{\"id\":3,\"name\":\"Back-end Optimization\"}]},{\"id\":4,\"name\":\"Integrate Authentication System with New Billing System\",\"description\":\"Integrate authentication system with the new billing system\",\"priority\":\"Medium\",\"assignees\":[\"Bob\"],\"dependencies\":[1]},{\"id\":5,\"name\":\"Update User Documentation\",\"description\":\"Update user documentation to reflect changes\",\"priority\":\"Low\",\"assignees\":[\"Carol\"],\"dependencies\":[2]}]}" 127 | } 128 | } 129 | ] 130 | }, 131 | "logprobs": null, 132 | "finish_reason": "tool_calls" 133 | } 134 | ], 135 | "usage": { 136 | "prompt_tokens": 525, 137 | "completion_tokens": 147, 138 | "total_tokens": 672 139 | }, 140 | "system_fingerprint": "fp_c2295e73ad" 141 | } 142 | recorded_at: Fri, 12 Apr 2024 18:42:05 GMT 143 | recorded_with: VCR 6.2.0 144 | -------------------------------------------------------------------------------- /spec/vcr_cassettes/basic_spec/valid_response.yml: -------------------------------------------------------------------------------- 1 | --- 2 | http_interactions: 3 | - request: 4 | method: post 5 | uri: https://api.openai.com/v1/chat/completions 6 | body: 7 | encoding: UTF-8 8 | string: '{"model":"gpt-3.5-turbo","messages":[{"role":"user","content":"Extract 9 | Jason is 25 years 
old"}],"tools":[{"type":"function","function":{"name":"User","description":"Correctly 10 | extracted `User` with all the required parameters with correct types","parameters":{"type":"object","properties":{"name":{"type":"string"},"age":{"type":"integer"}},"required":["name","age"]}}}],"tool_choice":{"type":"function","function":{"name":"User"}}}' 11 | headers: 12 | Content-Type: 13 | - application/json 14 | Authorization: 15 | - Bearer 16 | Accept-Encoding: 17 | - gzip;q=1.0,deflate;q=0.6,identity;q=0.3 18 | Accept: 19 | - "*/*" 20 | User-Agent: 21 | - Ruby 22 | response: 23 | status: 24 | code: 200 25 | message: OK 26 | headers: 27 | Date: 28 | - Fri, 17 May 2024 21:43:36 GMT 29 | Content-Type: 30 | - application/json 31 | Transfer-Encoding: 32 | - chunked 33 | Connection: 34 | - keep-alive 35 | Openai-Organization: 36 | - user-jtftkqrbreteg5pmdrfzchv6 37 | Openai-Processing-Ms: 38 | - '279' 39 | Openai-Version: 40 | - '2020-10-01' 41 | Strict-Transport-Security: 42 | - max-age=15724800; includeSubDomains 43 | X-Ratelimit-Limit-Requests: 44 | - '10000' 45 | X-Ratelimit-Limit-Tokens: 46 | - '60000' 47 | X-Ratelimit-Remaining-Requests: 48 | - '9999' 49 | X-Ratelimit-Remaining-Tokens: 50 | - '59975' 51 | X-Ratelimit-Reset-Requests: 52 | - 8.64s 53 | X-Ratelimit-Reset-Tokens: 54 | - 25ms 55 | X-Request-Id: 56 | - req_0beb8aa55a830e8bd7e8eed01a05f3c0 57 | Cf-Cache-Status: 58 | - DYNAMIC 59 | Set-Cookie: 60 | - __cf_bm=XgFm9EbhV9_dCJnOyPugQGI.kXwrbCdRMOUM.aYIj44-1715982216-1.0.1.1-73M1D9t7hChSuX90po2Iyk26I1LElUZMiexlToP_fUTSu5kWd4KCfUbThlUttI2K0ZX4gHtk1JR13lJ6Au4oKQ; 61 | path=/; expires=Fri, 17-May-24 22:13:36 GMT; domain=.api.openai.com; HttpOnly; 62 | Secure; SameSite=None 63 | - _cfuvid=l1RnAIP4_bCW3R17g4Yh9oJuKE.stp0kZcHGI_O8ddE-1715982216818-0.0.1.1-604800000; 64 | path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None 65 | Server: 66 | - cloudflare 67 | Cf-Ray: 68 | - 8856c8b47de36db3-MIA 69 | Alt-Svc: 70 | - h3=":443"; ma=86400 71 | body: 72 | encoding: 
ASCII-8BIT 73 | string: | 74 | { 75 | "id": "chatcmpl-9PzeSAbfLl2wMV4tuBGPZjIW35bHD", 76 | "object": "chat.completion", 77 | "created": 1715982216, 78 | "model": "gpt-3.5-turbo-0125", 79 | "choices": [ 80 | { 81 | "index": 0, 82 | "message": { 83 | "role": "assistant", 84 | "content": null, 85 | "tool_calls": [ 86 | { 87 | "id": "call_6t9IRWIHRRjInUG3kMAflgRi", 88 | "type": "function", 89 | "function": { 90 | "name": "User", 91 | "arguments": "{\"name\":\"Jason\",\"age\":25}" 92 | } 93 | } 94 | ] 95 | }, 96 | "logprobs": null, 97 | "finish_reason": "stop" 98 | } 99 | ], 100 | "usage": { 101 | "prompt_tokens": 71, 102 | "completion_tokens": 9, 103 | "total_tokens": 80 104 | }, 105 | "system_fingerprint": null 106 | } 107 | recorded_at: Fri, 17 May 2024 21:43:36 GMT 108 | recorded_with: VCR 6.2.0 109 | -------------------------------------------------------------------------------- /spec/vcr_cassettes/iterable_spec/valid_response.yml: -------------------------------------------------------------------------------- 1 | --- 2 | http_interactions: 3 | - request: 4 | method: post 5 | uri: https://api.openai.com/v1/chat/completions 6 | body: 7 | encoding: UTF-8 8 | string: '{"model":"gpt-3.5-turbo","messages":[{"role":"system","content":"Extract 9 | the names and ages of all the users"},{"role":"user","content":"Extract `Jason 10 | is 25 and Peter is 32`"}],"tools":[{"type":"function","function":{"name":"Users","description":"Correctly 11 | extracted `Users` with all the required parameters with correct types","parameters":{"type":"object","properties":{"name":{"type":"string"},"age":{"type":"integer"}},"required":["name","age"]}}}],"tool_choice":"required"}' 12 | headers: 13 | Content-Type: 14 | - application/json 15 | Authorization: 16 | - Bearer 17 | Accept-Encoding: 18 | - gzip;q=1.0,deflate;q=0.6,identity;q=0.3 19 | Accept: 20 | - "*/*" 21 | User-Agent: 22 | - Ruby 23 | response: 24 | status: 25 | code: 200 26 | message: OK 27 | headers: 28 | Date: 29 | - Sat, 18 May 
2024 01:35:47 GMT 30 | Content-Type: 31 | - application/json 32 | Transfer-Encoding: 33 | - chunked 34 | Connection: 35 | - keep-alive 36 | Openai-Organization: 37 | - user-jtftkqrbreteg5pmdrfzchv6 38 | Openai-Processing-Ms: 39 | - '901' 40 | Openai-Version: 41 | - '2020-10-01' 42 | Strict-Transport-Security: 43 | - max-age=15724800; includeSubDomains 44 | X-Ratelimit-Limit-Requests: 45 | - '10000' 46 | X-Ratelimit-Limit-Tokens: 47 | - '60000' 48 | X-Ratelimit-Remaining-Requests: 49 | - '9999' 50 | X-Ratelimit-Remaining-Tokens: 51 | - '59962' 52 | X-Ratelimit-Reset-Requests: 53 | - 8.64s 54 | X-Ratelimit-Reset-Tokens: 55 | - 38ms 56 | X-Request-Id: 57 | - req_e541e0199ba8ab4969cebaa93ecf7621 58 | Cf-Cache-Status: 59 | - DYNAMIC 60 | Set-Cookie: 61 | - __cf_bm=Vw9tLN_5v0HADGwDxZlp_Xsteou0sxwUhw2uxhb2w.k-1715996147-1.0.1.1-44Ok_xULGiNWAg1PtLfVhxfNW1n4v.YZwAxEAFUFk7R2vJwYJA0bOiew0M7VI.F3mFVplHj4A_VfVZgbXyOm1Q; 62 | path=/; expires=Sat, 18-May-24 02:05:47 GMT; domain=.api.openai.com; HttpOnly; 63 | Secure; SameSite=None 64 | - _cfuvid=798B92b1HxrrOu9V8vLMsh5LvISIS._fwZvtGrxmwzs-1715996147279-0.0.1.1-604800000; 65 | path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None 66 | Server: 67 | - cloudflare 68 | Cf-Ray: 69 | - 88581cc9aeae335e-MIA 70 | Alt-Svc: 71 | - h3=":443"; ma=86400 72 | body: 73 | encoding: ASCII-8BIT 74 | string: | 75 | { 76 | "id": "chatcmpl-9Q3H8gCBtHqh2I1IS8JRDnVEhy5vM", 77 | "object": "chat.completion", 78 | "created": 1715996146, 79 | "model": "gpt-3.5-turbo-0125", 80 | "choices": [ 81 | { 82 | "index": 0, 83 | "message": { 84 | "role": "assistant", 85 | "content": null, 86 | "tool_calls": [ 87 | { 88 | "id": "call_ZjPBhmR9P86MQv1GuKct4pyx", 89 | "type": "function", 90 | "function": { 91 | "name": "Users", 92 | "arguments": "{\"name\": \"Jason\", \"age\": 25}" 93 | } 94 | }, 95 | { 96 | "id": "call_yaCW68EMq8VYIACaXdAZrc9z", 97 | "type": "function", 98 | "function": { 99 | "name": "Users", 100 | "arguments": "{\"name\": \"Peter\", \"age\": 
32}" 101 | } 102 | } 103 | ] 104 | }, 105 | "logprobs": null, 106 | "finish_reason": "stop" 107 | } 108 | ], 109 | "usage": { 110 | "prompt_tokens": 80, 111 | "completion_tokens": 47, 112 | "total_tokens": 127 113 | }, 114 | "system_fingerprint": null 115 | } 116 | recorded_at: Sat, 18 May 2024 01:35:47 GMT 117 | recorded_with: VCR 6.2.0 118 | -------------------------------------------------------------------------------- /spec/vcr_cassettes/patching_spec/invalid_response.yml: -------------------------------------------------------------------------------- 1 | --- 2 | http_interactions: 3 | - request: 4 | method: post 5 | uri: https://api.openai.com/v1/chat/completions 6 | body: 7 | encoding: UTF-8 8 | string: '{"model":"gpt-3.5-turbo","messages":[{"role":"user","content":"Extract 9 | Jason is 25 years old"}],"tools":[{"type":"function","function":{"name":"InvalidModel","description":"Correctly 10 | extracted `InvalidModel` with all the required parameters with correct types","parameters":{"type":"object","properties":{"name":{"type":"string"},"age":{"type":"string"}},"required":["name","age"]}}}],"tool_choice":{"type":"function","function":{"name":"InvalidModel"}}}' 11 | headers: 12 | Content-Type: 13 | - application/json 14 | Authorization: 15 | - Bearer 16 | Accept-Encoding: 17 | - gzip;q=1.0,deflate;q=0.6,identity;q=0.3 18 | Accept: 19 | - "*/*" 20 | User-Agent: 21 | - Ruby 22 | response: 23 | status: 24 | code: 200 25 | message: OK 26 | headers: 27 | Date: 28 | - Fri, 17 May 2024 21:38:40 GMT 29 | Content-Type: 30 | - application/json 31 | Transfer-Encoding: 32 | - chunked 33 | Connection: 34 | - keep-alive 35 | Openai-Organization: 36 | - user-jtftkqrbreteg5pmdrfzchv6 37 | Openai-Processing-Ms: 38 | - '464' 39 | Openai-Version: 40 | - '2020-10-01' 41 | Strict-Transport-Security: 42 | - max-age=15724800; includeSubDomains 43 | X-Ratelimit-Limit-Requests: 44 | - '10000' 45 | X-Ratelimit-Limit-Tokens: 46 | - '60000' 47 | X-Ratelimit-Remaining-Requests: 48 | - 
'9997' 49 | X-Ratelimit-Remaining-Tokens: 50 | - '59975' 51 | X-Ratelimit-Reset-Requests: 52 | - 24.331s 53 | X-Ratelimit-Reset-Tokens: 54 | - 25ms 55 | X-Request-Id: 56 | - req_bc8846f72bc7e45aea0089199393efb5 57 | Cf-Cache-Status: 58 | - DYNAMIC 59 | Set-Cookie: 60 | - __cf_bm=SN9Aw.9Y6MoQjya5MwMHqAccv0nbP0idUZewJvSCYSA-1715981920-1.0.1.1-tLf4tiuhqDimwnjiJh23BSsopzWUVqlcWRMmkmlEpnOPseRbPQUcDZPk.854YQh8SbisfZlF3eif8Ny5eEDUZw; 61 | path=/; expires=Fri, 17-May-24 22:08:40 GMT; domain=.api.openai.com; HttpOnly; 62 | Secure; SameSite=None 63 | - _cfuvid=hJJnzWXK76oChvkPWG7J4e8h4Ib9K4tdSN1o14Ku07c-1715981920951-0.0.1.1-604800000; 64 | path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None 65 | Server: 66 | - cloudflare 67 | Cf-Ray: 68 | - 8856c17a5d5309c2-MIA 69 | Alt-Svc: 70 | - h3=":443"; ma=86400 71 | body: 72 | encoding: ASCII-8BIT 73 | string: | 74 | { 75 | "id": "chatcmpl-9PzZgf0aZfhyh1YHH15sZnvUAKr6T", 76 | "object": "chat.completion", 77 | "created": 1715981920, 78 | "model": "gpt-3.5-turbo-0125", 79 | "choices": [ 80 | { 81 | "index": 0, 82 | "message": { 83 | "role": "assistant", 84 | "content": null, 85 | "tool_calls": [ 86 | { 87 | "id": "call_ZB4NXbxzAdnfPXZQI8FdfQ2q", 88 | "type": "function", 89 | "function": { 90 | "name": "InvalidModel", 91 | "arguments": "{\"name\":\"Jason\",\"age\":25}" 92 | } 93 | } 94 | ] 95 | }, 96 | "logprobs": null, 97 | "finish_reason": "stop" 98 | } 99 | ], 100 | "usage": { 101 | "prompt_tokens": 74, 102 | "completion_tokens": 9, 103 | "total_tokens": 83 104 | }, 105 | "system_fingerprint": null 106 | } 107 | recorded_at: Fri, 17 May 2024 21:38:40 GMT 108 | recorded_with: VCR 6.2.0 109 | -------------------------------------------------------------------------------- /spec/vcr_cassettes/patching_spec/standard_usage.yml: -------------------------------------------------------------------------------- 1 | --- 2 | http_interactions: 3 | - request: 4 | method: post 5 | uri: https://api.openai.com/v1/chat/completions 6 | 
body: 7 | encoding: UTF-8 8 | string: '{"model":"gpt-3.5-turbo","messages":[{"role":"user","content":"How 9 | is the weather today in New York?"}]}' 10 | headers: 11 | Content-Type: 12 | - application/json 13 | Authorization: 14 | - Bearer 15 | Accept-Encoding: 16 | - gzip;q=1.0,deflate;q=0.6,identity;q=0.3 17 | Accept: 18 | - "*/*" 19 | User-Agent: 20 | - Ruby 21 | response: 22 | status: 23 | code: 200 24 | message: OK 25 | headers: 26 | Date: 27 | - Mon, 20 May 2024 20:18:44 GMT 28 | Content-Type: 29 | - application/json 30 | Transfer-Encoding: 31 | - chunked 32 | Connection: 33 | - keep-alive 34 | Openai-Organization: 35 | - user-jtftkqrbreteg5pmdrfzchv6 36 | Openai-Processing-Ms: 37 | - '1141' 38 | Openai-Version: 39 | - '2020-10-01' 40 | Strict-Transport-Security: 41 | - max-age=15724800; includeSubDomains 42 | X-Ratelimit-Limit-Requests: 43 | - '10000' 44 | X-Ratelimit-Limit-Tokens: 45 | - '60000' 46 | X-Ratelimit-Remaining-Requests: 47 | - '9999' 48 | X-Ratelimit-Remaining-Tokens: 49 | - '59973' 50 | X-Ratelimit-Reset-Requests: 51 | - 8.64s 52 | X-Ratelimit-Reset-Tokens: 53 | - 27ms 54 | X-Request-Id: 55 | - req_9cd156b89ffbc49d042594df684cd71c 56 | Cf-Cache-Status: 57 | - DYNAMIC 58 | Set-Cookie: 59 | - __cf_bm=iw7.dE814kHN0QINKJy3dRtjL44VSwwBZFyDHvN2pNo-1716236324-1.0.1.1-_sZi0S6Fm2fn7rSXA7NQ22IbWSrA0yBXtvFZ7BgT06q2J6upyZqiU7WDAqYfK3_DCCsi.xd2.aDu1erMGdb9dg; 60 | path=/; expires=Mon, 20-May-24 20:48:44 GMT; domain=.api.openai.com; HttpOnly; 61 | Secure; SameSite=None 62 | - _cfuvid=l.4Wi13joGQwrPn0H.rLjBrxZJCHqY15_r5zTRua9H0-1716236324578-0.0.1.1-604800000; 63 | path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None 64 | Server: 65 | - cloudflare 66 | Cf-Ray: 67 | - 886f047cea6374ac-MIA 68 | Alt-Svc: 69 | - h3=":443"; ma=86400 70 | body: 71 | encoding: ASCII-8BIT 72 | string: | 73 | { 74 | "id": "chatcmpl-9R3kx1j8twG05UJGM5ReVEpjUUHtL", 75 | "object": "chat.completion", 76 | "created": 1716236323, 77 | "model": "gpt-3.5-turbo-0125", 78 | 
"choices": [ 79 | { 80 | "index": 0, 81 | "message": { 82 | "role": "assistant", 83 | "content": "I'm sorry, I cannot provide real-time weather information. I recommend checking a reliable weather website or app for the most up-to-date weather conditions in New York." 84 | }, 85 | "logprobs": null, 86 | "finish_reason": "stop" 87 | } 88 | ], 89 | "usage": { 90 | "prompt_tokens": 16, 91 | "completion_tokens": 33, 92 | "total_tokens": 49 93 | }, 94 | "system_fingerprint": null 95 | } 96 | recorded_at: Mon, 20 May 2024 20:18:44 GMT 97 | recorded_with: VCR 6.2.0 98 | -------------------------------------------------------------------------------- /spec/vcr_cassettes/patching_spec/valid_response.yml: -------------------------------------------------------------------------------- 1 | --- 2 | http_interactions: 3 | - request: 4 | method: post 5 | uri: https://api.openai.com/v1/chat/completions 6 | body: 7 | encoding: UTF-8 8 | string: '{"model":"gpt-3.5-turbo","messages":[{"role":"user","content":"Extract 9 | Jason is 25 years old"}],"tools":[{"type":"function","function":{"name":"SomeUser","description":"Extract 10 | the user''s name and age.","parameters":{"type":"object","title":"SomeUser","properties":{"name":{"type":"string"},"age":{"type":"integer"}},"required":["name","age"]}}}],"tool_choice":{"type":"function","function":{"name":"SomeUser"}}}' 11 | headers: 12 | Content-Type: 13 | - application/json 14 | Authorization: 15 | - Bearer 16 | Accept-Encoding: 17 | - gzip;q=1.0,deflate;q=0.6,identity;q=0.3 18 | Accept: 19 | - "*/*" 20 | User-Agent: 21 | - Ruby 22 | response: 23 | status: 24 | code: 200 25 | message: OK 26 | headers: 27 | Date: 28 | - Fri, 17 May 2024 21:38:39 GMT 29 | Content-Type: 30 | - application/json 31 | Transfer-Encoding: 32 | - chunked 33 | Connection: 34 | - keep-alive 35 | Openai-Organization: 36 | - user-jtftkqrbreteg5pmdrfzchv6 37 | Openai-Processing-Ms: 38 | - '484' 39 | Openai-Version: 40 | - '2020-10-01' 41 | Strict-Transport-Security: 
42 | - max-age=15724800; includeSubDomains 43 | X-Ratelimit-Limit-Requests: 44 | - '10000' 45 | X-Ratelimit-Limit-Tokens: 46 | - '60000' 47 | X-Ratelimit-Remaining-Requests: 48 | - '9999' 49 | X-Ratelimit-Remaining-Tokens: 50 | - '59975' 51 | X-Ratelimit-Reset-Requests: 52 | - 8.64s 53 | X-Ratelimit-Reset-Tokens: 54 | - 25ms 55 | X-Request-Id: 56 | - req_5c69bf259dd0f0bb146f5e62bddb82cd 57 | Cf-Cache-Status: 58 | - DYNAMIC 59 | Set-Cookie: 60 | - __cf_bm=oC7QqBACb1oJFAJGRqB4BRd.IS5EGaew.qR54Kf2IbA-1715981919-1.0.1.1-gaQXQc17imcz8t4AXhsl0KkoKTjZwzl9WfJwjaY1BJChjWBBkwmCmFgh9J0KYMCf2Ftiwai.tOTVjye4iTMo7g; 61 | path=/; expires=Fri, 17-May-24 22:08:39 GMT; domain=.api.openai.com; HttpOnly; 62 | Secure; SameSite=None 63 | - _cfuvid=Z9nW6TFZQpVmsPdzvKij3JJHzCgCrePIBUzh.6dEIf4-1715981919401-0.0.1.1-604800000; 64 | path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None 65 | Server: 66 | - cloudflare 67 | Cf-Ray: 68 | - 8856c1705d194974-MIA 69 | Alt-Svc: 70 | - h3=":443"; ma=86400 71 | body: 72 | encoding: ASCII-8BIT 73 | string: | 74 | { 75 | "id": "chatcmpl-9PzZfA5SLIFg19i7NuvQzj2tmiFmZ", 76 | "object": "chat.completion", 77 | "created": 1715981919, 78 | "model": "gpt-3.5-turbo-0125", 79 | "choices": [ 80 | { 81 | "index": 0, 82 | "message": { 83 | "role": "assistant", 84 | "content": null, 85 | "tool_calls": [ 86 | { 87 | "id": "call_OmMlsCcbpCSf7p9PuJyi3hpg", 88 | "type": "function", 89 | "function": { 90 | "name": "SomeUser", 91 | "arguments": "{\"name\":\"Jason\",\"age\":25}" 92 | } 93 | } 94 | ] 95 | }, 96 | "logprobs": null, 97 | "finish_reason": "stop" 98 | } 99 | ], 100 | "usage": { 101 | "prompt_tokens": 70, 102 | "completion_tokens": 9, 103 | "total_tokens": 79 104 | }, 105 | "system_fingerprint": null 106 | } 107 | recorded_at: Fri, 17 May 2024 21:38:39 GMT 108 | recorded_with: VCR 6.2.0 109 | -------------------------------------------------------------------------------- /spec/vcr_cassettes/patching_spec/with_validation_context.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | http_interactions: 3 | - request: 4 | method: post 5 | uri: https://api.openai.com/v1/chat/completions 6 | body: 7 | encoding: UTF-8 8 | string: '{"model":"gpt-3.5-turbo","messages":[{"role":"user","content":"Answer 9 | the question: What is your name and age? with the text chunk: my name is Jason 10 | and I turned 25 years old yesterday"}],"tools":[{"type":"function","function":{"name":"SomeUser","description":"Extract 11 | the user''s name and age.","parameters":{"type":"object","title":"SomeUser","properties":{"name":{"type":"string"},"age":{"type":"integer"}},"required":["name","age"]}}}],"tool_choice":{"type":"function","function":{"name":"SomeUser"}}}' 12 | headers: 13 | Content-Type: 14 | - application/json 15 | Authorization: 16 | - Bearer 17 | Accept-Encoding: 18 | - gzip;q=1.0,deflate;q=0.6,identity;q=0.3 19 | Accept: 20 | - "*/*" 21 | User-Agent: 22 | - Ruby 23 | response: 24 | status: 25 | code: 200 26 | message: OK 27 | headers: 28 | Date: 29 | - Fri, 17 May 2024 21:38:40 GMT 30 | Content-Type: 31 | - application/json 32 | Transfer-Encoding: 33 | - chunked 34 | Connection: 35 | - keep-alive 36 | Openai-Organization: 37 | - user-jtftkqrbreteg5pmdrfzchv6 38 | Openai-Processing-Ms: 39 | - '380' 40 | Openai-Version: 41 | - '2020-10-01' 42 | Strict-Transport-Security: 43 | - max-age=15724800; includeSubDomains 44 | X-Ratelimit-Limit-Requests: 45 | - '10000' 46 | X-Ratelimit-Limit-Tokens: 47 | - '60000' 48 | X-Ratelimit-Remaining-Requests: 49 | - '9998' 50 | X-Ratelimit-Remaining-Tokens: 51 | - '59952' 52 | X-Ratelimit-Reset-Requests: 53 | - 16.422s 54 | X-Ratelimit-Reset-Tokens: 55 | - 48ms 56 | X-Request-Id: 57 | - req_b75552a51b70614ba55e34bc8f46df80 58 | Cf-Cache-Status: 59 | - DYNAMIC 60 | Set-Cookie: 61 | - 
__cf_bm=f9c6ADDiOU1NPe3yv0iVc04Ol9CdLniiKCNTBo4Uk14-1715981920-1.0.1.1-u0cGWsKcgvFBXiPMxrO30A09FPBs3nQPk.s_FIAALRxQ52Yb0oPM7Jj_TzCbKRs2L03szG9fhbn462mYBWFzhg; 62 | path=/; expires=Fri, 17-May-24 22:08:40 GMT; domain=.api.openai.com; HttpOnly; 63 | Secure; SameSite=None 64 | - _cfuvid=QibJLZjeMze7JqBsIiNn8haLvSu3cGDyhzKtW0yu9Ws-1715981920116-0.0.1.1-604800000; 65 | path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None 66 | Server: 67 | - cloudflare 68 | Cf-Ray: 69 | - 8856c175cf710331-MIA 70 | Alt-Svc: 71 | - h3=":443"; ma=86400 72 | body: 73 | encoding: ASCII-8BIT 74 | string: | 75 | { 76 | "id": "chatcmpl-9PzZfj3nY4z0btlRlNo23t4uooL0p", 77 | "object": "chat.completion", 78 | "created": 1715981919, 79 | "model": "gpt-3.5-turbo-0125", 80 | "choices": [ 81 | { 82 | "index": 0, 83 | "message": { 84 | "role": "assistant", 85 | "content": null, 86 | "tool_calls": [ 87 | { 88 | "id": "call_ul0rzkJ5DEOx2O52v05JQT5H", 89 | "type": "function", 90 | "function": { 91 | "name": "SomeUser", 92 | "arguments": "{\"name\":\"Jason\",\"age\":25}" 93 | } 94 | } 95 | ] 96 | }, 97 | "logprobs": null, 98 | "finish_reason": "stop" 99 | } 100 | ], 101 | "usage": { 102 | "prompt_tokens": 91, 103 | "completion_tokens": 9, 104 | "total_tokens": 100 105 | }, 106 | "system_fingerprint": null 107 | } 108 | recorded_at: Fri, 17 May 2024 21:38:40 GMT 109 | recorded_with: VCR 6.2.0 110 | --------------------------------------------------------------------------------