├── .github └── workflows │ ├── publish.yaml │ └── test.yaml ├── .gitignore ├── CHANGELOG.md ├── LICENSE ├── README.md ├── analysis_options.yaml ├── example └── palm_api_example.dart ├── lib ├── palm_api.dart └── src │ ├── dto │ ├── citation.dto.dart │ ├── discuss_service.dto.dart │ ├── export.dart │ ├── model.dto.dart │ ├── model_service.dto.dart │ ├── safety.dto.dart │ └── text_service.dto.dart │ ├── helpers │ ├── client.dart │ ├── deep_collection_equality.dart │ ├── exception.dart │ ├── export.dart │ ├── palm_api_version.dart │ └── palm_model_variations.dart │ ├── palm_api_base.dart │ └── services │ ├── base_service.dart │ ├── discuss_service.dart │ ├── export.dart │ ├── generative_language_service.dart │ ├── model_service.dart │ └── text_service.dart ├── open_api.yml ├── palm_api.md ├── pubspec.yaml ├── scripts └── get_codebase.dart └── test ├── helpers ├── client_test.dart ├── deep_collection_equality_test.dart ├── list_extension.dart └── mock_client.dart ├── palm_api_test.dart ├── services └── model_service_test.dart └── testing_helpers.dart /.github/workflows/publish.yaml: -------------------------------------------------------------------------------- 1 | # Do not run workflow for now 2 | name: Publish 3 | on: 4 | release: 5 | types: [published] 6 | 7 | jobs: 8 | publish: 9 | runs-on: ubuntu-latest 10 | 11 | container: 12 | image: dart:latest 13 | 14 | env: 15 | IS_CI: true 16 | PALM_API_KEY: ${{ secrets.PALM_API_KEY }} 17 | 18 | steps: 19 | - uses: actions/checkout@v2 20 | 21 | - name: Install dependencies 22 | run: dart pub get 23 | 24 | - name: Run tests 25 | run: dart pub run test 26 | 27 | - name: Publish Dart package 28 | uses: k-paxian/dart-package-publisher@master 29 | with: 30 | refreshToken: ${{ secrets.PUB_REFRESH_TOKEN }} 31 | accessToken: ${{ secrets.PUB_ACCESS_TOKEN }} 32 | -------------------------------------------------------------------------------- /.github/workflows/test.yaml: 
-------------------------------------------------------------------------------- 1 | name: Test 2 | 3 | on: 4 | push: 5 | branches: [main] 6 | pull_request: 7 | branches: [main] 8 | 9 | jobs: 10 | test: 11 | runs-on: ubuntu-latest 12 | container: 13 | image: dart:latest 14 | env: 15 | IS_CI: true 16 | PALM_API_KEY: ${{ secrets.PALM_API_KEY }} 17 | steps: 18 | - uses: actions/checkout@v2 19 | - name: Install dependencies 20 | run: dart pub get 21 | 22 | # Run test 23 | - name: Run tests 24 | run: dart pub run test 25 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # See https://www.dartlang.org/guides/libraries/private-files 2 | 3 | # Files and directories created by pub 4 | .dart_tool/ 5 | .packages 6 | build/ 7 | # If you're building an application, you may want to check-in your pubspec.lock 8 | pubspec.lock 9 | 10 | # Directory created by dartdoc 11 | # If you don't generate documentation locally you can remove this line. 12 | doc/api/ 13 | 14 | # dotenv environment variables file 15 | .env* 16 | 17 | # Avoid committing generated Javascript files: 18 | *.dart.js 19 | *.info.json # Produced by the --dump-info flag. 20 | *.js # When generated by dart2js. Don't specify *.js if your 21 | # project includes source files written in JavaScript. 
22 | *.js_ 23 | *.js.deps 24 | *.js.map 25 | 26 | .flutter-plugins 27 | .flutter-plugins-dependencies 28 | .DS_Store 29 | issues/codebase.md 30 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | ## 0.0.3 2 | 3 | What's Changed 4 | 5 | - Fix errors on type conversion by @leoafarias 6 | - Fix example usage of PalmModel.textBison001 in README by @IsometricShahil in #5 7 | 8 | New Contributors 9 | 10 | - @IsometricShahil made their first contribution in #5 11 | 12 | ## 0.0.2 13 | 14 | - Removed "collection" as a dependency. 15 | 16 | ## 0.0.1 17 | 18 | - Initial version. 19 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2023, Leo Farias 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | 1. Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | 2. Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | 3. Neither the name of the copyright holder nor the names of its 17 | contributors may be used to endorse or promote products derived from 18 | this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 | DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # PaLM API Client 2 | 3 | 4 | ## Archived: Google now has an official SDK for Gemnini. 5 | 6 | 7 | A Dart client for interacting with the PaLM API, allowing developers to build generative AI applications. 8 | 9 | ## About PaLM 10 | 11 | The [PaLM API](https://developers.generativeai.google) is based on Google's next-generation LLM, PaLM. It excels at a variety of different tasks like code generation, reasoning, and writing. You can use the PaLM API to build generative AI applications for use cases like content generation, dialogue agents, summarization and classification systems, and more. 12 | 13 | For more information, refer to the official PaLM API documentation: 14 | 15 | - [PaLM API Guide](https://developers.generativeai.google/guide/palm_api_overview) 16 | - [PaLM API Documentation](https://developers.generativeai.google/api) 17 | 18 | ## Installation 19 | 20 | To use this package, add `palm_api` as a [dependency in your pubspec.yaml file](https://flutter.dev/docs/development/packages-and-plugins/using-packages). 21 | 22 | ```yaml 23 | dependencies: 24 | palm_api: latest 25 | ``` 26 | 27 | ## Usage 28 | 29 | To use this package, you need to: 30 | 31 | 1. 
Obtain an [API key](https://developers.generativeai.google/tutorials/setup) for the Google Palm API 32 | 2. Instantiate the service by passing your API key. 33 | 3. Use the client to access the API endpoints. 34 | 35 | For example: 36 | 37 | ```dart 38 | import 'package:palm_api/palm_api.dart'; 39 | 40 | // Instantiate the client 41 | final palm = TextService(apiKey: 'YOUR_API_KEY'); 42 | 43 | // Generate a message 44 | final response = await palm.generateText( 45 | model: PalmModel.textBison001.name, // or 'text-bison-001', 46 | prompt: TextPrompt(text: 'Hello'), 47 | ); 48 | 49 | // Print the candidates 50 | response.candidates.forEach(print); 51 | ``` 52 | 53 | ## Features 54 | 55 | ModelService: 56 | 57 | - `listModel`: Lists available models. 58 | - `getModel`: Get the `Model` for the given model name. 59 | 60 | DiscussService: 61 | 62 | - `generateMessage`: Generates message responses for chatbots and assistants. 63 | - `countMessageTokens`: Counts the number of tokens in a prompt for a given model. 64 | 65 | TextService: 66 | 67 | - `generateText`: Generates text completions 68 | - `embedText`: Gets text embeddings for a given text input. 69 | 70 | ## Model Parameters 71 | 72 | The PaLM API allows you to customize the generated text by specifying various parameters. Below are some of the key parameters that you can set when sending a request to the API: 73 | 74 | - `model`: (Required) The model name to use with the format 75 | - `prompt`: (Required) The free-form input text given to the model as a prompt. Given a prompt, the model will generate a TextCompletion response it predicts as the completion of the input text. 76 | - `temperature`: Controls the randomness of the output. Values can range from 0.0 to 1.0, inclusive. A value closer to 1.0 will produce responses that are more varied and creative, while a value closer to 0.0 will typically result in more straightforward responses from the model. 
77 | - `candidateCount`: Number of generated responses to return. This value must be between 1 and 8, inclusive. If unset, this will default to 1. 78 | - `maxOutputTokens`: The maximum number of tokens to include in a candidate. If unset, this will default to 64. 79 | - `topP`: The maximum cumulative probability of tokens to consider when sampling. The model uses combined Top-k and nucleus sampling. Tokens are sorted based on their assigned probabilities so that only the most likely tokens are considered. 80 | - `topK`: The maximum number of tokens to consider when sampling. The model uses combined Top-k and nucleus sampling. Top-k sampling considers the set of `top_k` most probable tokens. Defaults to 40. 81 | - `stopSequences`: The set of character sequences (up to 5) that will stop output generation. If specified, the API will stop at the first appearance of a stop sequence. The stop sequence will not be included as part of the response. 82 | 83 | ## TODO: 84 | - [ ] Makersuite jSON export support 85 | - [ ] Proxy using for API KEY security 86 | 87 | -------------------------------------------------------------------------------- /analysis_options.yaml: -------------------------------------------------------------------------------- 1 | # This file configures the static analysis results for your project (errors, 2 | # warnings, and lints). 3 | # 4 | # This enables the 'recommended' set of lints from `package:lints`. 5 | # This set helps identify many issues that may lead to problems when running 6 | # or consuming Dart code, and enforces writing Dart using a single, idiomatic 7 | # style and format. 8 | # 9 | # If you want a smaller set of lints you can change this to specify 10 | # 'package:lints/core.yaml'. These are just the most critical lints 11 | # (the recommended set includes the core lints). 12 | # The core lints are also what is used by pub.dev for scoring packages. 
13 | 14 | include: package:lints/recommended.yaml 15 | 16 | # Uncomment the following section to specify additional rules. 17 | 18 | # linter: 19 | # rules: 20 | # - camel_case_types 21 | 22 | # analyzer: 23 | # exclude: 24 | # - path/to/excluded/files/** 25 | 26 | # For more information about the core and recommended set of lints, see 27 | # https://dart.dev/go/core-lints 28 | 29 | # For additional information about configuring this file, see 30 | # https://dart.dev/guides/language/analysis-options 31 | -------------------------------------------------------------------------------- /example/palm_api_example.dart: -------------------------------------------------------------------------------- 1 | import 'package:palm_api/palm_api.dart'; 2 | 3 | Future main() async { 4 | // Instantiate the client 5 | final palm = TextService(apiKey: 'YOUR_API_KEY'); 6 | 7 | // Generate a message 8 | final response = await palm.generateText( 9 | model: PalmModel.textBison001.name, // or 'text-bison-001' 10 | prompt: TextPrompt(text: 'Hello'), 11 | ); 12 | 13 | // Print the candidates 14 | response.candidates.forEach(print); 15 | } 16 | -------------------------------------------------------------------------------- /lib/palm_api.dart: -------------------------------------------------------------------------------- 1 | library palm_api; 2 | 3 | export 'src/palm_api_base.dart'; 4 | -------------------------------------------------------------------------------- /lib/src/dto/citation.dto.dart: -------------------------------------------------------------------------------- 1 | import 'dart:convert'; 2 | 3 | import 'package:palm_api/src/helpers/deep_collection_equality.dart'; 4 | 5 | /// A collection of source attributions for a piece of content. 6 | 7 | class CitationMetadata { 8 | /// Citations to sources for a specific response. 
9 | final List citationSources; 10 | 11 | CitationMetadata({ 12 | required this.citationSources, 13 | }); 14 | 15 | @override 16 | String toString() => 'CitationMetadata(citationSources: $citationSources)'; 17 | 18 | @override 19 | bool operator ==(Object other) { 20 | if (identical(this, other)) return true; 21 | final listEquals = const DeepCollectionEquality().equals; 22 | 23 | return other is CitationMetadata && 24 | listEquals(other.citationSources, citationSources); 25 | } 26 | 27 | @override 28 | int get hashCode => citationSources.hashCode; 29 | 30 | Map toMap() { 31 | return { 32 | 'citationSources': citationSources.map((x) => x.toMap()).toList(), 33 | }; 34 | } 35 | 36 | factory CitationMetadata.fromMap(Map map) { 37 | return CitationMetadata( 38 | citationSources: List.from( 39 | map['citationSources']?.map((x) => CitationSource.fromMap(x))), 40 | ); 41 | } 42 | 43 | String toJson() => json.encode(toMap()); 44 | 45 | factory CitationMetadata.fromJson(String source) => 46 | CitationMetadata.fromMap(json.decode(source)); 47 | 48 | CitationMetadata copyWith({ 49 | List? citationSources, 50 | }) { 51 | return CitationMetadata( 52 | citationSources: citationSources ?? this.citationSources, 53 | ); 54 | } 55 | } 56 | 57 | /// A citation to a source for a portion of a specific response. 58 | 59 | class CitationSource { 60 | /// Optional. Start of segment of the response that is attributed to this 61 | /// source. 62 | /// 63 | /// Index indicates the start of the segment, measured in bytes. 64 | final int? startIndex; 65 | 66 | /// Optional. End of the attributed segment, exclusive. 67 | final int? endIndex; 68 | 69 | /// Optional. URI that is attributed as a source for a portion of the text. 70 | final String? uri; 71 | 72 | /// Optional. License for the GitHub project that is attributed as a source for 73 | /// segment. 74 | /// 75 | /// License info is required for code citations. 76 | final String? 
license; 77 | 78 | CitationSource({ 79 | this.startIndex, 80 | this.endIndex, 81 | this.uri, 82 | this.license, 83 | }); 84 | 85 | @override 86 | String toString() { 87 | return 'CitationSource(startIndex: $startIndex, endIndex: $endIndex, uri: $uri, license: $license)'; 88 | } 89 | 90 | @override 91 | bool operator ==(Object other) { 92 | if (identical(this, other)) return true; 93 | 94 | return other is CitationSource && 95 | other.startIndex == startIndex && 96 | other.endIndex == endIndex && 97 | other.uri == uri && 98 | other.license == license; 99 | } 100 | 101 | @override 102 | int get hashCode { 103 | return startIndex.hashCode ^ 104 | endIndex.hashCode ^ 105 | uri.hashCode ^ 106 | license.hashCode; 107 | } 108 | 109 | CitationSource copyWith({ 110 | int? startIndex, 111 | int? endIndex, 112 | String? uri, 113 | String? license, 114 | }) { 115 | return CitationSource( 116 | startIndex: startIndex ?? this.startIndex, 117 | endIndex: endIndex ?? this.endIndex, 118 | uri: uri ?? this.uri, 119 | license: license ?? 
this.license, 120 | ); 121 | } 122 | 123 | Map toMap() { 124 | return { 125 | 'startIndex': startIndex, 126 | 'endIndex': endIndex, 127 | 'uri': uri, 128 | 'license': license, 129 | }; 130 | } 131 | 132 | factory CitationSource.fromMap(Map map) { 133 | return CitationSource( 134 | startIndex: map['startIndex']?.toInt(), 135 | endIndex: map['endIndex']?.toInt(), 136 | uri: map['uri'], 137 | license: map['license'], 138 | ); 139 | } 140 | 141 | String toJson() => json.encode(toMap()); 142 | 143 | factory CitationSource.fromJson(String source) => 144 | CitationSource.fromMap(json.decode(source)); 145 | } 146 | -------------------------------------------------------------------------------- /lib/src/dto/discuss_service.dto.dart: -------------------------------------------------------------------------------- 1 | import 'dart:convert'; 2 | 3 | import 'package:palm_api/src/dto/citation.dto.dart'; 4 | import 'package:palm_api/src/dto/safety.dto.dart'; 5 | import 'package:palm_api/src/helpers/deep_collection_equality.dart'; 6 | 7 | /// Request to generate a message. 8 | 9 | class GenerateMessageRequest { 10 | /// Required. The name of the model to use. 11 | /// 12 | /// Format: `name=models/{model}`. 13 | final String model; 14 | 15 | /// Required. The structured textual input given to the model as a prompt. 16 | /// 17 | /// Given a 18 | /// prompt, the model will return what it predicts is the next message in the 19 | /// discussion. 20 | final MessagePrompt prompt; 21 | 22 | /// Optional. Controls the randomness of the output. 23 | /// 24 | /// Values can range over `[0.0,1.0]`, 25 | /// inclusive. A value closer to `1.0` will produce responses that are more 26 | /// varied, while a value closer to `0.0` will typically result in 27 | /// less surprising responses from the model. 28 | final double? temperature; 29 | 30 | /// Optional. The number of generated response messages to return. 31 | /// 32 | /// This value must be between 33 | /// `[1, 8]`, inclusive. 
If unset, this will default to `1`. 34 | final int? candidateCount; 35 | 36 | /// Optional. The maximum cumulative probability of tokens to consider when 37 | /// sampling. 38 | /// 39 | /// The model uses combined Top-k and nucleus sampling. 40 | /// 41 | /// Nucleus sampling considers the smallest set of tokens whose probability 42 | /// sum is at least `top_p`. 43 | final double? topP; 44 | 45 | /// Optional. The maximum number of tokens to consider when sampling. 46 | /// 47 | /// The model uses combined Top-k and nucleus sampling. 48 | /// 49 | /// Top-k sampling considers the set of `top_k` most probable tokens. 50 | final int? topK; 51 | 52 | GenerateMessageRequest({ 53 | required this.model, 54 | required this.prompt, 55 | this.temperature, 56 | this.candidateCount, 57 | this.topP, 58 | this.topK, 59 | }); 60 | 61 | @override 62 | bool operator ==(Object other) { 63 | if (identical(this, other)) return true; 64 | 65 | return other is GenerateMessageRequest && 66 | other.model == model && 67 | other.prompt == prompt && 68 | other.temperature == temperature && 69 | other.candidateCount == candidateCount && 70 | other.topP == topP && 71 | other.topK == topK; 72 | } 73 | 74 | @override 75 | int get hashCode { 76 | return model.hashCode ^ 77 | prompt.hashCode ^ 78 | temperature.hashCode ^ 79 | candidateCount.hashCode ^ 80 | topP.hashCode ^ 81 | topK.hashCode; 82 | } 83 | 84 | GenerateMessageRequest copyWith({ 85 | String? model, 86 | MessagePrompt? prompt, 87 | double? temperature, 88 | int? candidateCount, 89 | double? topP, 90 | int? topK, 91 | }) { 92 | return GenerateMessageRequest( 93 | model: model ?? this.model, 94 | prompt: prompt ?? this.prompt, 95 | temperature: temperature ?? this.temperature, 96 | candidateCount: candidateCount ?? this.candidateCount, 97 | topP: topP ?? this.topP, 98 | topK: topK ?? 
this.topK, 99 | ); 100 | } 101 | 102 | @override 103 | String toString() { 104 | return 'GenerateMessageRequest(model: $model, prompt: $prompt, temperature: $temperature, candidateCount: $candidateCount, topP: $topP, topK: $topK)'; 105 | } 106 | 107 | Map toMap() { 108 | return { 109 | 'model': model, 110 | 'prompt': prompt.toMap(), 111 | 'temperature': temperature, 112 | 'candidateCount': candidateCount, 113 | 'topP': topP, 114 | 'topK': topK, 115 | }; 116 | } 117 | 118 | factory GenerateMessageRequest.fromMap(Map map) { 119 | return GenerateMessageRequest( 120 | model: map['model'] ?? '', 121 | prompt: MessagePrompt.fromMap(map['prompt']), 122 | temperature: map['temperature']?.toDouble(), 123 | candidateCount: map['candidateCount']?.toInt(), 124 | topP: map['topP']?.toDouble(), 125 | topK: map['topK']?.toInt(), 126 | ); 127 | } 128 | 129 | String toJson() => json.encode(toMap()); 130 | 131 | factory GenerateMessageRequest.fromJson(String source) => 132 | GenerateMessageRequest.fromMap(json.decode(source)); 133 | } 134 | 135 | /// The response from the model. 136 | /// 137 | /// This includes candidate messages and 138 | /// conversation history in the form of chronologically-ordered messages. 139 | 140 | class GenerateMessageResponse { 141 | /// Candidate response messages from the model. 142 | final List candidates; 143 | 144 | /// The conversation history used by the model. 145 | final List messages; 146 | 147 | /// A set of content filtering metadata for the prompt and response 148 | /// text. 149 | /// 150 | /// This indicates which `SafetyCategory`(s) blocked a 151 | /// candidate from this response, the lowest `HarmProbability` 152 | /// that triggered a block, and the HarmThreshold setting for that category. 
153 | final List filters; 154 | 155 | GenerateMessageResponse({ 156 | required this.candidates, 157 | required this.messages, 158 | required this.filters, 159 | }); 160 | 161 | @override 162 | bool operator ==(Object other) { 163 | if (identical(this, other)) return true; 164 | final listEquals = const DeepCollectionEquality().equals; 165 | 166 | return other is GenerateMessageResponse && 167 | listEquals(other.candidates, candidates) && 168 | listEquals(other.messages, messages) && 169 | listEquals(other.filters, filters); 170 | } 171 | 172 | @override 173 | int get hashCode => 174 | candidates.hashCode ^ messages.hashCode ^ filters.hashCode; 175 | 176 | GenerateMessageResponse copyWith({ 177 | List? candidates, 178 | List? messages, 179 | List? filters, 180 | }) { 181 | return GenerateMessageResponse( 182 | candidates: candidates ?? this.candidates, 183 | messages: messages ?? this.messages, 184 | filters: filters ?? this.filters, 185 | ); 186 | } 187 | 188 | Map toMap() { 189 | return { 190 | 'candidates': candidates.map((x) => x.toMap()).toList(), 191 | 'messages': messages.map((x) => x.toMap()).toList(), 192 | 'filters': filters.map((x) => x.toMap()).toList(), 193 | }; 194 | } 195 | 196 | factory GenerateMessageResponse.fromMap(Map map) { 197 | return GenerateMessageResponse( 198 | candidates: List.from( 199 | map['candidates']?.map((x) => Message.fromMap(x)) ?? []), 200 | messages: List.from( 201 | map['messages']?.map((x) => Message.fromMap(x)) ?? []), 202 | filters: List.from( 203 | map['filters']?.map((x) => ContentFilter.fromMap(x)) ?? []), 204 | ); 205 | } 206 | 207 | String toJson() => json.encode(toMap()); 208 | 209 | factory GenerateMessageResponse.fromJson(String source) => 210 | GenerateMessageResponse.fromMap(json.decode(source)); 211 | 212 | @override 213 | String toString() => 214 | 'GenerateMessageResponse(candidates: $candidates, messages: $messages, filters: $filters)'; 215 | } 216 | 217 | /// The base unit of structured text. 
218 | /// 219 | /// A `Message` includes an `author` and the `content` of 220 | /// the `Message`. 221 | /// 222 | /// The `author` is used to tag messages when they are fed to the 223 | /// model as text. 224 | 225 | class Message { 226 | /// Optional. The author of this Message. 227 | /// 228 | /// This serves as a key for tagging 229 | /// the content of this Message when it is fed to the model as text. 230 | /// 231 | /// The author can be any alphanumeric string. 232 | final String? author; 233 | 234 | /// Required. The text content of the structured `Message`. 235 | final String content; 236 | 237 | /// Output only. Citation information for model-generated `content` in this 238 | /// `Message`. 239 | /// 240 | /// If this `Message` was generated as output from the model, this field may be 241 | /// populated with attribution information for any text included in the 242 | /// `content`. This field is used only on output. 243 | final CitationMetadata? citationMetadata; 244 | 245 | Message({ 246 | this.author, 247 | required this.content, 248 | this.citationMetadata, 249 | }); 250 | 251 | @override 252 | bool operator ==(Object other) { 253 | if (identical(this, other)) return true; 254 | 255 | return other is Message && 256 | other.author == author && 257 | other.content == content && 258 | other.citationMetadata == citationMetadata; 259 | } 260 | 261 | @override 262 | int get hashCode => 263 | author.hashCode ^ content.hashCode ^ citationMetadata.hashCode; 264 | 265 | Message copyWith({ 266 | String? author, 267 | String? content, 268 | CitationMetadata? citationMetadata, 269 | }) { 270 | return Message( 271 | author: author ?? this.author, 272 | content: content ?? this.content, 273 | citationMetadata: citationMetadata ?? 
this.citationMetadata, 274 | ); 275 | } 276 | 277 | Map toMap() { 278 | return { 279 | 'author': author, 280 | 'content': content, 281 | 'citationMetadata': citationMetadata?.toMap(), 282 | }; 283 | } 284 | 285 | factory Message.fromMap(Map map) { 286 | return Message( 287 | author: map['author'], 288 | content: map['content'] ?? '', 289 | citationMetadata: map['citationMetadata'] != null 290 | ? CitationMetadata.fromMap(map['citationMetadata']) 291 | : null, 292 | ); 293 | } 294 | 295 | String toJson() => json.encode(toMap()); 296 | 297 | factory Message.fromJson(String source) => 298 | Message.fromMap(json.decode(source)); 299 | 300 | @override 301 | String toString() => 302 | 'Message(author: $author, content: $content, citationMetadata: $citationMetadata)'; 303 | } 304 | 305 | /// All of the structured input text passed to the model as a prompt. 306 | /// 307 | /// A `MessagePrompt` contains a structured set of fields that provide context 308 | /// for the conversation, examples of user input/model output message pairs that 309 | /// prime the model to respond in different ways, and the conversation history 310 | /// or list of messages representing the alternating turns of the conversation 311 | /// between the user and the model. 312 | 313 | class MessagePrompt { 314 | /// Optional. Text that should be provided to the model first to ground the 315 | /// response. 316 | /// 317 | /// If not empty, this `context` will be given to the model first before the 318 | /// `examples` and `messages`. When using a `context` be sure to provide it 319 | /// with every request to maintain continuity. 320 | /// 321 | /// This field can be a description of your prompt to the model to help provide 322 | /// context and guide the responses. Examples: "Translate the phrase from 323 | /// English to French." or "Given a statement, classify the sentiment as happy, 324 | /// sad or neutral." 
325 | /// 326 | /// Anything included in this field will take precedence over message history 327 | /// if the total input size exceeds the model's `input_token_limit` and the 328 | /// input request is truncated. 329 | final String? context; 330 | 331 | /// Optional. Examples of what the model should generate. 332 | /// 333 | /// This includes both user input and the response that the model should 334 | /// emulate. 335 | /// 336 | /// These `examples` are treated identically to conversation messages except 337 | /// that they take precedence over the history in `messages`: 338 | /// If the total input size exceeds the model's `input_token_limit` the input 339 | /// will be truncated. Items will be dropped from `messages` before `examples`. 340 | final List? examples; 341 | 342 | /// Required. A snapshot of the recent conversation history sorted 343 | /// chronologically. 344 | /// 345 | /// Turns alternate between two authors. 346 | /// 347 | /// If the total input size exceeds the model's `input_token_limit` the input 348 | /// will be truncated: The oldest items will be dropped from `messages`. 349 | final List messages; 350 | 351 | MessagePrompt({ 352 | this.context, 353 | this.examples, 354 | required this.messages, 355 | }); 356 | 357 | @override 358 | String toString() => 359 | 'MessagePrompt(context: $context, examples: $examples, messages: $messages)'; 360 | 361 | @override 362 | bool operator ==(Object other) { 363 | if (identical(this, other)) return true; 364 | final listEquals = const DeepCollectionEquality().equals; 365 | 366 | return other is MessagePrompt && 367 | other.context == context && 368 | listEquals(other.examples, examples) && 369 | listEquals(other.messages, messages); 370 | } 371 | 372 | @override 373 | int get hashCode => context.hashCode ^ examples.hashCode ^ messages.hashCode; 374 | 375 | MessagePrompt copyWith({ 376 | String? context, 377 | List? examples, 378 | List? 
messages, 379 | }) { 380 | return MessagePrompt( 381 | context: context ?? this.context, 382 | examples: examples ?? this.examples, 383 | messages: messages ?? this.messages, 384 | ); 385 | } 386 | 387 | Map toMap() { 388 | return { 389 | 'context': context, 390 | 'examples': examples?.map((x) => x.toMap()).toList(), 391 | 'messages': messages.map((x) => x.toMap()).toList(), 392 | }; 393 | } 394 | 395 | factory MessagePrompt.fromMap(Map map) { 396 | return MessagePrompt( 397 | context: map['context'], 398 | examples: map['examples'] != null 399 | ? List.from(map['examples']?.map((x) => Example.fromMap(x))) 400 | : null, 401 | messages: 402 | List.from(map['messages']?.map((x) => Message.fromMap(x))), 403 | ); 404 | } 405 | 406 | String toJson() => json.encode(toMap()); 407 | 408 | factory MessagePrompt.fromJson(String source) => 409 | MessagePrompt.fromMap(json.decode(source)); 410 | } 411 | 412 | /// An input/output example used to instruct the Model. 413 | /// 414 | /// It demonstrates how the model should respond or format its response. 415 | 416 | class Example { 417 | /// Required. An example of an input `Message` from the user. 418 | final Message input; 419 | 420 | /// Required. An example of what the model should output given the input. 421 | final Message output; 422 | 423 | Example({ 424 | required this.input, 425 | required this.output, 426 | }); 427 | 428 | @override 429 | bool operator ==(Object other) { 430 | if (identical(this, other)) return true; 431 | 432 | return other is Example && other.input == input && other.output == output; 433 | } 434 | 435 | @override 436 | int get hashCode => input.hashCode ^ output.hashCode; 437 | 438 | Example copyWith({ 439 | Message? input, 440 | Message? output, 441 | }) { 442 | return Example( 443 | input: input ?? this.input, 444 | output: output ?? 
this.output, 445 | ); 446 | } 447 | 448 | Map toMap() { 449 | return { 450 | 'input': input.toMap(), 451 | 'output': output.toMap(), 452 | }; 453 | } 454 | 455 | factory Example.fromMap(Map map) { 456 | return Example( 457 | input: Message.fromMap(map['input']), 458 | output: Message.fromMap(map['output']), 459 | ); 460 | } 461 | 462 | String toJson() => json.encode(toMap()); 463 | 464 | factory Example.fromJson(String source) => 465 | Example.fromMap(json.decode(source)); 466 | 467 | @override 468 | String toString() => 'Example(input: $input, output: $output)'; 469 | } 470 | 471 | /// Counts the number of tokens in the `prompt` sent to a model. 472 | /// 473 | /// Models may tokenize text differently, so each model may return a different 474 | /// `tokenCount`. 475 | 476 | class CountMessageTokensRequest { 477 | /// Required. The model's resource name. This serves as an ID for the Model to 478 | /// use. 479 | /// 480 | /// This name should match a model name returned by the `ListModels` method. 481 | /// 482 | /// Format: `models/{model}` 483 | final String model; 484 | 485 | /// Required. The prompt, whose token count is to be returned. 486 | final MessagePrompt prompt; 487 | 488 | CountMessageTokensRequest({ 489 | required this.model, 490 | required this.prompt, 491 | }); 492 | 493 | @override 494 | bool operator ==(Object other) { 495 | if (identical(this, other)) return true; 496 | 497 | return other is CountMessageTokensRequest && 498 | other.model == model && 499 | other.prompt == prompt; 500 | } 501 | 502 | @override 503 | int get hashCode => model.hashCode ^ prompt.hashCode; 504 | 505 | CountMessageTokensRequest copyWith({ 506 | String? model, 507 | MessagePrompt? prompt, 508 | }) { 509 | return CountMessageTokensRequest( 510 | model: model ?? this.model, 511 | prompt: prompt ?? 
this.prompt,
    );
  }

  /// Serializes this request into a JSON-compatible map.
  Map<String, dynamic> toMap() {
    return {
      'model': model,
      'prompt': prompt.toMap(),
    };
  }

  /// Restores a [CountMessageTokensRequest] from a decoded JSON map.
  factory CountMessageTokensRequest.fromMap(Map<String, dynamic> map) {
    return CountMessageTokensRequest(
      model: map['model'] ?? '',
      prompt: MessagePrompt.fromMap(map['prompt']),
    );
  }

  String toJson() => json.encode(toMap());

  factory CountMessageTokensRequest.fromJson(String source) =>
      CountMessageTokensRequest.fromMap(json.decode(source));

  @override
  String toString() =>
      'CountMessageTokensRequest(model: $model, prompt: $prompt)';
}

/// A response from `CountMessageTokens`.
///
/// It returns the model's `token_count` for the `prompt`.
class CountMessageTokensResponse {
  /// The number of tokens that the `model` tokenizes the `prompt` into.
  ///
  /// Always non-negative.
  final int tokenCount;

  CountMessageTokensResponse({
    required this.tokenCount,
  });

  @override
  bool operator ==(Object other) {
    if (identical(this, other)) return true;

    return other is CountMessageTokensResponse &&
        other.tokenCount == tokenCount;
  }

  @override
  int get hashCode => tokenCount.hashCode;

  /// Returns a copy of this response with the given fields replaced.
  CountMessageTokensResponse copyWith({
    int? tokenCount,
  }) {
    return CountMessageTokensResponse(
      tokenCount: tokenCount ?? this.tokenCount,
    );
  }

  /// Serializes this response into a JSON-compatible map.
  Map<String, dynamic> toMap() {
    return {
      'tokenCount': tokenCount,
    };
  }

  /// Restores a [CountMessageTokensResponse] from a decoded JSON map; a
  /// missing count is treated as 0.
  factory CountMessageTokensResponse.fromMap(Map<String, dynamic> map) {
    return CountMessageTokensResponse(
      tokenCount: map['tokenCount']?.toInt() ?? 0,
    );
  }

  String toJson() => json.encode(toMap());

  factory CountMessageTokensResponse.fromJson(String source) =>
      CountMessageTokensResponse.fromMap(json.decode(source));

  @override
  String toString() => 'CountMessageTokensResponse(tokenCount: $tokenCount)';
}
--------------------------------------------------------------------------------
/lib/src/dto/export.dart:
--------------------------------------------------------------------------------
export 'citation.dto.dart';
export 'discuss_service.dto.dart';
export 'model.dto.dart';
export 'model_service.dto.dart';
export 'safety.dto.dart';
export 'text_service.dto.dart';
--------------------------------------------------------------------------------
/lib/src/dto/model.dto.dart:
--------------------------------------------------------------------------------
import 'dart:convert';

import 'package:palm_api/src/helpers/deep_collection_equality.dart';

/// Metadata describing a single model, as returned by the model service.
class Model {
  /// Required. The resource name of the `Model`. Format: `models/{model}` with a `{model}` naming convention of: "{base_model_id}-{version}". Examples: `models/chat-bison-001`
  final String name;

  /// Required. The name of the base model, pass this to the generation request. Examples: `chat-bison`
  final String baseModelId;

  /// Required. The version number of the model. This represents the major version
  final String version;

  /// The human-readable name of the model. E.g. "Chat Bison". The name can be up to 128 characters long and can consist of any UTF-8 characters.
  final String displayName;

  /// A short description of the model.
  final String description;

  /// Maximum number of input tokens allowed for this model.
  final int inputTokenLimit;

  /// Maximum number of output tokens available for this model.
final int outputTokenLimit;

  /// The model's supported generation methods. The method names are defined as
  /// Pascal case strings, such as `generateMessage` which correspond to API
  /// methods.
  final List<String> supportedGenerationMethods;

  /// Controls the randomness of the output. Values can range over `[0.0,1.0]`,
  /// inclusive. A value closer to `1.0` will produce responses that are more
  /// varied, while a value closer to `0.0` will typically result in less
  /// surprising responses from the model. This value specifies default to be
  /// used by the backend while making the call to the model.
  final double? temperature;

  /// For Nucleus sampling. Nucleus sampling considers the smallest set of
  /// tokens whose probability sum is at least `top_p`. This value specifies
  /// default to be used by the backend while making the call to the model.
  final double? topP;

  /// For Top-k sampling. Top-k sampling considers the set of `top_k` most
  /// probable tokens. This value specifies default to be used by the backend
  /// while making the call to the model.
  final int? topK;

  const Model({
    required this.name,
    required this.baseModelId,
    required this.version,
    required this.displayName,
    required this.description,
    required this.inputTokenLimit,
    required this.outputTokenLimit,
    required this.supportedGenerationMethods,
    this.temperature,
    this.topP,
    this.topK,
  });

  @override
  bool operator ==(Object other) {
    if (identical(this, other)) return true;
    final listEquals = const DeepCollectionEquality().equals;

    return other is Model &&
        other.name == name &&
        other.baseModelId == baseModelId &&
        other.version == version &&
        other.displayName == displayName &&
        other.description == description &&
        other.inputTokenLimit == inputTokenLimit &&
        other.outputTokenLimit == outputTokenLimit &&
        listEquals(
            other.supportedGenerationMethods, supportedGenerationMethods) &&
        other.temperature == temperature &&
        other.topP == topP &&
        other.topK == topK;
  }

  @override
  int get hashCode {
    // NOTE(review): `supportedGenerationMethods` hashes by list identity while
    // `==` compares it deeply, so two equal Models can hash differently.
    // Confirm whether the equality helper exposes a deep hash to use here.
    return name.hashCode ^
        baseModelId.hashCode ^
        version.hashCode ^
        displayName.hashCode ^
        description.hashCode ^
        inputTokenLimit.hashCode ^
        outputTokenLimit.hashCode ^
        supportedGenerationMethods.hashCode ^
        temperature.hashCode ^
        topP.hashCode ^
        topK.hashCode;
  }

  @override
  String toString() {
    return 'Model(name: $name, baseModelId: $baseModelId, version: $version, displayName: $displayName, description: $description, inputTokenLimit: $inputTokenLimit, outputTokenLimit: $outputTokenLimit, supportedGenerationMethods: $supportedGenerationMethods, temperature: $temperature, topP: $topP, topK: $topK)';
  }

  /// Returns a copy of this model with the given fields replaced.
  Model copyWith({
    String? name,
    String? baseModelId,
    String? version,
    String? displayName,
    String? description,
    int? inputTokenLimit,
    int? outputTokenLimit,
    List<String>? supportedGenerationMethods,
    double? temperature,
    double? topP,
    int? topK,
  }) {
    return Model(
      name: name ?? this.name,
      baseModelId: baseModelId ?? this.baseModelId,
      version: version ?? this.version,
      displayName: displayName ?? this.displayName,
      description: description ?? this.description,
      inputTokenLimit: inputTokenLimit ?? this.inputTokenLimit,
      outputTokenLimit: outputTokenLimit ?? this.outputTokenLimit,
      supportedGenerationMethods:
          supportedGenerationMethods ?? this.supportedGenerationMethods,
      temperature: temperature ?? this.temperature,
      topP: topP ?? this.topP,
      topK: topK ?? this.topK,
    );
  }

  /// Serializes this model into a JSON-compatible map.
  Map<String, dynamic> toMap() {
    return {
      'name': name,
      'baseModelId': baseModelId,
      'version': version,
      'displayName': displayName,
      'description': description,
      'inputTokenLimit': inputTokenLimit,
      'outputTokenLimit': outputTokenLimit,
      'supportedGenerationMethods': supportedGenerationMethods,
      'temperature': temperature,
      'topP': topP,
      'topK': topK,
    };
  }

  /// Restores a [Model] from a decoded JSON map.
  factory Model.fromMap(Map<String, dynamic> map) {
    return Model(
      name: map['name'] ?? '',
      baseModelId: map['baseModelId'] ?? '',
      version: map['version'] ?? '',
      displayName: map['displayName'] ?? '',
      description: map['description'] ?? '',
      inputTokenLimit: map['inputTokenLimit']?.toInt() ?? 0,
      outputTokenLimit: map['outputTokenLimit']?.toInt() ??
0,
      supportedGenerationMethods:
          List<String>.from(map['supportedGenerationMethods']),
      temperature: map['temperature']?.toDouble(),
      topP: map['topP']?.toDouble(),
      topK: map['topK']?.toInt(),
    );
  }

  String toJson() => json.encode(toMap());

  factory Model.fromJson(String source) => Model.fromMap(json.decode(source));
}
--------------------------------------------------------------------------------
/lib/src/dto/model_service.dto.dart:
--------------------------------------------------------------------------------
import 'dart:convert';

import 'package:palm_api/src/dto/model.dto.dart';
import 'package:palm_api/src/helpers/deep_collection_equality.dart';

/// Request for getting information about a specific Model.
class GetModelRequest {
  /// Required. The resource name of the model.
  ///
  /// This name should match a model name returned by the `ListModels` method.
  ///
  /// Format: `models/{model}`
  final String name;

  GetModelRequest({
    required this.name,
  });

  @override
  bool operator ==(Object other) {
    if (identical(this, other)) return true;

    return other is GetModelRequest && other.name == name;
  }

  @override
  int get hashCode => name.hashCode;

  /// Returns a copy of this request with the given fields replaced.
  GetModelRequest copyWith({
    String? name,
  }) {
    return GetModelRequest(
      name: name ?? this.name,
    );
  }

  /// Serializes this request into a JSON-compatible map.
  Map<String, dynamic> toMap() {
    return {
      'name': name,
    };
  }

  /// Restores a [GetModelRequest] from a decoded JSON map.
  factory GetModelRequest.fromMap(Map<String, dynamic> map) {
    return GetModelRequest(
      name: map['name'] ?? '',
    );
  }

  String toJson() => json.encode(toMap());

  factory GetModelRequest.fromJson(String source) =>
      GetModelRequest.fromMap(json.decode(source));

  @override
  String toString() => 'GetModelRequest(name: $name)';
}

/// Request for listing all Models.
class ListModelsRequest {
  /// The maximum number of `Models` to return (per page).
  ///
  /// The service may return fewer models.
  /// If unspecified, at most 50 models will be returned per page.
  /// This method returns at most 1000 models per page, even if you pass a
  /// larger page_size.
  final int pageSize;

  /// A page token, received from a previous `ListModels` call.
  ///
  /// Provide the `page_token` returned by one request as an argument to the
  /// next request to retrieve the next page.
  ///
  /// When paginating, all other parameters provided to `ListModels` must match
  /// the call that provided the page token.
  final String pageToken;

  ListModelsRequest({
    required this.pageSize,
    required this.pageToken,
  });

  @override
  bool operator ==(Object other) {
    if (identical(this, other)) return true;

    return other is ListModelsRequest &&
        other.pageSize == pageSize &&
        other.pageToken == pageToken;
  }

  @override
  int get hashCode => pageSize.hashCode ^ pageToken.hashCode;

  /// Returns a copy of this request with the given fields replaced.
  ListModelsRequest copyWith({
    int? pageSize,
    String? pageToken,
  }) {
    return ListModelsRequest(
      pageSize: pageSize ?? this.pageSize,
      pageToken: pageToken ?? this.pageToken,
    );
  }

  /// Serializes this request into a JSON-compatible map.
  Map<String, dynamic> toMap() {
    return {
      'pageSize': pageSize,
      'pageToken': pageToken,
    };
  }

  /// Restores a [ListModelsRequest] from a decoded JSON map.
  factory ListModelsRequest.fromMap(Map<String, dynamic> map) {
    return ListModelsRequest(
      pageSize: map['pageSize']?.toInt() ?? 0,
      pageToken: map['pageToken'] ??
'',
    );
  }

  String toJson() => json.encode(toMap());

  factory ListModelsRequest.fromJson(String source) =>
      ListModelsRequest.fromMap(json.decode(source));

  @override
  String toString() =>
      'ListModelsRequest(pageSize: $pageSize, pageToken: $pageToken)';
}

/// Response from `ListModel` containing a paginated list of Models.
class ListModelsResponse {
  /// The returned Models.
  final List<Model> models;

  /// A token, which can be sent as `page_token` to retrieve the next page.
  ///
  /// If this field is omitted, there are no more pages.
  final String nextPageToken;

  ListModelsResponse({
    required this.models,
    required this.nextPageToken,
  });

  @override
  bool operator ==(Object other) {
    if (identical(this, other)) return true;
    final listEquals = const DeepCollectionEquality().equals;

    return other is ListModelsResponse &&
        listEquals(other.models, models) &&
        other.nextPageToken == nextPageToken;
  }

  @override
  int get hashCode {
    // NOTE(review): `models` hashes by list identity while `==` compares it
    // deeply, so two equal responses can hash differently. Confirm whether the
    // equality helper exposes a deep hash to use here.
    return models.hashCode ^ nextPageToken.hashCode;
  }

  @override
  String toString() =>
      'ListModelsResponse(models: $models, nextPageToken: $nextPageToken)';

  /// Returns a copy of this response with the given fields replaced.
  ListModelsResponse copyWith({
    List<Model>? models,
    String? nextPageToken,
  }) {
    return ListModelsResponse(
      models: models ?? this.models,
      nextPageToken: nextPageToken ?? this.nextPageToken,
    );
  }

  /// Serializes this response into a JSON-compatible map.
  Map<String, dynamic> toMap() {
    return {
      'models': models.map((x) => x.toMap()).toList(),
      'nextPageToken': nextPageToken,
    };
  }

  /// Restores a [ListModelsResponse] from a decoded JSON map.
  ///
  /// A missing `models` key is treated as an empty list instead of throwing
  /// (bug fix: `List.from(null)` previously crashed on an omitted list).
  factory ListModelsResponse.fromMap(Map<String, dynamic> map) {
    return ListModelsResponse(
      models: List<Model>.from(
          map['models']?.map((x) => Model.fromMap(x)) ?? []),
      nextPageToken: map['nextPageToken'] ?? '',
    );
  }

  String toJson() => json.encode(toMap());

  factory ListModelsResponse.fromJson(String source) =>
      ListModelsResponse.fromMap(json.decode(source));
}
--------------------------------------------------------------------------------
/lib/src/dto/safety.dto.dart:
--------------------------------------------------------------------------------
import 'dart:convert';

// The category of a rating.
//
// These categories cover various kinds of harms that developers
// may wish to adjust.
enum HarmCategory {
  // Category is unspecified.
  unspecified('HARM_CATEGORY_UNSPECIFIED'),

  // Negative or harmful comments targeting identity and/or protected attribute.
  derogatory('HARM_CATEGORY_DEROGATORY'),

  // Content that is rude, disrespectful, or profane.
  toxicity('HARM_CATEGORY_TOXICITY'),

  // Describes scenarios depicting violence against an individual or group, or
  // general descriptions of gore.
  violence('HARM_CATEGORY_VIOLENCE'),

  // Contains references to sexual acts or other lewd content.
  sexual('HARM_CATEGORY_SEXUAL'),

  // Promotes unchecked medical advice.
  medical('HARM_CATEGORY_MEDICAL'),

  // Dangerous content that promotes, facilitates, or encourages harmful acts.
  dangerous('HARM_CATEGORY_DANGEROUS');

  const HarmCategory(this.value);

  // The wire-format string for this category.
  final String value;

  // Parses the wire-format string; falls back to [unspecified] for values
  // this client does not know about yet (forward compatibility — previously
  // threw a StateError on unknown values).
  static HarmCategory fromString(String value) {
    return HarmCategory.values.firstWhere(
      (e) => e.value == value,
      orElse: () => HarmCategory.unspecified,
    );
  }
}

// Block at and beyond a specified harm probability.
enum HarmBlockThreshold {
  // Threshold is unspecified.
  unspecified('HARM_BLOCK_THRESHOLD_UNSPECIFIED'),

  // Content with NEGLIGIBLE will be allowed.
  blockLowAndAbove('BLOCK_LOW_AND_ABOVE'),

  // Content with NEGLIGIBLE and LOW will be allowed.
blockMediumAndAbove('BLOCK_MEDIUM_AND_ABOVE'),

  // Content with NEGLIGIBLE, LOW, and MEDIUM will be allowed.
  blockOnlyHigh('BLOCK_ONLY_HIGH');

  const HarmBlockThreshold(this.value);

  // The wire-format string for this threshold.
  final String value;

  // Parses the wire-format string; falls back to [unspecified] for values
  // this client does not know about yet (forward compatibility — previously
  // threw a StateError on unknown values).
  static HarmBlockThreshold fromString(String value) {
    return HarmBlockThreshold.values.firstWhere(
      (e) => e.value == value,
      orElse: () => HarmBlockThreshold.unspecified,
    );
  }
}

// A list of reasons why content may have been blocked.
enum BlockedReason {
  // A blocked reason was not specified.
  unspecified('BLOCKED_REASON_UNSPECIFIED'),

  // Content was blocked by safety settings.
  safety('SAFETY'),

  // Content was blocked, but the reason is uncategorized.
  other('OTHER');

  const BlockedReason(this.value);

  // The wire-format string for this reason.
  final String value;

  // Parses the wire-format string; falls back to [unspecified] for unknown
  // values (forward compatibility — previously threw a StateError).
  static BlockedReason fromString(String value) {
    return BlockedReason.values.firstWhere(
      (e) => e.value == value,
      orElse: () => BlockedReason.unspecified,
    );
  }
}

// The probability that a piece of content is harmful.
//
// The classification system gives the probability of the content being
// unsafe. This does not indicate the severity of harm for a piece of content.
enum HarmProbability {
  // Probability is unspecified.
  unspecified('HARM_PROBABILITY_UNSPECIFIED'),

  // Content has a negligible chance of being unsafe.
  negligible('NEGLIGIBLE'),

  // Content has a low chance of being unsafe.
  low('LOW'),

  // Content has a medium chance of being unsafe.
  medium('MEDIUM'),

  // Content has a high chance of being unsafe.
  high('HIGH');

  const HarmProbability(this.value);

  // The wire-format string for this probability level.
  final String value;

  // Parses the wire-format string; falls back to [unspecified] for unknown
  // values (forward compatibility — previously threw a StateError).
  static HarmProbability fromString(String value) {
    return HarmProbability.values.firstWhere(
      (e) => e.value == value,
      orElse: () => HarmProbability.unspecified,
    );
  }
}

// Content filtering metadata associated with processing a single request.
//
// ContentFilter contains a reason and an optional supporting string. The reason
// may be unspecified.
class ContentFilter {
  // The reason content was blocked during request processing.
  final BlockedReason reason;

  // A string that describes the filtering behavior in more detail.
  final String? message;

  ContentFilter({
    required this.reason,
    this.message,
  });

  @override
  bool operator ==(Object other) {
    if (identical(this, other)) return true;

    return other is ContentFilter &&
        other.reason == reason &&
        other.message == message;
  }

  @override
  int get hashCode => reason.hashCode ^ message.hashCode;

  // Returns a copy of this filter with the given fields replaced.
  ContentFilter copyWith({
    BlockedReason? reason,
    String? message,
  }) {
    return ContentFilter(
      reason: reason ?? this.reason,
      message: message ?? this.message,
    );
  }

  // Serializes this filter into a JSON-compatible map.
  Map<String, dynamic> toMap() {
    return {
      'reason': reason.value,
      'message': message,
    };
  }

  // Restores a [ContentFilter] from a decoded JSON map.
  factory ContentFilter.fromMap(Map<String, dynamic> map) {
    return ContentFilter(
      reason: BlockedReason.fromString(map['reason']),
      message: map['message'],
    );
  }

  String toJson() => json.encode(toMap());

  factory ContentFilter.fromJson(String source) =>
      ContentFilter.fromMap(json.decode(source));

  @override
  String toString() => 'ContentFilter(reason: $reason, message: $message)';
}

// Safety feedback for an entire request.
//
// This field is populated if content in the input and/or response is blocked
// due to safety settings. SafetyFeedback may not exist for every HarmCategory.
// Each SafetyFeedback will return the safety settings used by the request as
// well as the lowest HarmProbability that should be allowed in order to return
// a result.
class SafetyFeedback {
  // Safety rating evaluated from content.
  final SafetyRating rating;

  // Safety settings applied to the request.
  final SafetySetting setting;

  SafetyFeedback({
    required this.rating,
    required this.setting,
  });

  @override
  bool operator ==(Object other) {
    if (identical(this, other)) return true;

    return other is SafetyFeedback &&
        other.rating == rating &&
        other.setting == setting;
  }

  @override
  int get hashCode => rating.hashCode ^ setting.hashCode;

  // Returns a copy of this feedback with the given fields replaced.
  SafetyFeedback copyWith({
    SafetyRating? rating,
    SafetySetting? setting,
  }) {
    return SafetyFeedback(
      rating: rating ?? this.rating,
      setting: setting ?? this.setting,
    );
  }

  // Serializes this feedback into a JSON-compatible map.
  Map<String, dynamic> toMap() {
    return {
      'rating': rating.toMap(),
      'setting': setting.toMap(),
    };
  }

  // Restores a [SafetyFeedback] from a decoded JSON map.
  factory SafetyFeedback.fromMap(Map<String, dynamic> map) {
    return SafetyFeedback(
      rating: SafetyRating.fromMap(map['rating']),
      setting: SafetySetting.fromMap(map['setting']),
    );
  }

  String toJson() => json.encode(toMap());

  factory SafetyFeedback.fromJson(String source) =>
      SafetyFeedback.fromMap(json.decode(source));

  @override
  String toString() => 'SafetyFeedback(rating: $rating, setting: $setting)';
}

// Safety rating for a piece of content.
//
// The safety rating contains the category of harm and the
// harm probability level in that category for a piece of content.
// Content is classified for safety across a number of
// harm categories and the probability of the harm classification is included
// here.
class SafetyRating {
  // Required. The category for this rating.
  final HarmCategory category;

  // Required. The probability of harm for this content.
  final HarmProbability probability;

  SafetyRating({
    required this.category,
    required this.probability,
  });

  @override
  bool operator ==(Object other) {
    if (identical(this, other)) return true;

    return other is SafetyRating &&
        other.category == category &&
        other.probability == probability;
  }

  @override
  int get hashCode => category.hashCode ^ probability.hashCode;

  // Returns a copy of this rating with the given fields replaced.
  SafetyRating copyWith({
    HarmCategory? category,
    HarmProbability? probability,
  }) {
    return SafetyRating(
      category: category ?? this.category,
      probability: probability ?? this.probability,
    );
  }

  // Serializes this rating into a JSON-compatible map.
  Map<String, dynamic> toMap() {
    return {
      'category': category.value,
      'probability': probability.value,
    };
  }

  // Restores a [SafetyRating] from a decoded JSON map.
  factory SafetyRating.fromMap(Map<String, dynamic> map) {
    return SafetyRating(
      category: HarmCategory.fromString(map['category']),
      probability: HarmProbability.fromString(map['probability']),
    );
  }

  String toJson() => json.encode(toMap());

  factory SafetyRating.fromJson(String source) =>
      SafetyRating.fromMap(json.decode(source));

  @override
  String toString() =>
      'SafetyRating(category: $category, probability: $probability)';
}

// Safety setting, affecting the safety-blocking behavior.
//
// Passing a safety setting for a category changes the allowed probability that
// content is blocked.
class SafetySetting {
  // Required. The category for this setting.
  final HarmCategory category;

  // Required. Controls the probability threshold at which harm is blocked.
  final HarmBlockThreshold threshold;

  SafetySetting({
    required this.category,
    required this.threshold,
  });

  @override
  bool operator ==(Object other) {
    if (identical(this, other)) return true;

    return other is SafetySetting &&
        other.category == category &&
        other.threshold == threshold;
  }

  @override
  int get hashCode => category.hashCode ^ threshold.hashCode;

  // Returns a copy of this setting with the given fields replaced.
  SafetySetting copyWith({
    HarmCategory? category,
    HarmBlockThreshold? threshold,
  }) {
    return SafetySetting(
      category: category ?? this.category,
      threshold: threshold ?? this.threshold,
    );
  }

  // Serializes this setting into a JSON-compatible map.
  Map<String, dynamic> toMap() {
    return {
      'category': category.value,
      'threshold': threshold.value,
    };
  }

  // Restores a [SafetySetting] from a decoded JSON map.
  factory SafetySetting.fromMap(Map<String, dynamic> map) {
    return SafetySetting(
      category: HarmCategory.fromString(map['category']),
      threshold: HarmBlockThreshold.fromString(map['threshold']),
    );
  }

  String toJson() => json.encode(toMap());

  factory SafetySetting.fromJson(String source) =>
      SafetySetting.fromMap(json.decode(source));

  @override
  String toString() =>
      'SafetySetting(category: $category, threshold: $threshold)';
}
--------------------------------------------------------------------------------
/lib/src/dto/text_service.dto.dart:
--------------------------------------------------------------------------------
import 'dart:convert';
import 'dart:core';

import 'package:palm_api/src/dto/citation.dto.dart';
import 'package:palm_api/src/dto/safety.dto.dart';
import 'package:palm_api/src/helpers/deep_collection_equality.dart';

// Request to generate a text completion from the model.
class GenerateTextRequest {
  // Required. The model name to use with the format name=models/{model}.
  final String model;

  // Required. The free-form input text given to the model as a prompt.
  //
  // Given a prompt, the model will generate a TextCompletion response it
  // predicts as the completion of the input text.
  final TextPrompt prompt;

  // Controls the randomness of the output.
  // Note: The default value varies by model, see the `Model.temperature`
  // attribute of the `Model` returned the `getModel` function.
  //
  // Values can range from [0.0,1.0],
  // inclusive. A value closer to 1.0 will produce responses that are more
  // varied and creative, while a value closer to 0.0 will typically result in
  // more straightforward responses from the model.
  final double?
temperature;

  // Number of generated responses to return.
  //
  // This value must be between [1, 8], inclusive. If unset, this will default
  // to 1.
  final int? candidateCount;

  // The maximum number of tokens to include in a candidate.
  //
  // If unset, this will default to 64.
  final int? maxOutputTokens;

  // The maximum cumulative probability of tokens to consider when sampling.
  //
  // The model uses combined Top-k and nucleus sampling.
  //
  // Tokens are sorted based on their assigned probabilities so that only the
  // most likely tokens are considered. Top-k sampling directly limits the
  // maximum number of tokens to consider, while Nucleus sampling limits number
  // of tokens based on the cumulative probability.
  //
  // Note: The default value varies by model, see the `Model.top_p`
  // attribute of the `Model` returned the `getModel` function.
  final double? topP;

  // The maximum number of tokens to consider when sampling.
  //
  // The model uses combined Top-k and nucleus sampling.
  //
  // Top-k sampling considers the set of `top_k` most probable tokens.
  // Defaults to 40.
  //
  // Note: The default value varies by model, see the `Model.top_k`
  // attribute of the `Model` returned the `getModel` function.
  final int? topK;

  // A list of unique `SafetySetting` instances for blocking unsafe content
  // that will be enforced on the `GenerateTextRequest.prompt` and
  // `GenerateTextResponse.candidates`. There should not be more than one
  // setting for each `SafetyCategory` type. The API will block any prompts and
  // responses that fail to meet the thresholds set by these settings. This list
  // overrides the default settings for each `SafetyCategory` specified in the
  // safety_settings. If there is no `SafetySetting` for a given
  // `SafetyCategory` provided in the list, the API will use the default safety
  // setting for that category.
  final List<SafetySetting>? safetySettings;

  // The set of character sequences (up to 5) that will stop output generation.
  // If specified, the API will stop at the first appearance of a stop
  // sequence. The stop sequence will not be included as part of the response.
  final List<String>? stopSequences;

  GenerateTextRequest({
    required this.model,
    required this.prompt,
    this.temperature,
    this.candidateCount,
    this.maxOutputTokens,
    this.topP,
    this.topK,
    this.safetySettings,
    this.stopSequences,
  });

  @override
  String toString() {
    return 'GenerateTextRequest(model: $model, prompt: $prompt, temperature: $temperature, candidateCount: $candidateCount, maxOutputTokens: $maxOutputTokens, topP: $topP, topK: $topK, safetySettings: $safetySettings, stopSequences: $stopSequences)';
  }

  @override
  bool operator ==(Object other) {
    if (identical(this, other)) return true;
    final listEquals = const DeepCollectionEquality().equals;

    return other is GenerateTextRequest &&
        other.model == model &&
        other.prompt == prompt &&
        other.temperature == temperature &&
        other.candidateCount == candidateCount &&
        other.maxOutputTokens == maxOutputTokens &&
        other.topP == topP &&
        other.topK == topK &&
        listEquals(other.safetySettings, safetySettings) &&
        listEquals(other.stopSequences, stopSequences);
  }

  @override
  int get hashCode {
    // NOTE(review): the list fields hash by identity while `==` compares them
    // deeply, so two equal requests may hash differently. Confirm whether the
    // equality helper exposes a deep hash to use here.
    return model.hashCode ^
        prompt.hashCode ^
        temperature.hashCode ^
        candidateCount.hashCode ^
        maxOutputTokens.hashCode ^
        topP.hashCode ^
        topK.hashCode ^
        safetySettings.hashCode ^
        stopSequences.hashCode;
  }

  // Returns a copy of this request with the given fields replaced.
  GenerateTextRequest copyWith({
    String?
model,
    TextPrompt? prompt,
    double? temperature,
    int? candidateCount,
    int? maxOutputTokens,
    double? topP,
    int? topK,
    List<SafetySetting>? safetySettings,
    List<String>? stopSequences,
  }) {
    return GenerateTextRequest(
      model: model ?? this.model,
      prompt: prompt ?? this.prompt,
      temperature: temperature ?? this.temperature,
      candidateCount: candidateCount ?? this.candidateCount,
      maxOutputTokens: maxOutputTokens ?? this.maxOutputTokens,
      topP: topP ?? this.topP,
      topK: topK ?? this.topK,
      safetySettings: safetySettings ?? this.safetySettings,
      stopSequences: stopSequences ?? this.stopSequences,
    );
  }

  // Serializes this request into a JSON-compatible map.
  Map<String, dynamic> toMap() {
    return {
      'model': model,
      'prompt': prompt.toMap(),
      'temperature': temperature,
      'candidateCount': candidateCount,
      'maxOutputTokens': maxOutputTokens,
      'topP': topP,
      'topK': topK,
      'safetySettings': safetySettings?.map((x) => x.toMap()).toList(),
      'stopSequences': stopSequences,
    };
  }

  // Restores a [GenerateTextRequest] from a decoded JSON map.
  factory GenerateTextRequest.fromMap(Map<String, dynamic> map) {
    return GenerateTextRequest(
      model: map['model'] ?? '',
      prompt: TextPrompt.fromMap(map['prompt']),
      temperature: map['temperature']?.toDouble(),
      candidateCount: map['candidateCount']?.toInt(),
      maxOutputTokens: map['maxOutputTokens']?.toInt(),
      topP: map['topP']?.toDouble(),
      topK: map['topK']?.toInt(),
      safetySettings: map['safetySettings'] != null
          ? List<SafetySetting>.from(
              map['safetySettings']?.map((x) => SafetySetting.fromMap(x)))
          : null,
      // Bug fix: the field is nullable, but `List.from(map['stopSequences'])`
      // threw when the key was absent. Guard like `safetySettings` above.
      stopSequences: map['stopSequences'] != null
          ? List<String>.from(map['stopSequences'])
          : null,
    );
  }

  String toJson() => json.encode(toMap());

  factory GenerateTextRequest.fromJson(String source) =>
      GenerateTextRequest.fromMap(json.decode(source));
}

// Response from the model, containing candidate completions plus any
// content-filtering metadata.
class GenerateTextResponse {
  // Candidate responses from the model.
  final List<TextCompletion> candidates;

  // A set of content filtering metadata for the prompt and response
  // text.
  //
  // This indicates which `SafetyCategory`(s) blocked a
  // candidate from this response, the lowest `HarmProbability`
  // that triggered a block, and the HarmThreshold setting for that category.
  // This indicates the smallest change to the `SafetySettings` that would be
  // necessary to unblock at least 1 response.
  //
  // The blocking is configured by the `SafetySettings` in the request (or the
  // default `SafetySettings` of the API).
  final List<ContentFilter> filters;

  // Returns any safety feedback related to content filtering.
  final List<SafetyFeedback> safetyFeedback;

  GenerateTextResponse({
    required this.candidates,
    required this.filters,
    required this.safetyFeedback,
  });

  @override
  String toString() =>
      'GenerateTextResponse(candidates: $candidates, filters: $filters, safetyFeedback: $safetyFeedback)';

  @override
  bool operator ==(Object other) {
    if (identical(this, other)) return true;
    final listEquals = const DeepCollectionEquality().equals;

    return other is GenerateTextResponse &&
        listEquals(other.candidates, candidates) &&
        listEquals(other.filters, filters) &&
        listEquals(other.safetyFeedback, safetyFeedback);
  }

  @override
  int get hashCode =>
      candidates.hashCode ^ filters.hashCode ^ safetyFeedback.hashCode;

  // Returns a copy of this response with the given fields replaced.
  GenerateTextResponse copyWith({
    List<TextCompletion>? candidates,
    List<ContentFilter>? filters,
    List<SafetyFeedback>? safetyFeedback,
  }) {
    return GenerateTextResponse(
      candidates: candidates ?? this.candidates,
      filters: filters ?? this.filters,
      safetyFeedback: safetyFeedback ?? this.safetyFeedback,
    );
  }

  // Serializes this response into a JSON-compatible map.
  Map<String, dynamic> toMap() {
    return {
      'candidates': candidates.map((x) => x.toMap()).toList(),
      'filters': filters.map((x) => x.toMap()).toList(),
      'safetyFeedback': safetyFeedback.map((x) => x.toMap()).toList(),
    };
  }

  // Restores a [GenerateTextResponse] from a decoded JSON map, treating
  // missing lists as empty.
  factory GenerateTextResponse.fromMap(Map<String, dynamic> map) {
    return GenerateTextResponse(
      candidates: List<TextCompletion>.from(
          map['candidates']?.map((x) => TextCompletion.fromMap(x)) ?? []),
      filters: List<ContentFilter>.from(
          map['filters']?.map((x) => ContentFilter.fromMap(x)) ?? []),
      safetyFeedback: List<SafetyFeedback>.from(
          map['safetyFeedback']?.map((x) => SafetyFeedback.fromMap(x)) ?? []),
    );
  }

  String toJson() => json.encode(toMap());

  factory GenerateTextResponse.fromJson(String source) =>
      GenerateTextResponse.fromMap(json.decode(source));
}

// A text prompt given to the model.
class TextPrompt {
  // Required. The prompt text.
  final String text;

  TextPrompt({
    required this.text,
  });

  @override
  String toString() => 'TextPrompt(text: $text)';

  @override
  bool operator ==(Object other) {
    if (identical(this, other)) return true;

    return other is TextPrompt && other.text == text;
  }

  @override
  int get hashCode => text.hashCode;

  // Returns a copy of this prompt with the given fields replaced.
  TextPrompt copyWith({
    String? text,
  }) {
    return TextPrompt(
      text: text ?? this.text,
    );
  }

  // Serializes this prompt into a JSON-compatible map.
  Map<String, dynamic> toMap() {
    return {
      'text': text,
    };
  }

  // Restores a [TextPrompt] from a decoded JSON map.
  factory TextPrompt.fromMap(Map<String, dynamic> map) {
    return TextPrompt(
      text: map['text'] ?? '',
    );
  }

  String toJson() => json.encode(toMap());

  factory TextPrompt.fromJson(String source) =>
      TextPrompt.fromMap(json.decode(source));
}

class TextCompletion {
  // Output only. The generated text returned from the model.
this.safetyFeedback, 242 | ); 243 | } 244 | 245 | Map toMap() { 246 | return { 247 | 'candidates': candidates.map((x) => x.toMap()).toList(), 248 | 'filters': filters.map((x) => x.toMap()).toList(), 249 | 'safetyFeedback': safetyFeedback.map((x) => x.toMap()).toList(), 250 | }; 251 | } 252 | 253 | factory GenerateTextResponse.fromMap(Map map) { 254 | return GenerateTextResponse( 255 | candidates: List.from( 256 | map['candidates']?.map((x) => TextCompletion.fromMap(x)) ?? []), 257 | filters: List.from( 258 | map['filters']?.map((x) => ContentFilter.fromMap(x)) ?? []), 259 | safetyFeedback: List.from( 260 | map['safetyFeedback']?.map((x) => SafetyFeedback.fromMap(x)) ?? []), 261 | ); 262 | } 263 | 264 | String toJson() => json.encode(toMap()); 265 | 266 | factory GenerateTextResponse.fromJson(String source) => 267 | GenerateTextResponse.fromMap(json.decode(source)); 268 | } 269 | 270 | class TextPrompt { 271 | // Required. The prompt text. 272 | final String text; 273 | 274 | TextPrompt({ 275 | required this.text, 276 | }); 277 | 278 | @override 279 | String toString() => 'TextPrompt(text: $text)'; 280 | 281 | @override 282 | bool operator ==(Object other) { 283 | if (identical(this, other)) return true; 284 | 285 | return other is TextPrompt && other.text == text; 286 | } 287 | 288 | @override 289 | int get hashCode => text.hashCode; 290 | 291 | TextPrompt copyWith({ 292 | String? text, 293 | }) { 294 | return TextPrompt( 295 | text: text ?? this.text, 296 | ); 297 | } 298 | 299 | Map toMap() { 300 | return { 301 | 'text': text, 302 | }; 303 | } 304 | 305 | factory TextPrompt.fromMap(Map map) { 306 | return TextPrompt( 307 | text: map['text'] ?? '', 308 | ); 309 | } 310 | 311 | String toJson() => json.encode(toMap()); 312 | 313 | factory TextPrompt.fromJson(String source) => 314 | TextPrompt.fromMap(json.decode(source)); 315 | } 316 | 317 | class TextCompletion { 318 | // Output only. The generated text returned from the model. 
319 | final String output; 320 | 321 | // Ratings for the safety of a response. 322 | // 323 | // There is at most one rating per category. 324 | final List safetyRatings; 325 | 326 | // Output only. Citation information for model-generated `output` in this 327 | // `TextCompletion`. 328 | // 329 | // This field may be populated with attribution information for any text 330 | // included in the `output`. 331 | final CitationMetadata? citationMetadata; 332 | 333 | TextCompletion({ 334 | required this.output, 335 | required this.safetyRatings, 336 | this.citationMetadata, 337 | }); 338 | 339 | @override 340 | String toString() => 341 | 'TextCompletion(output: $output, safetyRatings: $safetyRatings, citationMetadata: $citationMetadata)'; 342 | 343 | @override 344 | bool operator ==(Object other) { 345 | if (identical(this, other)) return true; 346 | final listEquals = const DeepCollectionEquality().equals; 347 | 348 | return other is TextCompletion && 349 | other.output == output && 350 | listEquals(other.safetyRatings, safetyRatings) && 351 | other.citationMetadata == citationMetadata; 352 | } 353 | 354 | @override 355 | int get hashCode => 356 | output.hashCode ^ safetyRatings.hashCode ^ citationMetadata.hashCode; 357 | 358 | TextCompletion copyWith({ 359 | String? output, 360 | List? safetyRatings, 361 | CitationMetadata? citationMetadata, 362 | }) { 363 | return TextCompletion( 364 | output: output ?? this.output, 365 | safetyRatings: safetyRatings ?? this.safetyRatings, 366 | citationMetadata: citationMetadata ?? this.citationMetadata, 367 | ); 368 | } 369 | 370 | Map toMap() { 371 | return { 372 | 'output': output, 373 | 'safetyRatings': safetyRatings.map((x) => x.toMap()).toList(), 374 | 'citationMetadata': citationMetadata?.toMap(), 375 | }; 376 | } 377 | 378 | factory TextCompletion.fromMap(Map map) { 379 | return TextCompletion( 380 | output: map['output'] ?? 
'', 381 | safetyRatings: List.from( 382 | map['safetyRatings']?.map((x) => SafetyRating.fromMap(x))), 383 | citationMetadata: map['citationMetadata'] != null 384 | ? CitationMetadata.fromMap(map['citationMetadata']) 385 | : null, 386 | ); 387 | } 388 | 389 | String toJson() => json.encode(toMap()); 390 | 391 | factory TextCompletion.fromJson(String source) => 392 | TextCompletion.fromMap(json.decode(source)); 393 | } 394 | 395 | class EmbedTextRequest { 396 | // Required. The model name to use with the format model=models/{model}. 397 | final String model; 398 | 399 | // Required. The free-form input text that the model will turn into an 400 | // embedding. 401 | final String text; 402 | 403 | EmbedTextRequest({ 404 | required this.model, 405 | required this.text, 406 | }); 407 | 408 | @override 409 | String toString() => 'EmbedTextRequest(model: $model, text: $text)'; 410 | 411 | @override 412 | bool operator ==(Object other) { 413 | if (identical(this, other)) return true; 414 | 415 | return other is EmbedTextRequest && 416 | other.model == model && 417 | other.text == text; 418 | } 419 | 420 | @override 421 | int get hashCode => model.hashCode ^ text.hashCode; 422 | 423 | EmbedTextRequest copyWith({ 424 | String? model, 425 | String? text, 426 | }) { 427 | return EmbedTextRequest( 428 | model: model ?? this.model, 429 | text: text ?? this.text, 430 | ); 431 | } 432 | 433 | Map toMap() { 434 | return { 435 | 'model': model, 436 | 'text': text, 437 | }; 438 | } 439 | 440 | factory EmbedTextRequest.fromMap(Map map) { 441 | return EmbedTextRequest( 442 | model: map['model'] ?? '', 443 | text: map['text'] ?? '', 444 | ); 445 | } 446 | 447 | String toJson() => json.encode(toMap()); 448 | 449 | factory EmbedTextRequest.fromJson(String source) => 450 | EmbedTextRequest.fromMap(json.decode(source)); 451 | } 452 | 453 | class EmbedTextResponse { 454 | // Output only. The embedding generated from the input text. 455 | final Embedding? 
embedding; 456 | 457 | EmbedTextResponse({ 458 | this.embedding, 459 | }); 460 | 461 | @override 462 | String toString() => 'EmbedTextResponse(embedding: $embedding)'; 463 | 464 | @override 465 | bool operator ==(Object other) { 466 | if (identical(this, other)) return true; 467 | 468 | return other is EmbedTextResponse && other.embedding == embedding; 469 | } 470 | 471 | @override 472 | int get hashCode => embedding.hashCode; 473 | 474 | EmbedTextResponse copyWith({ 475 | Embedding? embedding, 476 | }) { 477 | return EmbedTextResponse( 478 | embedding: embedding ?? this.embedding, 479 | ); 480 | } 481 | 482 | Map toMap() { 483 | return { 484 | 'embedding': embedding?.toMap(), 485 | }; 486 | } 487 | 488 | factory EmbedTextResponse.fromMap(Map map) { 489 | return EmbedTextResponse( 490 | embedding: 491 | map['embedding'] != null ? Embedding.fromMap(map['embedding']) : null, 492 | ); 493 | } 494 | 495 | String toJson() => json.encode(toMap()); 496 | 497 | factory EmbedTextResponse.fromJson(String source) => 498 | EmbedTextResponse.fromMap(json.decode(source)); 499 | } 500 | 501 | class Embedding { 502 | // The embedding values. 503 | final List value; 504 | 505 | Embedding({ 506 | required this.value, 507 | }); 508 | 509 | @override 510 | String toString() => 'Embedding(value: $value)'; 511 | 512 | @override 513 | bool operator ==(Object other) { 514 | if (identical(this, other)) return true; 515 | final listEquals = const DeepCollectionEquality().equals; 516 | 517 | return other is Embedding && listEquals(other.value, value); 518 | } 519 | 520 | @override 521 | int get hashCode => value.hashCode; 522 | 523 | Embedding copyWith({ 524 | List? value, 525 | }) { 526 | return Embedding( 527 | value: value ?? 
this.value, 528 | ); 529 | } 530 | 531 | Map toMap() { 532 | return { 533 | 'value': value, 534 | }; 535 | } 536 | 537 | factory Embedding.fromMap(Map map) { 538 | return Embedding( 539 | value: List.from(map['value']), 540 | ); 541 | } 542 | 543 | String toJson() => json.encode(toMap()); 544 | 545 | factory Embedding.fromJson(String source) => 546 | Embedding.fromMap(json.decode(source)); 547 | } 548 | -------------------------------------------------------------------------------- /lib/src/helpers/client.dart: -------------------------------------------------------------------------------- 1 | import 'dart:convert'; 2 | 3 | import 'package:http/http.dart' as http; 4 | import 'package:palm_api/src/helpers/exception.dart'; 5 | import 'package:palm_api/src/helpers/palm_api_version.dart'; 6 | 7 | const _defaultAPIEndpoint = 'https://generativelanguage.googleapis.com'; 8 | 9 | class PalmClient { 10 | final String _baseUrl; 11 | 12 | http.Client httpClient; // Instantiate the HTTP client 13 | 14 | PalmClient({ 15 | /// Change the baseUrl for proxy implementation 16 | String? baseUrl, 17 | http.Client? client, 18 | /// Select API version of PaLM API 19 | PalmApiVersion apiVersion = PalmApiVersion.v1Beta2, 20 | }) : httpClient = client ?? http.Client(), 21 | _baseUrl = baseUrl ?? '$_defaultAPIEndpoint/${apiVersion.name}'; 22 | 23 | /// General method to make a GET request. 24 | /// 25 | /// [path] is appended to the base URL to form the full URL to connect to. 26 | /// 27 | /// Returns the JSON body of the response, parsed as a Map. 28 | Future> get(String path) async { 29 | Uri url = Uri.parse('$_baseUrl/$path'); 30 | var response = await httpClient.get(url); 31 | 32 | // If the request was successful, parse and return the JSON response body. 33 | if (response.statusCode == 200) { 34 | return json.decode(response.body); 35 | } 36 | 37 | // If the server responds with another status code, we throw an 38 | // exception with the status code and the error body. 
39 | throw PalmApiException(response.body, response.statusCode); 40 | } 41 | 42 | /// Method to make POST request. 43 | /// 44 | /// [path] is appended to the base URL and [body] is the request payload. 45 | /// 46 | /// Returns the JSON body of the response. 47 | Future> post( 48 | String path, 49 | Map body, 50 | ) async { 51 | Uri url = Uri.parse('$_baseUrl/$path'); 52 | 53 | var response = await httpClient.post( 54 | url, 55 | headers: {"Content-Type": "application/json"}, 56 | body: json.encode(body), 57 | ); 58 | 59 | // If the server returns a OK response, parse and return the JSON response body. 60 | if (response.statusCode == 200) { 61 | return json.decode(response.body); 62 | } 63 | 64 | throw PalmApiException(response.body, response.statusCode); 65 | } 66 | 67 | /// Don't forget to close the httpClient when it's no longer used. 68 | void dispose() { 69 | httpClient.close(); 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /lib/src/helpers/deep_collection_equality.dart: -------------------------------------------------------------------------------- 1 | /// A class to perform deep equality checks on collections. 2 | /// 3 | /// This class compares collections (List, Set, Map) recursively for equality. 4 | /// It can also compute a hash code for a collection. 5 | class DeepCollectionEquality { 6 | const DeepCollectionEquality(); 7 | 8 | /// Compares [object1] and [object2] deeply for equality. 9 | /// 10 | /// Returns true if both objects are deeply equal; otherwise, returns false. 11 | bool equals(Object? object1, Object? 
object2) { 12 | if (identical(object1, object2)) { 13 | return true; 14 | } 15 | if (object1 is List && object2 is List) { 16 | return _compareLists(object1, object2); 17 | } 18 | if (object1 is Set && object2 is Set) { 19 | return _compareSets(object1, object2); 20 | } 21 | if (object1 is Map && object2 is Map) { 22 | return _compareMaps(object1, object2); 23 | } 24 | 25 | if (object1?.runtimeType == object2?.runtimeType) { 26 | return object1 == object2; 27 | } 28 | 29 | return false; 30 | } 31 | 32 | /// Computes a hash code for [object]. 33 | /// 34 | /// If [object] is a collection (List, Set, Map), computes a hash code recursively. 35 | /// Otherwise, returns the hash code of the object. 36 | int hash(Object? object) { 37 | if (object == null) { 38 | return 0; 39 | } 40 | 41 | var hashCode = 0; 42 | if (object is List) { 43 | for (var element in object) { 44 | hashCode = 31 * hashCode + hash(element); 45 | } 46 | } else if (object is Set) { 47 | for (var element in object) { 48 | hashCode = 31 * hashCode + hash(element); 49 | } 50 | } else if (object is Map) { 51 | for (var key in object.keys) { 52 | hashCode = 31 * hashCode + hash(key); 53 | hashCode = 31 * hashCode + hash(object[key]); 54 | } 55 | } else { 56 | hashCode = object.hashCode; 57 | } 58 | 59 | return hashCode; 60 | } 61 | 62 | // Compares two lists [list1] and [list2] deeply for equality. 63 | /// 64 | /// Returns true if both lists are deeply equal; otherwise, returns false. 65 | bool _compareLists(List list1, List list2) { 66 | if (list1.length != list2.length) { 67 | return false; 68 | } 69 | for (var i = 0; i < list1.length; i++) { 70 | if (!equals(list1[i], list2[i])) { 71 | return false; 72 | } 73 | } 74 | 75 | return true; 76 | } 77 | 78 | /// Compares two sets [set1] and [set2] deeply for equality. 79 | /// 80 | /// Returns true if both sets are deeply equal; otherwise, returns false. 
81 | bool _compareSets(Set set1, Set set2) { 82 | if (set1.length != set2.length) { 83 | return false; 84 | } 85 | for (var element1 in set1) { 86 | bool isEqual = false; 87 | for (var element2 in set2) { 88 | if (equals(element1, element2)) { 89 | isEqual = true; 90 | break; 91 | } 92 | } 93 | if (!isEqual) { 94 | return false; 95 | } 96 | } 97 | 98 | return true; 99 | } 100 | 101 | /// Compares two maps [map1] and [map2] deeply for equality. 102 | /// 103 | /// Returns true if both maps are deeply equal; otherwise, returns false. 104 | bool _compareMaps(Map map1, Map map2) { 105 | if (map1.length != map2.length) { 106 | return false; 107 | } 108 | for (var key in map1.keys) { 109 | if (!map2.containsKey(key) || !equals(map1[key], map2[key])) { 110 | return false; 111 | } 112 | } 113 | 114 | return true; 115 | } 116 | } 117 | -------------------------------------------------------------------------------- /lib/src/helpers/exception.dart: -------------------------------------------------------------------------------- 1 | class PalmApiException implements Exception { 2 | final String message; 3 | final int code; 4 | 5 | PalmApiException(this.message, this.code); 6 | 7 | @override 8 | String toString() => 'PalmApiException(code: $code, message: $message)'; 9 | } 10 | 11 | class MethodNotSupportedException implements Exception { 12 | String model; 13 | String methodName; 14 | List supportedMethods; 15 | 16 | MethodNotSupportedException({ 17 | required this.model, 18 | required this.methodName, 19 | required this.supportedMethods, 20 | }); 21 | 22 | String get message => 23 | 'Method $methodName is not supported for model $model. 
Supported methods are: $supportedMethods';

  @override
  String toString() => 'GenerationMethodNotSupported(message: $message)';
}
--------------------------------------------------------------------------------
/lib/src/helpers/export.dart:
--------------------------------------------------------------------------------
export './client.dart';
export './exception.dart';
// PalmApiVersion appears in PalmClient's public constructor, so consumers
// need the type; it was previously missing from this barrel file.
export './palm_api_version.dart';
export './palm_model_variations.dart';
--------------------------------------------------------------------------------
/lib/src/helpers/palm_api_version.dart:
--------------------------------------------------------------------------------
/// Supported REST API versions of the PaLM API.
enum PalmApiVersion {
  v1Beta2('v1beta2'),
  v1Beta3('v1beta3');

  const PalmApiVersion(this.name);

  /// Path segment used when building request URLs (e.g. `v1beta2`).
  final String name;
}
--------------------------------------------------------------------------------
/lib/src/helpers/palm_model_variations.dart:
--------------------------------------------------------------------------------
import 'package:palm_api/src/dto/model.dto.dart';

/// Known PaLM model variants, exposed as ready-made [Model] instances.
class PalmModel {
  const PalmModel._();

  /// Resolves a bare model name (e.g. `chat-bison-001`) to its [Model].
  ///
  /// Throws an [ArgumentError] for unknown names.
  static Model fromModelName(String modelName) {
    switch (modelName) {
      case 'chat-bison-001':
        return chatBison001;
      case 'text-bison-001':
        return textBison001;
      case 'embedding-gecko-001':
        return embeddingGecko001;
      default:
        throw ArgumentError('Model $modelName not found');
    }
  }

  // `final` so the shared instances cannot be reassigned by callers.
  static final Model chatBison001 = Model.fromMap(
    {
      "name": "models/chat-bison-001",
      "baseModelId": "",
      "version": "001",
      "displayName": "Chat Bison",
      "description": "Chat-optimized generative language model.",
      "inputTokenLimit": 4096,
      "outputTokenLimit": 1024,
      "supportedGenerationMethods": ["generateMessage", "countMessageTokens"],
      "temperature": 0.25,
      "topP": 0.95,
      "topK": 40
    },
  );

  static final Model textBison001 = Model.fromMap({
    "name": "models/text-bison-001",
    "baseModelId": "",
    "version": "001",
    "displayName": "Text Bison",
    "description": "Model targeted for text generation.",
    "inputTokenLimit": 8196,
    "outputTokenLimit": 1024,
    "supportedGenerationMethods": ["generateText", "countTextTokens"],
    "temperature": 0.7,
    "topP": 0.95,
    "topK": 40
  });

  static final Model embeddingGecko001 = Model.fromMap({
    "name": "models/embedding-gecko-001",
    "baseModelId": "",
    "version": "001",
    "displayName": "Embedding Gecko",
    "description": "Obtain a distributed representation of a text.",
    "inputTokenLimit": 1024,
    "outputTokenLimit": 1,
    "supportedGenerationMethods": ["embedText"],
    "temperature": null,
    "topP": null,
    "topK": null
  });
}
--------------------------------------------------------------------------------
/lib/src/palm_api_base.dart:
--------------------------------------------------------------------------------
export './dto/export.dart';
export './helpers/export.dart';
export './services/export.dart';
--------------------------------------------------------------------------------
/lib/src/services/base_service.dart:
--------------------------------------------------------------------------------
import 'package:palm_api/src/helpers/client.dart';

/// Common plumbing for all PaLM services: holds the [PalmClient] and the
/// API key, and builds model-scoped request paths.
abstract class BaseService {
  final PalmClient apiClient;

  final String _apiKey;

  BaseService({
    required String apiKey,
    PalmClient? apiClient,
  })  : apiClient = apiClient ?? PalmClient(),
        _apiKey = apiKey;

  /// Query-string fragment carrying the API key, appended to every path.
  String get keyParam => '?key=$_apiKey';

  /// Returns a function that builds a model path with the given model name.
  /// Make sure to add it as models/:modelId: prefix to the path.
  String Function(String) getModelPathBuilder(String model) {
    var modelPath = model;
    if (!modelPath.startsWith('models/')) {
      modelPath = 'models/$model';
    }
    return (String path) => '$modelPath$path$keyParam';
  }
}
--------------------------------------------------------------------------------
/lib/src/services/discuss_service.dart:
--------------------------------------------------------------------------------
import 'package:palm_api/src/dto/discuss_service.dto.dart';
import 'package:palm_api/src/services/base_service.dart';

/// DiscussService deals with all chat related API endpoints. It provides
/// an organized way of interactively working with the language model API.
class DiscussService extends BaseService {
  DiscussService({
    required super.apiKey,
    super.apiClient,
  });

  /// Generate a response for a given text using a specific model.
  ///
  /// Throws an ApiException if the request fails.
  /// Otherwise, returns a GenerateMessageResponse.
  Future<GenerateMessageResponse> generateMessage({
    required String model,
    required MessagePrompt prompt,
    double? temperature,
    int? candidateCount,
    double? topP,
    int? topK,
  }) async {
    final modelPathBuilder = getModelPathBuilder(model);
    final requestParams = GenerateMessageRequest(
      model: model,
      prompt: prompt,
      temperature: temperature,
      candidateCount: candidateCount,
      topP: topP,
      topK: topK,
    );

    final response = await apiClient.post(
      modelPathBuilder(':generateMessage'),
      requestParams.toMap(),
    );

    return GenerateMessageResponse.fromMap(response);
  }

  /// Count message tokens in a string.
  ///
  /// Throws an ApiException if the request fails.
  /// Otherwise, returns a CountMessageTokensResponse.
Future<CountMessageTokensResponse> countMessageTokens({
    required String model,
    required String text,
  }) async {
    final modelPathBuilder = getModelPathBuilder(model);
    // The text is wrapped in a single-message prompt, matching the wire
    // format the endpoint expects.
    final request = CountMessageTokensRequest(
      model: model,
      prompt: MessagePrompt(messages: [Message(content: text)]),
    );

    final response = await apiClient.post(
      modelPathBuilder(':countMessageTokens'),
      request.toMap(),
    );

    return CountMessageTokensResponse.fromMap(response);
  }
}
--------------------------------------------------------------------------------
/lib/src/services/export.dart:
--------------------------------------------------------------------------------
export './discuss_service.dart';
// GenerativeLanguage is a public entry point; it was previously missing
// from this barrel file.
export './generative_language_service.dart';
export './model_service.dart';
export './text_service.dart';
--------------------------------------------------------------------------------
/lib/src/services/generative_language_service.dart:
--------------------------------------------------------------------------------
import 'package:palm_api/src/dto/discuss_service.dto.dart';
import 'package:palm_api/src/dto/model.dto.dart';
import 'package:palm_api/src/dto/text_service.dto.dart';
import 'package:palm_api/src/helpers/exception.dart';
import 'package:palm_api/src/services/base_service.dart';
import 'package:palm_api/src/services/discuss_service.dart';
import 'package:palm_api/src/services/text_service.dart';

/// High-level facade bound to a single [Model]: validates that each
/// generation method is supported by the model before delegating to the
/// text or discuss service.
class GenerativeLanguage extends BaseService {
  final Model model;
  final TextService _textService;
  final DiscussService _discussionService;

  GenerativeLanguage({
    required this.model,
    required super.apiKey,
    super.apiClient,
  })  : _textService = TextService(
          apiKey: apiKey,
          apiClient: apiClient,
        ),
        _discussionService = DiscussService(
          apiKey: apiKey,
          apiClient: apiClient,
        );

  /// Throws [MethodNotSupportedException] unless [methodName] appears in
  /// the model's supported generation methods.
  void _checkMethodSupportAndThrow(String methodName) {
    final supportedMethods = model.supportedGenerationMethods;
    if (!supportedMethods.contains(methodName)) {
      throw MethodNotSupportedException(
        model: model.name,
        methodName: methodName,
        supportedMethods: supportedMethods,
      );
    }
  }

  /// Embeds [text] with this instance's model.
  ///
  /// Throws [MethodNotSupportedException] if the model cannot `embedText`.
  Future<EmbedTextResponse> embedText(
    String text,
  ) async {
    _checkMethodSupportAndThrow('embedText');
    return _textService.embedText(
      model: model.name,
      text: text,
    );
  }

  /// Generates a chat message; `params.model` is ignored in favor of this
  /// instance's model.
  Future<GenerateMessageResponse> generateMessage(
    GenerateMessageRequest params,
  ) async {
    _checkMethodSupportAndThrow('generateMessage');
    return _discussionService.generateMessage(
      model: model.name,
      prompt: params.prompt,
      temperature: params.temperature,
      candidateCount: params.candidateCount,
      topP: params.topP,
      topK: params.topK,
    );
  }

  /// Counts the message tokens in [text] using this instance's model.
  Future<CountMessageTokensResponse> countMessageTokens({
    required String text,
  }) async {
    _checkMethodSupportAndThrow('countMessageTokens');
    return _discussionService.countMessageTokens(
      model: model.name,
      text: text,
    );
  }

  /// Generates text; `params.model` is ignored in favor of this instance's
  /// model.
  Future<GenerateTextResponse> generateText(GenerateTextRequest params) async {
    _checkMethodSupportAndThrow('generateText');
    return _textService.generateText(
      model: model.name,
      prompt: params.prompt,
      temperature: params.temperature,
      candidateCount: params.candidateCount,
      maxOutputTokens: params.maxOutputTokens,
      topP: params.topP,
      topK: params.topK,
      safetySettings: params.safetySettings,
      stopSequences: params.stopSequences,
    );
  }
}
--------------------------------------------------------------------------------
/lib/src/services/model_service.dart:
--------------------------------------------------------------------------------
import 'package:palm_api/src/dto/model.dto.dart';
import 'package:palm_api/src/services/base_service.dart';

/// ModelService deals with all model related API endpoints, providing a
/// higher level and organized way of managing models in the API.
class ModelService extends BaseService {
  ModelService({
    required super.apiKey,
    super.apiClient,
  });

  /// Get information about a specific model.
  ///
  /// Throws an ApiException if the request fails.
  /// Otherwise, returns a Model containing the model details.
  Future<Model> getModel(String modelId) async {
    final response = await apiClient.get('models/$modelId$keyParam');

    return Model.fromMap(response);
  }

  /// Lists all the available models.
  ///
  /// Throws an ApiException if the request fails.
  /// Otherwise, returns a list of Model instances.
  Future<List<Model>> listModels() async {
    final response = await apiClient.get('models$keyParam');

    final List<Model> models = (response['models'] as List)
        .map((model) => Model.fromMap(model))
        .toList();

    return models;
  }
}
--------------------------------------------------------------------------------
/lib/src/services/text_service.dart:
--------------------------------------------------------------------------------
import 'package:palm_api/src/dto/safety.dto.dart';
import 'package:palm_api/src/dto/text_service.dto.dart';
import 'package:palm_api/src/services/base_service.dart';

/// TextService deals with all message related API endpoints. It provides
/// an organized way of interactively working with the language model API.
class TextService extends BaseService {
  TextService({
    required super.apiKey,
    super.apiClient,
  });

  /// Generate an embedding for a given text using a specific model.
  ///
  /// Throws an ApiException if the request fails.
  /// Otherwise, returns an Embedding.
  Future<EmbedTextResponse> embedText({
    required String model,
    required String text,
  }) async {
    final modelPathBuilder = getModelPathBuilder(model);
    final request = EmbedTextRequest(model: model, text: text);

    final response = await apiClient.post(
      modelPathBuilder(':embedText'),
      request.toMap(),
    );

    return EmbedTextResponse.fromMap(response);
  }

  /// Generate a response text message from the model given an input text
  /// message.
  ///
  /// Throws an ApiException if the request fails.
  /// Otherwise, returns a GenerateTextResponse.
  Future<GenerateTextResponse> generateText({
    required String model,
    required TextPrompt prompt,
    double? temperature,
    int? candidateCount,
    int? maxOutputTokens,
    double? topP,
    int? topK,
    List<SafetySetting>? safetySettings,
    List<String>? stopSequences,
  }) async {
    final params = GenerateTextRequest(
      model: model,
      prompt: prompt,
      temperature: temperature,
      candidateCount: candidateCount,
      maxOutputTokens: maxOutputTokens,
      topP: topP,
      topK: topK,
      safetySettings: safetySettings,
      stopSequences: stopSequences,
    );

    final modelPathBuilder = getModelPathBuilder(model);

    final response = await apiClient.post(
      modelPathBuilder(':generateText'),
      params.toMap(),
    );

    return GenerateTextResponse.fromMap(response);
  }

  /// Sends a raw MakerSuite-exported request payload to `:generateText`.
  ///
  /// [params] must contain a `model` key; the map is forwarded unchanged
  /// as the request body.
  Future<GenerateTextResponse> generateTextFromMakerSuite(
      Map<String, dynamic> params) async {
    final modelPathBuilder = getModelPathBuilder(params['model']);
    final response = await apiClient.post(
      modelPathBuilder(':generateText'),
      params,
    );

    return GenerateTextResponse.fromMap(response);
  }
}
--------------------------------------------------------------------------------
/open_api.yml:
--------------------------------------------------------------------------------
openapi: 3.0.0

info:
  title: Generative Language API
  description:
API for generating text and messages using models like PaLM. 6 | version: 1.0.0 7 | 8 | servers: 9 | - url: https://generativelanguage.googleapis.com/v1beta2 10 | 11 | paths: 12 | /models/{model}/text: 13 | post: 14 | summary: Generate text from model 15 | operationId: generateText 16 | parameters: 17 | - $ref: "#/components/parameters/ModelParameter" 18 | requestBody: 19 | content: 20 | application/json: 21 | schema: 22 | $ref: "#/components/schemas/GenerateTextRequest" 23 | responses: 24 | 200: 25 | $ref: "#/components/responses/TextGenerationResponse" 26 | 400: 27 | $ref: "#/components/responses/BadRequest" 28 | 29 | /models/{model}/message: 30 | post: 31 | summary: Generate message from model 32 | operationId: generateMessage 33 | parameters: 34 | - $ref: "#/components/parameters/ModelParameter" 35 | requestBody: 36 | content: 37 | application/json: 38 | schema: 39 | $ref: "#/components/schemas/GenerateMessageRequest" 40 | responses: 41 | 200: 42 | $ref: "#/components/responses/MessageGenerationResponse" 43 | 400: 44 | $ref: "#/components/responses/BadRequest" 45 | 46 | /models: 47 | get: 48 | $ref: "#/paths/~1models/get" 49 | 50 | components: 51 | parameters: 52 | ModelParameter: 53 | name: model 54 | in: path 55 | required: true 56 | schema: 57 | type: string 58 | description: Name of the model to use 59 | 60 | schemas: 61 | Model: 62 | type: object 63 | properties: 64 | name: 65 | type: string 66 | description: The resource name of the model 67 | baseModelId: 68 | type: string 69 | description: Base model name 70 | version: 71 | type: string 72 | description: Model version number 73 | displayName: 74 | type: string 75 | description: Human readable display name 76 | description: 77 | type: string 78 | description: Description of the model 79 | inputTokenLimit: 80 | type: integer 81 | description: Max input tokens 82 | outputTokenLimit: 83 | type: integer 84 | description: Max output tokens 85 | supportedGenerationMethods: 86 | type: array 87 | items: 88 | 
type: string 89 | description: Supported generation methods 90 | temperature: 91 | type: number 92 | description: Default temperature 93 | topP: 94 | type: number 95 | description: Default top-p 96 | topK: 97 | type: integer 98 | description: Default top-k 99 | 100 | MessagePrompt: 101 | type: object 102 | properties: 103 | messages: 104 | type: array 105 | items: 106 | $ref: "#/components/schemas/Message" 107 | examples: 108 | type: array 109 | items: 110 | $ref: "#/components/schemas/Example" 111 | 112 | Example: 113 | type: object 114 | properties: 115 | input: 116 | $ref: "#/components/schemas/Message" 117 | output: 118 | $ref: "#/components/schemas/Message" 119 | 120 | Message: 121 | type: object 122 | properties: 123 | author: 124 | type: string 125 | description: Author 126 | content: 127 | type: string 128 | description: Content 129 | citationMetadata: 130 | $ref: "#/components/schemas/CitationMetadata" 131 | 132 | TextPrompt: 133 | type: object 134 | properties: 135 | text: 136 | type: string 137 | description: Input text 138 | 139 | SafetySetting: 140 | type: object 141 | properties: 142 | category: 143 | $ref: "#/components/schemas/HarmCategory" 144 | threshold: 145 | $ref: "#/components/schemas/HarmBlockThreshold" 146 | 147 | HarmCategory: 148 | type: string 149 | enum: 150 | - HARM_CATEGORY_UNSPECIFIED 151 | - HARM_CATEGORY_DEROGATORY 152 | - HARM_CATEGORY_TOXICITY 153 | - HARM_CATEGORY_VIOLENCE 154 | - HARM_CATEGORY_SEXUAL 155 | - HARM_CATEGORY_MEDICAL 156 | - HARM_CATEGORY_DANGEROUS 157 | description: Categories of harmful content 158 | 159 | HarmBlockThreshold: 160 | type: string 161 | enum: 162 | - HARM_BLOCK_THRESHOLD_UNSPECIFIED 163 | - BLOCK_LOW_AND_ABOVE 164 | - BLOCK_MEDIUM_AND_ABOVE 165 | - BLOCK_ONLY_HIGH 166 | - BLOCK_NONE 167 | description: Thresholds for blocking content 168 | 169 | GenerateTextRequest: 170 | type: object 171 | properties: 172 | prompt: 173 | $ref: "#/components/schemas/TextPrompt" 174 | safetySettings: 175 | type: array 
176 | items: 177 | $ref: "#/components/schemas/SafetySetting" 178 | stopSequences: 179 | type: array 180 | items: 181 | type: string 182 | description: Stop sequences 183 | temperature: 184 | type: number 185 | description: Temperature parameter 186 | candidateCount: 187 | type: integer 188 | description: Number of candidates 189 | maxOutputTokens: 190 | type: integer 191 | description: Max output tokens 192 | topP: 193 | type: number 194 | description: Top-p parameter 195 | topK: 196 | type: integer 197 | description: Top-k parameter 198 | 199 | GenerateMessageRequest: 200 | type: object 201 | properties: 202 | prompt: 203 | $ref: "#/components/schemas/MessagePrompt" 204 | temperature: 205 | type: number 206 | candidateCount: 207 | type: integer 208 | 209 | GenerateTextResponse: 210 | type: object 211 | properties: 212 | candidates: 213 | type: array 214 | items: 215 | $ref: "#/components/schemas/TextCompletion" 216 | filters: 217 | type: array 218 | items: 219 | $ref: "#/components/schemas/ContentFilter" 220 | safetyFeedback: 221 | type: array 222 | items: 223 | $ref: "#/components/schemas/SafetyFeedback" 224 | 225 | GenerateMessageResponse: 226 | type: object 227 | properties: 228 | candidates: 229 | type: array 230 | items: 231 | $ref: "#/components/schemas/Message" 232 | messages: 233 | type: array 234 | items: 235 | $ref: "#/components/schemas/Message" 236 | filters: 237 | type: array 238 | items: 239 | $ref: "#/components/schemas/ContentFilter" 240 | 241 | TextCompletion: 242 | type: object 243 | properties: 244 | output: 245 | type: string 246 | description: Generated text 247 | safetyRatings: 248 | type: array 249 | items: 250 | $ref: "#/components/schemas/SafetyRating" 251 | citationMetadata: 252 | $ref: "#/components/schemas/CitationMetadata" 253 | 254 | CitationMetadata: 255 | type: object 256 | properties: 257 | citationSources: 258 | type: array 259 | items: 260 | $ref: "#/components/schemas/CitationSource" 261 | 262 | CitationSource: 263 | type: 
object 264 | properties: 265 | startIndex: 266 | type: integer 267 | description: Start index 268 | endIndex: 269 | type: integer 270 | description: End index 271 | uri: 272 | type: string 273 | description: Cited URI 274 | license: 275 | type: string 276 | description: License info 277 | 278 | ContentFilter: 279 | type: object 280 | properties: 281 | reason: 282 | $ref: "#/components/schemas/BlockReason" 283 | message: 284 | type: string 285 | description: Explanation message 286 | 287 | BlockReason: 288 | type: string 289 | enum: 290 | - BLOCKED_REASON_UNSPECIFIED 291 | - SAFETY 292 | - OTHER 293 | description: Reasons for blocking content 294 | 295 | SafetyRating: 296 | type: object 297 | properties: 298 | category: 299 | $ref: "#/components/schemas/HarmCategory" 300 | probability: 301 | $ref: "#/components/schemas/HarmProbability" 302 | 303 | HarmProbability: 304 | type: string 305 | enum: 306 | - HARM_PROBABILITY_UNSPECIFIED 307 | - NEGLIGIBLE 308 | - LOW 309 | - MEDIUM 310 | - HIGH 311 | description: Levels of harmful probability 312 | 313 | SafetyFeedback: 314 | type: object 315 | properties: 316 | rating: 317 | $ref: "#/components/schemas/SafetyRating" 318 | setting: 319 | $ref: "#/components/schemas/SafetySetting" 320 | 321 | Error: 322 | type: object 323 | properties: 324 | message: 325 | type: string 326 | 327 | responses: 328 | TextGenerationResponse: 329 | description: Text generation success. 330 | content: 331 | application/json: 332 | schema: 333 | $ref: "#/components/schemas/GenerateTextResponse" 334 | 335 | MessageGenerationResponse: 336 | description: Message generation success. 337 | content: 338 | application/json: 339 | schema: 340 | $ref: "#/components/schemas/GenerateMessageResponse" 341 | 342 | BadRequest: 343 | description: Bad request. 
344 | content: 345 | application/json: 346 | schema: 347 | $ref: "#/components/schemas/Error" 348 | 349 | securitySchemes: 350 | OAuth: 351 | type: oauth2 352 | flows: 353 | authorizationCode: 354 | authorizationUrl: https://auth.example.com/authorize 355 | tokenUrl: https://auth.example.com/token 356 | scopes: 357 | generative-language: Access generative language models 358 | -------------------------------------------------------------------------------- /palm_api.md: -------------------------------------------------------------------------------- 1 | # Generative Language API 2 | 3 | ## Overview 4 | 5 | The PaLM API allows developers to build generative AI applications using the PaLM model. Large Language Models (LLMs) are a powerful, versatile type of machine learning model that enables computers to comprehend and generate natural language through a series of prompts. The PaLM API is based on Google's next generation LLM, PaLM. It excels at a variety of different tasks like code generation, reasoning, and writing. You can use the PaLM API to build generative AI applications for use cases like content generation, dialogue agents, summarization and classification systems, and more. 6 | 7 | ### API Version 8 | 9 | By default, this project uses the v1beta2 version of the API. If you want to use a higher version of the API, select the version in the service to use: 10 | 11 | ```dart 12 | service = TextService(apiClient: PalmClient(apiVersion: PalmApiVersion.v1Beta3), ...); 13 | ``` 14 | 15 | ### Service Endpoint 16 | 17 | ``` 18 | Service: generativelanguage.googleapis.com 19 | ``` 20 | 21 | To call this service, we recommend that you use the Google-provided client libraries. If your application needs to use your own libraries to call this service, use the following information when you make the API requests.
22 | 23 | ``` 24 | Service endpoint: 25 | https://generativelanguage.googleapis.com 26 | 27 | REST 28 | Resource: v1beta2.models 29 | ``` 30 | 31 | ### Methods 32 | 33 | | Method | Description | 34 | |-|-| 35 | | `POST /v1beta2/{model=models/*}:countMessageTokens` | Runs a model's tokenizer on a string and returns the token count. | 36 | | `POST /v1beta2/{model=models/*}:embedText` | Generates an embedding from the model given an input message. | 37 | | `POST /v1beta2/{model=models/*}:generateMessage` | Generates a response from the model given an input MessagePrompt. | 38 | | `POST /v1beta2/{model=models/*}:generateText` | Generates a response from the model given an input message. | 39 | | `GET /v1beta2/{name=models/*}` | Gets information about a specific Model. | 40 | | `GET /v1beta2/models` | Lists models available through the API. | 41 | 42 | ### Resource: models 43 | 44 | The `Model` resource provides information about a Generative Language Model. 45 | 46 | ```json 47 | { 48 | "name": "string", 49 | "baseModelId": "string", 50 | "version": "string", 51 | "displayName": "string", 52 | "description": "string", 53 | "inputTokenLimit": "integer", 54 | "outputTokenLimit": "integer", 55 | "supportedGenerationMethods": ["string"], 56 | "temperature": "number", 57 | "topP": "number", 58 | "topK": "integer" 59 | } 60 | ``` 61 | 62 | - `name`: Required. The resource name of the Model. Format: `models/{model}` 63 | - `baseModelId`: Required. The name of the base model. 64 | - `version`: Required. The version number of the model. 65 | - `displayName`: The human-readable name of the model. 66 | - `description`: A short description of the model. 67 | - `inputTokenLimit`: Maximum number of input tokens allowed for this model. 68 | - `outputTokenLimit`: Maximum number of output tokens available for this model. 69 | - `supportedGenerationMethods[]`: The model's supported generation methods. 70 | - `temperature`: Controls the randomness of the output. 
71 | - `topP`: For Nucleus sampling. 72 | - `topK`: For Top-k sampling. 73 | 74 | 75 | ### Models Methods 76 | 77 | - `countMessageTokens`: Runs a model's tokenizer on a string and returns the token count. 78 | - `embedText`: Generates an embedding from the model given an input message. 79 | - `generateMessage`: Generates a response from the model given an input MessagePrompt. 80 | - `generateText`: Generates a response from the model given an input message. 81 | - `get`: Gets information about a specific Model. 82 | - `list`: Lists models available through the API. 83 | 84 | ## countMessageTokens 85 | 86 | Runs a model's tokenizer on a string and returns the token count. 87 | 88 | **HTTP Request** 89 | 90 | ``` 91 | POST https://generativelanguage.googleapis.com/v1beta2/{model=models/*}:countMessageTokens 92 | ``` 93 | 94 | **Path Parameters** 95 | 96 | - `model`: Required. The model's resource name. 97 | 98 | **Request Body** 99 | 100 | ```json 101 | { 102 | "prompt": { 103 | "object": "MessagePrompt" 104 | } 105 | } 106 | ``` 107 | 108 | - `prompt`: Required. The prompt whose token count is to be returned. 109 | 110 | **Response Body** 111 | 112 | ```json 113 | { 114 | "tokenCount": "integer" 115 | } 116 | ``` 117 | 118 | - `tokenCount`: The number of tokens that the model tokenizes the prompt into. 119 | 120 | 121 | ## embedText 122 | 123 | Generates an embedding from the model given an input message. 124 | 125 | **HTTP Request** 126 | 127 | ``` 128 | POST https://generativelanguage.googleapis.com/v1beta2/{model=models/*}:embedText 129 | ``` 130 | 131 | **Path Parameters** 132 | 133 | - `model`: Required. The model name to use. 134 | 135 | **Request Body** 136 | 137 | ```json 138 | { 139 | "text": "string" 140 | } 141 | ``` 142 | 143 | - `text`: Required. The input text to generate embedding for. 
144 | 145 | **Response Body** 146 | 147 | ```json 148 | { 149 | "embedding": { 150 | "object": "Embedding" 151 | } 152 | } 153 | ``` 154 | 155 | - `embedding`: Output only. The embedding generated from the input text. 156 | 157 | 158 | ## generateMessage 159 | 160 | Generates a response from the model given an input MessagePrompt. 161 | 162 | **HTTP Request** 163 | 164 | ``` 165 | POST https://generativelanguage.googleapis.com/v1beta2/{model=models/*}:generateMessage 166 | ``` 167 | 168 | **Path Parameters** 169 | 170 | - `model`: Required. The name of the model to use. 171 | 172 | **Request Body** 173 | 174 | ```json 175 | { 176 | "prompt": { 177 | "object": "MessagePrompt" 178 | }, 179 | "temperature": "number", 180 | "candidateCount": "integer", 181 | "topP": "number", 182 | "topK": "integer" 183 | } 184 | ``` 185 | 186 | - `prompt`: Required. The input prompt. 187 | - `temperature`: Optional. Controls output randomness. 188 | - `candidateCount`: Optional. Number of responses to return. 189 | - `topP`: Optional. Nucleus sampling parameter. 190 | - `topK`: Optional. Top-k sampling parameter. 191 | 192 | **Response Body** 193 | 194 | ```json 195 | { 196 | "candidates": [ 197 | { 198 | "object": "Message" 199 | } 200 | ], 201 | "messages": [ 202 | { 203 | "object": "Message" 204 | } 205 | ], 206 | "filters": [ 207 | { 208 | "object": "ContentFilter" 209 | } 210 | ] 211 | } 212 | ``` 213 | 214 | - `candidates[]`: Candidate response messages. 215 | - `messages[]`: Conversation history. 216 | - `filters[]`: Content filtering metadata. 217 | 218 | ## generateText 219 | 220 | Generates a response from the model given an input message. 221 | 222 | **HTTP Request** 223 | 224 | ``` 225 | POST https://generativelanguage.googleapis.com/v1beta2/{model=models/*}:generateText 226 | ``` 227 | 228 | **Path Parameters** 229 | 230 | - `model`: Required. The model name to use. 
231 | 232 | **Request Body** 233 | 234 | ```json 235 | { 236 | "prompt": { 237 | "object": "TextPrompt" 238 | }, 239 | "safetySettings": [ 240 | { 241 | "object": "SafetySetting" 242 | } 243 | ], 244 | "stopSequences": [ 245 | "string" 246 | ], 247 | "temperature": "number", 248 | "candidateCount": "integer", 249 | "maxOutputTokens": "integer", 250 | "topP": "number", 251 | "topK": "integer" 252 | } 253 | ``` 254 | 255 | - `prompt`: Required. The input prompt text. 256 | - `safetySettings[]`: Optional. Safety settings. 257 | - `stopSequences[]`: Optional. Stop sequence strings. 258 | - `temperature`: Optional. Controls output randomness. 259 | - `candidateCount`: Optional. Number of responses to return. 260 | - `maxOutputTokens`: Optional. Maximum output length. 261 | - `topP`: Optional. Nucleus sampling parameter. 262 | - `topK`: Optional. Top-k sampling parameter. 263 | 264 | **Response Body** 265 | 266 | ```json 267 | { 268 | "candidates": [ 269 | { 270 | "object": "TextCompletion" 271 | } 272 | ], 273 | "filters": [ 274 | { 275 | "object": "ContentFilter" 276 | } 277 | ], 278 | "safetyFeedback": [ 279 | { 280 | "object": "SafetyFeedback" 281 | } 282 | ] 283 | } 284 | ``` 285 | 286 | - `candidates[]`: Candidate response messages. 287 | - `filters[]`: Content filtering metadata. 288 | - `safetyFeedback[]`: Safety feedback. 289 | 290 | 291 | ## models.get 292 | 293 | Gets information about a specific Model. 294 | 295 | **HTTP Request** 296 | 297 | ``` 298 | GET https://generativelanguage.googleapis.com/v1beta2/{name=models/*} 299 | ``` 300 | 301 | **Path Parameters** 302 | 303 | - `name`: Required. The resource name of the model. 304 | 305 | **Response Body** 306 | 307 | Returns a `Model` resource. 308 | 309 | 310 | Here is the embedText documentation in Markdown format: 311 | 312 | ## embedText 313 | 314 | Generates an embedding from the model given one or more input messages. 
315 | 316 | **HTTP Request** 317 | 318 | ``` 319 | POST https://generativelanguage.googleapis.com/v1beta2/{model=models/*}:embedText 320 | ``` 321 | 322 | **Path Parameters** 323 | 324 | - `model`: Required. The model name to use. 325 | 326 | **Request Body** 327 | 328 | ```json 329 | { 330 | "texts": [ 331 | "string" 332 | ] 333 | } 334 | ``` 335 | 336 | - `texts`: Required. One or more input texts to generate embeddings for. 337 | 338 | **Response Body** 339 | 340 | ```json 341 | { 342 | "embeddings": [ 343 | { 344 | "object": "Embedding" 345 | } 346 | ] 347 | } 348 | ``` 349 | 350 | - `embeddings[]`: Output only. The embeddings generated from the input texts, in the same order. 351 | 352 | 353 | ### Embedding 354 | 355 | A generated embedding for text. 356 | 357 | ```json 358 | { 359 | "values": [ 360 | "number" 361 | ] 362 | } 363 | ``` 364 | 365 | - `values[]`: The embedding values. 366 | 367 | 368 | 369 | ## models.list 370 | 371 | Lists models available through the API. 372 | 373 | **HTTP Request** 374 | 375 | ``` 376 | GET https://generativelanguage.googleapis.com/v1beta2/models 377 | ``` 378 | 379 | **Query Parameters** 380 | 381 | - `pageSize`: Maximum number of models to return per page. 382 | - `pageToken`: Page token received from a previous call. 383 | 384 | **Response Body** 385 | 386 | ```json 387 | { 388 | "models": [ 389 | { 390 | "object": "Model" 391 | } 392 | ], 393 | "nextPageToken": "string" 394 | } 395 | ``` 396 | 397 | - `models[]`: The returned Models. 398 | - `nextPageToken`: Token to retrieve the next page. 399 | 400 | 401 | ## Objects 402 | 403 | ### CitationMetadata 404 | 405 | Metadata about the source of generated content. 406 | 407 | ```json 408 | { 409 | "citationSources": [ 410 | { 411 | "object": "CitationSource" 412 | } 413 | ] 414 | } 415 | ``` 416 | 417 | - `citationSources[]`: Citations to sources for the content.
418 | 419 | ### CitationSource 420 | 421 | Source citation for a portion of the content. 422 | 423 | ```json 424 | { 425 | "startIndex": "integer", 426 | "endIndex": "integer", 427 | "uri": "string", 428 | "license": "string" 429 | } 430 | ``` 431 | 432 | - `startIndex`: Start of attributed segment. 433 | - `endIndex`: End of attributed segment. 434 | - `uri`: URI of attributed source. 435 | - `license`: License for attributed code. 436 | 437 | ### ContentFilter 438 | 439 | Content filtering metadata. 440 | 441 | ```json 442 | { 443 | "reason": "enum", 444 | "message": "string" 445 | } 446 | ``` 447 | 448 | - `reason`: The filtering reason. 449 | - `message`: Description of the filtering. 450 | 451 | ### SafetySetting 452 | 453 | Safety setting configuration. 454 | 455 | ```json 456 | { 457 | "category": "enum", 458 | "threshold": "enum" 459 | } 460 | ``` 461 | 462 | - `category`: Required. The safety category. 463 | - `threshold`: Required. The blocking threshold. 464 | 465 | ### TextPrompt 466 | 467 | Input text prompt. 468 | 469 | ```json 470 | { 471 | "text": "string" 472 | } 473 | ``` 474 | 475 | - `text`: Required. The prompt text. 476 | 477 | ### TextCompletion 478 | 479 | Generated text response. 480 | 481 | ```json 482 | { 483 | "output": "string", 484 | "safetyRatings": [ 485 | { 486 | "object": "SafetyRating" 487 | } 488 | ], 489 | "citationMetadata": { 490 | "object": "CitationMetadata" 491 | } 492 | } 493 | ``` 494 | 495 | - `output`: Output only. The generated text. 496 | - `safetyRatings[]`: Safety ratings. 497 | - `citationMetadata`: Attribution metadata. 498 | 499 | ### SafetyRating 500 | 501 | Safety rating for content. 502 | 503 | ```json 504 | { 505 | "category": "enum", 506 | "probability": "enum" 507 | } 508 | ``` 509 | 510 | - `category`: Required. The safety category. 511 | - `probability`: Required. The harm probability. 512 | 513 | ### SafetyFeedback 514 | 515 | Safety feedback for the request. 
516 | 517 | ```json 518 | { 519 | "rating": { 520 | "object": "SafetyRating" 521 | }, 522 | "setting": { 523 | "object": "SafetySetting" 524 | } 525 | } 526 | ``` 527 | 528 | - `rating`: Safety rating that triggered blocking. 529 | - `setting`: Safety setting applied. 530 | 531 | ### Message 532 | 533 | Structured message unit. 534 | 535 | ```json 536 | { 537 | "author": "string", 538 | "content": "string", 539 | "citationMetadata": { 540 | "object": "CitationMetadata" 541 | } 542 | } 543 | ``` 544 | 545 | - `author`: Optional. Message author. 546 | - `content`: Required. Message text. 547 | - `citationMetadata`: Attribution metadata. 548 | 549 | ### MessagePrompt 550 | 551 | Structured prompt with context, examples, and messages. 552 | 553 | ```json 554 | { 555 | "context": "string", 556 | "examples": [ 557 | { 558 | "object": "Example" 559 | } 560 | ], 561 | "messages": [ 562 | { 563 | "object": "Message" 564 | } 565 | ] 566 | } 567 | ``` 568 | 569 | - `context`: Optional. Grounding context. 570 | - `examples[]`: Optional. Input/output examples. 571 | - `messages[]`: Required. Conversation history. 572 | 573 | ### Example 574 | 575 | Input/output example. 576 | 577 | ```json 578 | { 579 | "input": { 580 | "object": "Message" 581 | }, 582 | "output": { 583 | "object": "Message" 584 | } 585 | } 586 | ``` 587 | 588 | - `input`: Example input. 589 | - `output`: Expected output. 590 | 591 | 592 | ## Prompt Precedence 593 | 594 | The `MessagePrompt` contains `context`, `examples`, and `messages` fields. These have the following precedence if the total input size exceeds the model's `inputTokenLimit`: 595 | 596 | 1. `context` - The context will be kept first if provided. Context gives high-level instructions and guidance to the model. 597 | 598 | 2. `examples` - Example inputs and outputs will be kept next. Examples demonstrate desired model behavior. 599 | 600 | 3. `messages` - Message history will be truncated last. Older messages are dropped to fit within limits. 
601 | 602 | This means `context` and `examples` take precedence over `messages` when the input must be truncated. 603 | 604 | For example: 605 | 606 | ```json 607 | { 608 | "context": "Translate the following sentences to French", 609 | 610 | "examples": [ 611 | { 612 | "input": { 613 | "content": "Hello there!" 614 | }, 615 | "output": { 616 | "content": "Bonjour!" 617 | } 618 | } 619 | ], 620 | 621 | "messages": [ 622 | { 623 | "content": "Hello my friend." 624 | }, 625 | { 626 | "content": "How are you today?" 627 | } 628 | ] 629 | } 630 | ``` 631 | 632 | If this exceeds the `inputTokenLimit`, the oldest `messages` will be dropped first. The `context` and `examples` will be preserved, since they provide important guidance to the model. 633 | 634 | -------------------------------------------------------------------------------- /pubspec.yaml: -------------------------------------------------------------------------------- 1 | name: palm_api 2 | description: A Dart client for interacting with the PaLM API, allowing developers to build generative AI applications.
3 | version: 0.0.3 4 | repository: https://github.com/leoafarias/palm_api_dart 5 | 6 | environment: 7 | sdk: ^3.0.3 8 | 9 | dependencies: 10 | http: ^1.1.0 11 | 12 | dev_dependencies: 13 | dotenv: ^4.1.0 14 | lints: ^2.1.1 15 | mockito: ^5.4.2 16 | test: ^1.24.6 17 | -------------------------------------------------------------------------------- /scripts/get_codebase.dart: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env dart 2 | 3 | import 'dart:io'; 4 | 5 | void main() { 6 | final rootDirectory = Directory.current; 7 | final libFolder = Directory('${rootDirectory.path}/lib'); 8 | final codebaseFile = File('${rootDirectory.path}/issues/codebase.md'); 9 | 10 | if (codebaseFile.existsSync()) { 11 | codebaseFile.deleteSync(recursive: true); 12 | } 13 | 14 | codebaseFile.createSync(recursive: true); 15 | 16 | final files = libFolder.listSync(recursive: true, followLinks: false); 17 | for (final file in files) { 18 | if (file.path.endsWith('.dart')) { 19 | final relativeFilePath = file.path.substring(file.path.indexOf('lib')); 20 | final content = (file as File).readAsStringSync(); 21 | 22 | codebaseFile.writeAsStringSync( 23 | '## $relativeFilePath\n\n```dart\n$content\n```\n\n', 24 | mode: FileMode.append); 25 | } 26 | } 27 | 28 | print('Codebase.md file created successfully!'); 29 | } 30 | -------------------------------------------------------------------------------- /test/helpers/client_test.dart: -------------------------------------------------------------------------------- 1 | import 'package:http/http.dart'; 2 | import 'package:http/testing.dart'; 3 | import 'package:palm_api/palm_api.dart'; 4 | import 'package:palm_api/src/helpers/client.dart'; 5 | import 'package:palm_api/src/helpers/palm_api_version.dart'; 6 | import 'package:test/test.dart'; 7 | 8 | void main() { 9 | test('client should use first version of API if there arent any version selected', () async { 10 | late Uri url; 11 | final client = 
PalmClient( 12 | client: MockClient((request) { 13 | url = request.url; 14 | return Future.value(Response('{}', 200)); 15 | }), 16 | ); 17 | 18 | await client.get(''); 19 | 20 | expect(url.pathSegments.contains(PalmApiVersion.v1Beta2.name), true); 21 | }); 22 | 23 | test('client should use selected version of API if there are a version selected', () async { 24 | final apiVersion = PalmApiVersion.v1Beta3; 25 | late Uri url; 26 | final client = PalmClient( 27 | apiVersion: apiVersion, 28 | client: MockClient((request) { 29 | url = request.url; 30 | return Future.value(Response('{}', 200)); 31 | }), 32 | ); 33 | 34 | await client.get(''); 35 | 36 | expect(url.pathSegments.contains(apiVersion.name), true); 37 | }); 38 | } 39 | -------------------------------------------------------------------------------- /test/helpers/deep_collection_equality_test.dart: -------------------------------------------------------------------------------- 1 | import 'package:palm_api/src/helpers/deep_collection_equality.dart'; 2 | import 'package:test/test.dart'; 3 | 4 | void main() { 5 | final deepEquality = DeepCollectionEquality(); 6 | 7 | group('DeepCollectionEquality', () { 8 | test('should compare lists deeply', () { 9 | final list1 = [ 10 | 1, 11 | [2, 3], 12 | 4 13 | ]; 14 | final list2 = [ 15 | 1, 16 | [2, 3], 17 | 4 18 | ]; 19 | final list3 = [ 20 | 1, 21 | [2, 4], 22 | 4 23 | ]; 24 | 25 | expect(deepEquality.equals(list1, list2), true); 26 | expect(deepEquality.equals(list1, list3), false); 27 | }); 28 | 29 | test('should compare sets deeply', () { 30 | final set1 = { 31 | 1, 32 | {2, 3}, 33 | 4 34 | }; 35 | final set2 = { 36 | 4, 37 | {3, 2}, 38 | 1 39 | }; 40 | final set3 = { 41 | 1, 42 | {2, 4}, 43 | 4 44 | }; 45 | 46 | expect(deepEquality.equals(set1, set2), true); 47 | expect(deepEquality.equals(set1, set3), false); 48 | }); 49 | 50 | test('should compare maps deeply', () { 51 | final map1 = { 52 | 'a': 1, 53 | 'b': {'c': 2, 'd': 3}, 54 | 'e': 4, 55 | }; 56 | final map2 = { 
57 | 'e': 4, 58 | 'b': {'d': 3, 'c': 2}, 59 | 'a': 1, 60 | }; 61 | final map3 = { 62 | 'a': 1, 63 | 'b': {'c': 2, 'd': 4}, 64 | 'e': 4, 65 | }; 66 | 67 | expect(deepEquality.equals(map1, map2), true); 68 | expect(deepEquality.equals(map1, map3), false); 69 | }); 70 | 71 | test('should compare custom objects deeply', () { 72 | final object1 = CustomObject(1, CustomObject(2, null)); 73 | final object2 = CustomObject(1, CustomObject(2, null)); 74 | final object3 = CustomObject(1, CustomObject(3, null)); 75 | 76 | expect(deepEquality.equals(object1, object2), true); 77 | expect(deepEquality.equals(object1, object3), false); 78 | }); 79 | 80 | test('should compute hash codes deeply', () { 81 | final list = [ 82 | 1, 83 | [2, 3], 84 | 4 85 | ]; 86 | final set = { 87 | 1, 88 | {2, 3}, 89 | 4 90 | }; 91 | final map = { 92 | 'a': 1, 93 | 'b': {'c': 2, 'd': 3}, 94 | 'e': 4, 95 | }; 96 | final object = CustomObject(1, CustomObject(2, null)); 97 | 98 | final listHashCode = deepEquality.hash(list); 99 | final setHashCode = deepEquality.hash(set); 100 | final mapHashCode = deepEquality.hash(map); 101 | final objectHashCode = deepEquality.hash(object); 102 | 103 | expect(listHashCode, isNotNull); 104 | expect(setHashCode, isNotNull); 105 | expect(mapHashCode, isNotNull); 106 | expect(objectHashCode, isNotNull); 107 | }); 108 | }); 109 | } 110 | 111 | class CustomObject { 112 | final int value; 113 | final CustomObject? next; 114 | 115 | CustomObject(this.value, this.next); 116 | 117 | @override 118 | bool operator ==(Object other) { 119 | if (identical(this, other)) return true; 120 | if (other is! 
CustomObject) return false; 121 | 122 | return value == other.value && next == other.next; 123 | } 124 | 125 | @override 126 | int get hashCode => value.hashCode ^ next.hashCode; 127 | } 128 | -------------------------------------------------------------------------------- /test/helpers/list_extension.dart: -------------------------------------------------------------------------------- 1 | extension ListExtension on List { 2 | /// Returns firstWhereOrNull 3 | T? firstWhereOrNull(bool Function(T) test) { 4 | for (var element in this) { 5 | if (test(element)) { 6 | return element; 7 | } 8 | } 9 | return null; 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /test/helpers/mock_client.dart: -------------------------------------------------------------------------------- 1 | import 'dart:convert'; 2 | 3 | import 'package:http/http.dart' as http; 4 | import 'package:http/testing.dart'; 5 | import 'package:palm_api/palm_api.dart'; 6 | 7 | // ignore: constant_identifier_names 8 | const MOCK_API_KEY = 'MOCK_API_KEY'; 9 | 10 | // ignore: non_constant_identifier_names 11 | final MockPalmClient = PalmClient(client: _mockHttpClient); 12 | 13 | final _mockHttpClient = MockClient((request) async { 14 | // pathSegments[0] is always v1beta2 15 | final firstSegment = request.url.pathSegments[1]; 16 | final segmentsCount = request.url.pathSegments.length; 17 | 18 | if (firstSegment == 'models') { 19 | if (segmentsCount >= 3) { 20 | final modelName = 21 | request.url.pathSegments[2]; // Get the modelId from the path 22 | try { 23 | final model = PalmModel.fromModelName(modelName); 24 | 25 | return http.Response(model.toJson(), 200); 26 | } on ArgumentError { 27 | return http.Response('Not Found', 404); 28 | } 29 | } else { 30 | final modelsPayload = { 31 | 'models': [ 32 | PalmModel.chatBison001.toMap(), 33 | PalmModel.textBison001.toMap(), 34 | PalmModel.embeddingGecko001.toMap(), 35 | ] 36 | }; 37 | 38 | return 
http.Response(json.encode(modelsPayload), 200); 39 | } 40 | } 41 | 42 | return http.Response('Not Found', 404); 43 | }); 44 | -------------------------------------------------------------------------------- /test/palm_api_test.dart: -------------------------------------------------------------------------------- 1 | import 'package:palm_api/palm_api.dart'; 2 | import 'package:test/test.dart'; 3 | 4 | import 'testing_helpers.dart'; 5 | 6 | void main() { 7 | group('A group of tests', () { 8 | final textService = TextService(apiKey: TestEnv.palmApiKey); 9 | final discussService = DiscussService(apiKey: TestEnv.palmApiKey); 10 | 11 | test('Generative Text', () async { 12 | final response = await textService.generateText( 13 | model: PalmModel.textBison001.name, 14 | prompt: TextPrompt( 15 | text: 'How do I microwave hot water?', 16 | ), 17 | ); 18 | 19 | expect(response.candidates, isNotEmpty); 20 | }); 21 | 22 | test('Generative Message', () async { 23 | final response = await discussService.generateMessage( 24 | model: PalmModel.chatBison001.name, 25 | prompt: MessagePrompt( 26 | messages: [Message(content: 'Wikipedia definition of water?')], 27 | ), 28 | ); 29 | 30 | expect(response.candidates, isNotEmpty); 31 | }); 32 | // final awesome = Awesome(); 33 | 34 | // setUp(() { 35 | // // Additional setup goes here. 
36 | // }); 37 | 38 | // test('First Test', () { 39 | // expect(awesome.isAwesome, isTrue); 40 | // }); 41 | }); 42 | } 43 | -------------------------------------------------------------------------------- /test/services/model_service_test.dart: -------------------------------------------------------------------------------- 1 | import 'package:palm_api/palm_api.dart'; 2 | import 'package:test/test.dart'; 3 | 4 | import '../helpers/list_extension.dart'; 5 | import '../helpers/mock_client.dart'; 6 | import '../testing_helpers.dart'; 7 | 8 | void main() { 9 | group('Model Service', () { 10 | test('Retrieves a model by id', () async { 11 | final modelService = ModelService( 12 | apiKey: TestEnv.palmApiKey, 13 | apiClient: MockPalmClient, 14 | ); 15 | 16 | const textBison001 = 'text-bison-001'; 17 | const chatBison001 = 'chat-bison-001'; 18 | const embeddingGecko001 = 'embedding-gecko-001'; 19 | 20 | final futures = [ 21 | modelService.getModel(textBison001), 22 | modelService.getModel(chatBison001), 23 | modelService.getModel(embeddingGecko001), 24 | ]; 25 | 26 | final models = await Future.wait(futures); 27 | 28 | expect(models, isA>()); 29 | expect(models.length, 3); 30 | expect(models[0].name, equals(PalmModel.textBison001.name)); 31 | expect(models[1].name, equals(PalmModel.chatBison001.name)); 32 | expect(models[2].name, equals(PalmModel.embeddingGecko001.name)); 33 | }); 34 | 35 | test('List all models', () async { 36 | final modelService = ModelService( 37 | apiKey: TestEnv.palmApiKey, 38 | apiClient: MockPalmClient, 39 | ); 40 | 41 | List models = await modelService.listModels(); 42 | expect(models, isA>()); 43 | expect(models.length, 3); 44 | 45 | final textBison001 = models.firstWhereOrNull( 46 | (model) => model.name == PalmModel.textBison001.name, 47 | ); 48 | 49 | final chatBison001 = models.firstWhereOrNull( 50 | (model) => model.name == PalmModel.chatBison001.name, 51 | ); 52 | 53 | final embeddingGecko001 = models.firstWhereOrNull( 54 | (model) => 
model.name == PalmModel.embeddingGecko001.name, 55 | ); 56 | 57 | expect(textBison001, isA()); 58 | expect(textBison001?.name, equals('models/text-bison-001')); 59 | 60 | expect(chatBison001, isA()); 61 | expect(chatBison001?.name, equals('models/chat-bison-001')); 62 | 63 | expect(embeddingGecko001, isA()); 64 | expect(embeddingGecko001?.name, equals('models/embedding-gecko-001')); 65 | }); 66 | }); 67 | } 68 | -------------------------------------------------------------------------------- /test/testing_helpers.dart: -------------------------------------------------------------------------------- 1 | import 'dart:io'; 2 | 3 | import 'package:dotenv/dotenv.dart'; 4 | 5 | final isCI = Platform.environment['IS_CI'] == 'true'; 6 | 7 | class TestEnv { 8 | static final DotEnv _env = DotEnv(includePlatformEnvironment: true); 9 | static get palmApiKey { 10 | if (isCI) return Platform.environment['PALM_API_KEY']; 11 | _env.load(); 12 | return _env['PALM_API_KEY']; 13 | } 14 | } 15 | --------------------------------------------------------------------------------