├── .env.example
├── .github
│   └── workflows
│       └── publish.yml
├── .gitignore
├── LICENSE
├── MANIFEST.in
├── Makefile
├── README.md
├── check_models.py
├── llm_perplexity.py
├── pyproject.toml
├── pytest.ini
├── run_selective_tests.py
├── setup.sh
└── test_llm_perplexity.py

/.env.example:
--------------------------------------------------------------------------------
1 | # Perplexity API key - required for all tests
2 | LLM_PERPLEXITY_KEY=your_perplexity_api_key_here
3 | 
4 | # OpenRouter API key - required only for OpenRouter tests
5 | # LLM_OPENROUTER_KEY=your_openrouter_api_key_here
6 | 
--------------------------------------------------------------------------------
/.github/workflows/publish.yml:
--------------------------------------------------------------------------------
1 | name: Publish Python Package
2 | 
3 | on:
4 |   release:
5 |     types: [created]
6 | 
7 | permissions:
8 |   contents: read
9 | 
10 | jobs:
11 |   test:
12 |     runs-on: ubuntu-latest
13 |     strategy:
14 |       matrix:
15 |         python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"]
16 |     steps:
17 |       - uses: actions/checkout@v4
18 |       - name: Set up Python ${{ matrix.python-version }}
19 |         uses: actions/setup-python@v5
20 |         with:
21 |           python-version: ${{ matrix.python-version }}
22 |           cache: pip
23 |           cache-dependency-path: pyproject.toml
24 |       - name: Install dependencies
25 |         run: |
26 |           pip install -e '.[test]'
27 |       - name: Run tests
28 |         run: |
29 |           pytest
30 |   deploy:
31 |     runs-on: ubuntu-latest
32 |     needs: [test]
33 |     environment: release
34 |     permissions:
35 |       id-token: write
36 |     steps:
37 |       - uses: actions/checkout@v4
38 |       - name: Set up Python
39 |         uses: actions/setup-python@v5
40 |         with:
41 |           python-version: "3.12"
42 |           cache: pip
43 |           cache-dependency-path: pyproject.toml
44 |       - name: Install dependencies
45 |         run: |
46 |           pip install setuptools wheel build
47 |       - name: Build
48 |         run: |
49 |           python -m build
50 |       - name: Publish
51 |         uses: pypa/gh-action-pypi-publish@release/v1
52 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .venv
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 | venv
6 | .eggs
7 | .pytest_cache
8 | *.egg-info
9 | .DS_Store
10 | .vscode
11 | dist
12 | build
13 | .history
14 | /.idea
15 | .env
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 |                                  Apache License
2 |                            Version 2.0, January 2004
3 |                         http://www.apache.org/licenses/
4 | 
5 |    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 | 
7 |    1. Definitions.
8 | 
9 |       "License" shall mean the terms and conditions for use, reproduction,
10 |       and distribution as defined by Sections 1 through 9 of this document.
11 | 
12 |       "Licensor" shall mean the copyright owner or entity authorized by
13 |       the copyright owner that is granting the License.
14 | 
15 |       "Legal Entity" shall mean the union of the acting entity and all
16 |       other entities that control, are controlled by, or are under common
17 |       control with that entity. For the purposes of this definition,
18 |       "control" means (i) the power, direct or indirect, to cause the
19 |       direction or management of such entity, whether by contract or
20 |       otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 |       outstanding shares, or (iii) beneficial ownership of such entity.
22 | 
23 |       "You" (or "Your") shall mean an individual or Legal Entity
24 |       exercising permissions granted by this License.
25 | 
26 |       "Source" form shall mean the preferred form for making modifications,
27 |       including but not limited to software source code, documentation
28 |       source, and configuration files.
29 | 
30 |       "Object" form shall mean any form resulting from mechanical
31 |       transformation or translation of a Source form, including but
32 |       not limited to compiled object code, generated documentation,
33 |       and conversions to other media types.
34 | 
35 |       "Work" shall mean the work of authorship, whether in Source or
36 |       Object form, made available under the License, as indicated by a
37 |       copyright notice that is included in or attached to the work
38 |       (an example is provided in the Appendix below).
39 | 
40 |       "Derivative Works" shall mean any work, whether in Source or Object
41 |       form, that is based on (or derived from) the Work and for which the
42 |       editorial revisions, annotations, elaborations, or other modifications
43 |       represent, as a whole, an original work of authorship. For the purposes
44 |       of this License, Derivative Works shall not include works that remain
45 |       separable from, or merely link (or bind by name) to the interfaces of,
46 |       the Work and Derivative Works thereof.
47 | 
48 |       "Contribution" shall mean any work of authorship, including
49 |       the original version of the Work and any modifications or additions
50 |       to that Work or Derivative Works thereof, that is intentionally
51 |       submitted to Licensor for inclusion in the Work by the copyright owner
52 |       or by an individual or Legal Entity authorized to submit on behalf of
53 |       the copyright owner. For the purposes of this definition, "submitted"
54 |       means any form of electronic, verbal, or written communication sent
55 |       to the Licensor or its representatives, including but not limited to
56 |       communication on electronic mailing lists, source code control systems,
57 |       and issue tracking systems that are managed by, or on behalf of, the
58 |       Licensor for the purpose of discussing and improving the Work, but
59 |       excluding communication that is conspicuously marked or otherwise
60 |       designated in writing by the copyright owner as "Not a Contribution."
61 | 
62 |       "Contributor" shall mean Licensor and any individual or Legal Entity
63 |       on behalf of whom a Contribution has been received by Licensor and
64 |       subsequently incorporated within the Work.
65 | 
66 |    2. Grant of Copyright License. Subject to the terms and conditions of
67 |       this License, each Contributor hereby grants to You a perpetual,
68 |       worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 |       copyright license to reproduce, prepare Derivative Works of,
70 |       publicly display, publicly perform, sublicense, and distribute the
71 |       Work and such Derivative Works in Source or Object form.
72 | 
73 |    3. Grant of Patent License. Subject to the terms and conditions of
74 |       this License, each Contributor hereby grants to You a perpetual,
75 |       worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 |       (except as stated in this section) patent license to make, have made,
77 |       use, offer to sell, sell, import, and otherwise transfer the Work,
78 |       where such license applies only to those patent claims licensable
79 |       by such Contributor that are necessarily infringed by their
80 |       Contribution(s) alone or by combination of their Contribution(s)
81 |       with the Work to which such Contribution(s) was submitted. If You
82 |       institute patent litigation against any entity (including a
83 |       cross-claim or counterclaim in a lawsuit) alleging that the Work
84 |       or a Contribution incorporated within the Work constitutes direct
85 |       or contributory patent infringement, then any patent licenses
86 |       granted to You under this License for that Work shall terminate
87 |       as of the date such litigation is filed.
88 | 
89 |    4. Redistribution. You may reproduce and distribute copies of the
90 |       Work or Derivative Works thereof in any medium, with or without
91 |       modifications, and in Source or Object form, provided that You
92 |       meet the following conditions:
93 | 
94 |       (a) You must give any other recipients of the Work or
95 |           Derivative Works a copy of this License; and
96 | 
97 |       (b) You must cause any modified files to carry prominent notices
98 |           stating that You changed the files; and
99 | 
100 |       (c) You must retain, in the Source form of any Derivative Works
101 |           that You distribute, all copyright, patent, trademark, and
102 |           attribution notices from the Source form of the Work,
103 |           excluding those notices that do not pertain to any part of
104 |           the Derivative Works; and
105 | 
106 |       (d) If the Work includes a "NOTICE" text file as part of its
107 |           distribution, then any Derivative Works that You distribute must
108 |           include a readable copy of the attribution notices contained
109 |           within such NOTICE file, excluding those notices that do not
110 |           pertain to any part of the Derivative Works, in at least one
111 |           of the following places: within a NOTICE text file distributed
112 |           as part of the Derivative Works; within the Source form or
113 |           documentation, if provided along with the Derivative Works; or,
114 |           within a display generated by the Derivative Works, if and
115 |           wherever such third-party notices normally appear. The contents
116 |           of the NOTICE file are for informational purposes only and
117 |           do not modify the License. You may add Your own attribution
118 |           notices within Derivative Works that You distribute, alongside
119 |           or as an addendum to the NOTICE text from the Work, provided
120 |           that such additional attribution notices cannot be construed
121 |           as modifying the License.
122 | 
123 |       You may add Your own copyright statement to Your modifications and
124 |       may provide additional or different license terms and conditions
125 |       for use, reproduction, or distribution of Your modifications, or
126 |       for any such Derivative Works as a whole, provided Your use,
127 |       reproduction, and distribution of the Work otherwise complies with
128 |       the conditions stated in this License.
129 | 
130 |    5. Submission of Contributions. Unless You explicitly state otherwise,
131 |       any Contribution intentionally submitted for inclusion in the Work
132 |       by You to the Licensor shall be under the terms and conditions of
133 |       this License, without any additional terms or conditions.
134 |       Notwithstanding the above, nothing herein shall supersede or modify
135 |       the terms of any separate license agreement you may have executed
136 |       with Licensor regarding such Contributions.
137 | 
138 |    6. Trademarks. This License does not grant permission to use the trade
139 |       names, trademarks, service marks, or product names of the Licensor,
140 |       except as required for reasonable and customary use in describing the
141 |       origin of the Work and reproducing the content of the NOTICE file.
142 | 
143 |    7. Disclaimer of Warranty. Unless required by applicable law or
144 |       agreed to in writing, Licensor provides the Work (and each
145 |       Contributor provides its Contributions) on an "AS IS" BASIS,
146 |       WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 |       implied, including, without limitation, any warranties or conditions
148 |       of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 |       PARTICULAR PURPOSE. You are solely responsible for determining the
150 |       appropriateness of using or redistributing the Work and assume any
151 |       risks associated with Your exercise of permissions under this License.
152 | 
153 |    8. Limitation of Liability. In no event and under no legal theory,
154 |       whether in tort (including negligence), contract, or otherwise,
155 |       unless required by applicable law (such as deliberate and grossly
156 |       negligent acts) or agreed to in writing, shall any Contributor be
157 |       liable to You for damages, including any direct, indirect, special,
158 |       incidental, or consequential damages of any character arising as a
159 |       result of this License or out of the use or inability to use the
160 |       Work (including but not limited to damages for loss of goodwill,
161 |       work stoppage, computer failure or malfunction, or any and all
162 |       other commercial damages or losses), even if such Contributor
163 |       has been advised of the possibility of such damages.
164 | 
165 |    9. Accepting Warranty or Additional Liability. While redistributing
166 |       the Work or Derivative Works thereof, You may choose to offer,
167 |       and charge a fee for, acceptance of support, warranty, indemnity,
168 |       or other liability obligations and/or rights consistent with this
169 |       License. However, in accepting such obligations, You may act only
170 |       on Your own behalf and on Your sole responsibility, not on behalf
171 |       of any other Contributor, and only if You agree to indemnify,
172 |       defend, and hold each Contributor harmless for any liability
173 |       incurred by, or claims asserted against, such Contributor by reason
174 |       of your accepting any such warranty or additional liability.
175 | 
176 |    END OF TERMS AND CONDITIONS
177 | 
178 |    APPENDIX: How to apply the Apache License to your work.
179 | 
180 |       To apply the Apache License to your work, attach the following
181 |       boilerplate notice, with the fields enclosed by brackets "[]"
182 |       replaced with your own identifying information. (Don't include
183 |       the brackets!) The text should be enclosed in the appropriate
184 |       comment syntax for the file format. We also recommend that a
185 |       file or class name and description of purpose be included on the
186 |       same "printed page" as the copyright notice for easier
187 |       identification within third-party archives.
188 | 
189 |    Copyright [yyyy] [name of copyright owner]
190 | 
191 |    Licensed under the Apache License, Version 2.0 (the "License");
192 |    you may not use this file except in compliance with the License.
193 |    You may obtain a copy of the License at
194 | 
195 |        http://www.apache.org/licenses/LICENSE-2.0
196 | 
197 |    Unless required by applicable law or agreed to in writing, software
198 |    distributed under the License is distributed on an "AS IS" BASIS,
199 |    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 |    See the License for the specific language governing permissions and
201 |    limitations under the License.
202 | 
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include LICENSE
2 | include README.md
3 | include pytest.ini
4 | 
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | .PHONY: setup test test-standard test-invalid test-all
2 | 
3 | setup:
4 | 	pip install -e .
5 | 	pip install pytest pytest-timeout python-dotenv pillow rich
6 | 
7 | test-standard:
8 | 	pytest test_llm_perplexity.py::test_standard_models -v
9 | 
10 | test-invalid:
11 | 	pytest test_llm_perplexity.py::test_invalid_options -v
12 | 
13 | test-all:
14 | 	pytest test_llm_perplexity.py -v
15 | 
16 | # Default test target runs all tests
17 | test: test-all
18 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # llm-perplexity
2 | 
3 | [![PyPI](https://img.shields.io/pypi/v/llm-perplexity.svg)](https://pypi.org/project/llm-perplexity/)
4 | [![Changelog](https://img.shields.io/github/v/release/hex/llm-perplexity?include_prereleases&label=changelog)](https://github.com/hex/llm-perplexity/releases)
5 | [![License](https://img.shields.io/badge/license-Apache%202.0-blue.svg)](https://github.com/hex/llm-perplexity/blob/main/LICENSE)
6 | 
7 | LLM access to pplx-api 3 by Perplexity Labs
8 | 
9 | ## Installation
10 | 
11 | Install this plugin in the same environment as [LLM](https://llm.datasette.io/).
12 | 
13 | ```bash
14 | llm install llm-perplexity
15 | ```
16 | 
17 | ## Usage
18 | 
19 | First, set an [API key](https://www.perplexity.ai/settings/api) for Perplexity AI:
20 | 
21 | ```bash
22 | llm keys set perplexity
23 | # Paste key here
24 | ```
25 | 
26 | Run `llm models` to list the models, and `llm models --options` to include a list of their options.
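
The registered models can also be called from Python through the [LLM Python API](https://llm.datasette.io/en/stable/python-api.html). A minimal sketch, assuming the `perplexity` key has already been set as shown above:

```python
import llm

# Look up a model registered by this plugin (same IDs that `llm models` prints)
model = llm.get_model("sonar-pro")

# Plugin options such as temperature are passed as keyword arguments
response = model.prompt("Fun facts about AI", stream=False, temperature=0.7)
print(response.text())
```

This is the same `llm.get_model()` / `model.prompt()` pattern used by the plugin's own test suite.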
27 | 
28 | Run prompts like this:
29 | 
30 | ### Standard Models
31 | 
32 | ```bash
33 | # Flagship model
34 | llm -m sonar-pro 'Fun facts about AI'
35 | 
36 | # Base model
37 | llm -m sonar 'Fun facts about walruses'
38 | ```
39 | 
40 | ### Online Models with Web Search
41 | 
42 | ```bash
43 | # Flagship model with web search - for up-to-date information
44 | llm -m sonar-pro-online 'Latest AI research in 2025'
45 | 
46 | # Filter search by recency - restrict to recent sources
47 | llm -m sonar-pro-online --option search_recency_filter day 'Tech news today'
48 | 
49 | # Filter search by recency - specific time periods
50 | llm -m sonar-pro-online --option search_recency_filter week 'Tech news this week'
51 | llm -m sonar-pro-online --option search_recency_filter month 'Tech news this month'
52 | llm -m sonar-pro-online --option search_recency_filter hour 'Very recent news'
53 | 
54 | # Filter search by domain - specify allowed domains
55 | llm -m sonar-pro-online --option search_domain_filter github.com,arxiv.org 'LLM advancements'
56 | ```
57 | 
58 | ### Other Available Models
59 | 
60 | ```bash
61 | # Research and reasoning models
62 | llm -m sonar-deep-research 'Complex research question'
63 | llm -m sonar-reasoning-pro 'Problem solving task'
64 | llm -m sonar-reasoning 'Logical reasoning'
65 | llm -m r1-1776 'Fun facts about seals'
66 | ```
67 | 
68 | ### Advanced Options
69 | 
70 | The plugin supports various parameters to customize model behavior:
71 | 
72 | ```bash
73 | # Control randomness (0 inclusive to 2 exclusive; higher = more random)
74 | llm -m sonar-pro --option temperature 0.7 'Generate creative ideas'
75 | 
76 | # Nucleus sampling threshold (alternative to temperature)
77 | llm -m sonar-pro --option top_p 0.9 'Generate varied responses'
78 | 
79 | # Top-k token filtering (0 to 2048; 0 disables it)
80 | llm -m sonar-pro --option top_k 40 'Generate focused content'
81 | 
82 | # Limit response length
83 | llm -m sonar-pro --option max_tokens 500 'Summarize this article'
84 | 
85 | # Return related questions
86 | llm -m sonar-pro-online --option return_related_questions true 'How does quantum computing work?'
87 | ```
88 | 
89 | ### Using Images with Perplexity
90 | 
91 | The plugin supports sending images to Perplexity models for analysis (multi-modal input):
92 | 
93 | ```bash
94 | # Analyze an image with Perplexity
95 | llm -m sonar-pro --option image_path /path/to/your/image.jpg 'What can you tell me about this image?'
96 | 
97 | # Ask specific questions about an image
98 | llm -m sonar-pro --option image_path /path/to/screenshot.png 'What text appears in this screenshot?'
99 | 
100 | # Multi-modal conversation with an image
101 | llm -m sonar-pro --option image_path /path/to/diagram.png 'Explain the process shown in this diagram'
102 | ```
103 | 
104 | Note: Only certain Perplexity models support image inputs. Currently the following formats are supported: PNG, JPEG, and GIF.
105 | 
106 | ### OpenRouter Access
107 | 
108 | You can also access these models through OpenRouter. First install the OpenRouter plugin:
109 | 
110 | ```bash
111 | llm install llm-openrouter
112 | ```
113 | 
114 | Then set your OpenRouter API key:
115 | 
116 | ```bash
117 | llm keys set openrouter
118 | ```
119 | 
120 | Use the `--option use_openrouter true` flag to route requests through OpenRouter:
121 | 
122 | ```bash
123 | llm -m sonar-pro --option use_openrouter true 'Fun facts about pelicans'
124 | ```
125 | 
126 | ## Development
127 | 
128 | To set up this plugin locally, first check out the code. Then create a new virtual environment:
129 | 
130 | ```bash
131 | cd llm-perplexity
132 | python3 -m venv venv
133 | source venv/bin/activate
134 | ```
135 | 
136 | Now install the dependencies and test dependencies:
137 | 
138 | ```bash
139 | llm install -e '.[test]'
140 | ```
141 | 
142 | ### Running Tests
143 | 
144 | The test suite is comprehensive and tests all example commands from the documentation with actual API calls.
145 | 
146 | Before running tests, you need to set up your environment variables:
147 | 
148 | 1. Copy the `.env.example` file to `.env`:
149 |    ```bash
150 |    cp .env.example .env
151 |    ```
152 | 
153 | 2. Edit the `.env` file and add your Perplexity API key:
154 |    ```
155 |    LLM_PERPLEXITY_KEY=your_perplexity_api_key_here
156 |    ```
157 | 
158 | 3. (Optional) If you want to test OpenRouter integration, also add your OpenRouter API key:
159 |    ```
160 |    LLM_OPENROUTER_KEY=your_openrouter_api_key_here
161 |    ```
162 | 
163 | 4. Install the package and test dependencies using one of these methods:
164 | 
165 |    **Using the setup script:**
166 |    ```bash
167 |    ./setup.sh
168 |    ```
169 | 
170 |    **Using make:**
171 |    ```bash
172 |    make setup
173 |    ```
174 | 
175 |    **Manually:**
176 |    ```bash
177 |    pip install -e .
178 |    pip install pytest pytest-timeout python-dotenv pillow rich
179 |    ```
180 | 
181 | Run the tests with pytest:
182 | 
183 | ```bash
184 | # Run all tests
185 | pytest test_llm_perplexity.py
186 | 
187 | # Using make
188 | make test
189 | 
190 | # Run a specific test
191 | pytest test_llm_perplexity.py::test_standard_models
192 | ```
193 | 
194 | Note: Running the full test suite will make real API calls to Perplexity, which may incur costs depending on your account plan.
195 | 
196 | This plugin is modeled on the [llm-claude-3](https://github.com/simonw/llm-claude-3) plugin by Simon Willison.
--------------------------------------------------------------------------------
/check_models.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | """
3 | Script to check which Perplexity models are available with the current API key.
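
Requires LLM_PERPLEXITY_KEY to be set in the environment or in a .env file,
plus the `rich` package for table output (installed by `make setup`).
Usage: python check_models.py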
4 | """ 5 | import os 6 | import sys 7 | import llm 8 | import dotenv 9 | from rich.console import Console 10 | from rich.table import Table 11 | 12 | # Load environment variables from .env file 13 | dotenv.load_dotenv() 14 | 15 | # Check if API key is set 16 | api_key = os.environ.get("LLM_PERPLEXITY_KEY") 17 | if not api_key: 18 | print("Error: LLM_PERPLEXITY_KEY environment variable not set.") 19 | print("Please set it in your .env file or export it in your shell.") 20 | sys.exit(1) 21 | 22 | # Models to check 23 | MODELS = [ 24 | "sonar-pro", 25 | "sonar", 26 | "sonar-pro-online", 27 | "sonar-deep-research", 28 | "sonar-reasoning-pro", 29 | "sonar-reasoning", 30 | "r1-1776" 31 | ] 32 | 33 | console = Console() 34 | table = Table(title="Perplexity Model Availability") 35 | table.add_column("Model", style="cyan") 36 | table.add_column("Available", style="green") 37 | table.add_column("Error", style="red") 38 | 39 | # Check each model 40 | console.print("Checking model availability...\n") 41 | 42 | for model_id in MODELS: 43 | try: 44 | model = llm.get_model(model_id) 45 | # Simple ping with minimal token usage 46 | response = model.prompt("Hi", stream=False) 47 | text = response.text().strip() 48 | status = "✅ Yes" if text else "❌ No response" 49 | error = "" 50 | except Exception as e: 51 | status = "❌ No" 52 | error = str(e) 53 | 54 | table.add_row(model_id, status, error) 55 | 56 | console.print(table) 57 | console.print("\nNote: Some models may require specific subscription tiers.") 58 | console.print("If you're having trouble with a specific model, check your API key permissions.") -------------------------------------------------------------------------------- /llm_perplexity.py: -------------------------------------------------------------------------------- 1 | import llm 2 | from llm.utils import ( 3 | remove_dict_none_values, 4 | simplify_usage_dict, 5 | ) 6 | from openai import OpenAI 7 | from pydantic import Field, field_validator, model_validator 8 | from typing import Optional, List, Dict 9 | 10 | # Model capabilities 11 | MODEL_CAPABILITIES = { 12 | "sonar-pro": {"web_search": False}, 13 | "sonar": {"web_search": False}, 14 | "sonar-pro-online": {"web_search": True}, 15 | "sonar-deep-research": {"web_search": False}, 16 | "sonar-reasoning-pro": {"web_search": False}, 17 | "sonar-reasoning": {"web_search": False}, 18 | "r1-1776": {"web_search": False} 19 | } 20 | 21 | @llm.hookimpl 22 | def register_models(register): 23 | # https://docs.perplexity.ai/guides/model-cards 24 | for model_id, capabilities in MODEL_CAPABILITIES.items(): 25 | register(Perplexity(model_id, capabilities)) 26 | 27 | class PerplexityOptions(llm.Options): 28 | max_tokens: Optional[int] = Field( 29 | description="The maximum number of completion tokens returned by the API. The total number of tokens requested in max_tokens plus the number of prompt tokens sent in messages must not exceed the context window token limit of model requested. If left unspecified, then the model will generate tokens until either it reaches its stop token or the end of its context window", 30 | default=None, 31 | ) 32 | 33 | temperature: Optional[float] = Field( 34 | description="The amount of randomness in the response, valued between 0 inclusive and 2 exclusive. Higher values are more random, and lower values are more deterministic", 35 | default=1, 36 | ) 37 | 38 | top_p: Optional[float] = Field( 39 | description="The nucleus sampling threshold, valued between 0 and 1 inclusive. 
39 |         description="The nucleus sampling threshold, valued between 0 and 1 inclusive. For each subsequent token, the model considers the results of the tokens with 'top_p' probability mass. We recommend either altering 'top_k' or 'top_p', but not both",
40 |         default=None,
41 |     )
42 | 
43 |     top_k: Optional[int] = Field(
44 |         description="The number of tokens to keep for highest 'top-k' filtering, specified as an integer between 0 and 2048 inclusive. If set to 0, 'top-k' filtering is disabled. We recommend either altering 'top_k' or 'top_p', but not both",
45 |         default=None,
46 |     )
47 | 
48 |     stream: Optional[bool] = Field(
49 |         description="Determines whether or not to incrementally stream the response with server-sent events with 'content-type: text/event-stream'",
50 |         default=True,
51 |     )
52 | 
53 |     presence_penalty: Optional[float] = Field(
54 |         description="A value between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. Incompatible with 'frequency_penalty'",
55 |         default=None,
56 |     )
57 | 
58 |     frequency_penalty: Optional[float] = Field(
59 |         description="A multiplicative penalty greater than 0. Values greater than 1.0 penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. A value of 1.0 means no penalty. Incompatible with 'presence_penalty'",
60 |         default=None,
61 |     )
62 | 
63 |     search_recency_filter: Optional[str] = Field(
64 |         description="Filter search results by time period. Options include 'day', 'week', 'month', 'hour', or 'none'. Only applicable for online models.",
65 |         default=None,
66 |     )
67 | 
68 |     search_domain_filter: Optional[str] = Field(
69 |         description="Filter search results by domain. Provide a comma-separated list of domains to include. Only applicable for online models.",
70 |         default=None,
71 |     )
72 | 
73 |     return_related_questions: Optional[bool] = Field(
74 |         description="Whether to return related questions in the response.",
75 |         default=False,
76 |     )
77 | 
78 |     image_path: Optional[str] = Field(
79 |         description="Path to an image file to include in the request. The image will be encoded as base64 and sent along with the text prompt.",
80 |         default=None,
81 |     )
82 | 
83 |     @field_validator("temperature")
84 |     @classmethod
85 |     def validate_temperature(cls, temperature):
86 |         if temperature is not None and not (0.0 <= temperature < 2.0):
87 |             raise ValueError("temperature must be >= 0 and < 2")
88 |         return temperature
89 | 
90 |     @field_validator("top_p")
91 |     @classmethod
92 |     def validate_top_p(cls, top_p):
93 |         if top_p is not None and not (0.0 <= top_p <= 1.0):
94 |             raise ValueError("top_p must be in range 0.0-1.0")
95 |         return top_p
96 | 
97 |     @field_validator("top_k")
98 |     @classmethod
99 |     def validate_top_k(cls, top_k):
100 |         if top_k is not None and not (0 <= top_k <= 2048):
101 |             raise ValueError("top_k must be in range 0-2048 (0 disables top-k filtering)")
102 |         return top_k
103 | 
104 |     @field_validator("search_recency_filter")
105 |     @classmethod
106 |     def validate_search_recency_filter(cls, recency_filter):
107 |         if recency_filter is not None and recency_filter not in ["day", "week", "month", "hour", "none"]:
108 |             raise ValueError("search_recency_filter must be one of: 'day', 'week', 'month', 'hour', or 'none'")
109 |         return recency_filter
110 | 
111 |     @field_validator("search_domain_filter")
112 |     @classmethod
113 |     def validate_search_domain_filter(cls, domain_filter):
114 |         if domain_filter is not None:
115 |             domains = [d.strip() for d in domain_filter.split(",")]
116 |             if not all(d and "." in d for d in domains):
117 |                 raise ValueError("search_domain_filter must be a comma-separated list of valid domains")
118 |         return domain_filter
119 | 
120 |     @model_validator(mode="after")
121 |     def validate_temperature_top_p(self):
122 |         if self.temperature != 1.0 and self.top_p is not None:
123 |             raise ValueError("Only one of temperature and top_p can be set")
124 |         return self
125 | 
126 | 
127 | class Perplexity(llm.Model):
128 |     needs_key = "perplexity"
129 |     key_env_var = "LLM_PERPLEXITY_KEY"
130 |     model_id = "perplexity"
131 |     can_stream = True
132 |     base_url = "https://api.perplexity.ai"
133 | 
134 |     class Options(PerplexityOptions):
135 |         use_openrouter: Optional[bool] = Field(
136 |             description="Whether to use OpenRouter API instead of direct Perplexity API",
137 |             default=False,
138 |         )
139 | 
140 |     def __init__(self, model_id, capabilities: Optional[Dict] = None):
141 |         self.model_id = model_id
142 |         self.capabilities = capabilities or {}
143 | 
144 |     @staticmethod
145 |     def combine_chunks(chunks: List) -> dict:
146 |         content = ""
147 |         role = None
148 |         finish_reason = None
149 |         # If any of them have log probability, we're going to persist
150 |         # those later on
151 |         logprobs = []
152 |         usage = {}
153 |         citations = []
154 | 
155 |         for item in chunks:
156 |             if hasattr(item, "usage") and item.usage:
157 |                 usage = item.usage.model_dump()
158 | 
159 |             if hasattr(item, "citations") and item.citations:
160 |                 # Store citations for later processing
161 |                 citations = item.citations
162 | 
163 |             for choice in item.choices:
164 |                 if choice.logprobs and hasattr(choice.logprobs, "top_logprobs"):
165 |                     logprobs.append(
166 |                         {
167 |                             "text": choice.text if hasattr(choice, "text") else None,
168 |                             "top_logprobs": choice.logprobs.top_logprobs,
169 |                         }
170 |                     )
171 | 
172 |                 if not hasattr(choice, "delta"):
173 |                     content += choice.text
174 |                     continue
175 |                 role = choice.delta.role
176 |                 if choice.delta.content is not None:
177 |                     content += choice.delta.content
178 |                 if choice.finish_reason is not None:
179 |                     finish_reason = choice.finish_reason
180 | 
181 | 
182 |         # Imitations of the OpenAI API may be missing some of these fields
183 |         combined = {
184 |             "content": content,
185 |             "role": role,
186 |             "finish_reason": finish_reason,
187 |             "usage": usage,
188 |             "citations": citations,
189 |         }
190 |         if logprobs:
191 |             combined["logprobs"] = logprobs
192 |         if chunks:
193 |             for key in ("id", "object", "model", "created", "index"):
194 |                 value = getattr(chunks[0], key, None)
195 |                 if value is not None:
196 |                     combined[key] = value
197 | 
198 |         return combined
199 | 
200 |     def build_messages(self, prompt, conversation) -> List[dict]:
201 |         messages = []
202 |         if prompt.system:
203 |             messages.append({"role": "system", "content": prompt.system})
204 |         if conversation:
205 |             for response in conversation.responses:
206 |                 messages.extend(
207 |                     [
208 |                         {
209 |                             "role": "user",
210 |                             "content": response.prompt.prompt,
211 |                         },
212 |                         {"role": "assistant", "content": response.text()},
213 |                     ]
214 |                 )
215 | 
216 |         # Handle multi-modal input (text + image)
217 |         if prompt.options.image_path:
218 |             import base64
219 |             import os
220 |             import mimetypes
221 | 
222 |             # Get mime type based on file extension
223 |             image_path = prompt.options.image_path
224 |             mime_type, _ = mimetypes.guess_type(image_path)
225 |             if not mime_type or not mime_type.startswith('image/'):
226 |                 mime_type = 'image/png'  # Default if we can't determine
227 | 
228 |             # Read and encode the image
229 |             try:
230 |                 with open(image_path, 'rb') as img_file:
231 |                     encoded_image = base64.b64encode(img_file.read()).decode('utf-8')
232 | 
233 |                 # Create message with both text and image
234 |                 messages.append({
235 |                     "role": "user",
236 |                     "content": [
237 |                         {
238 |                             "type": "text",
239 |                             "text": prompt.prompt
240 |                         },
241 |                         {
242 |                             "type": "image_url",
243 |                             "image_url": {
244 |                                 "url": f"data:{mime_type};base64,{encoded_image}"
245 |                             }
246 |                         }
247 |                     ]
248 |                 })
249 |             except Exception as e:
250 |                 raise llm.ModelError(f"Error processing image: {str(e)}")
251 |         else:
252 |             # Standard text-only message
253 |             messages.append({"role": "user", "content": prompt.prompt})
254 | 
255 |         return messages
256 | 
257 |     def set_usage(self, response, usage):
258 |         if not usage:
259 |             return
260 | 
261 |         input_tokens = usage.pop("prompt_tokens", None)
262 |         output_tokens = usage.pop("completion_tokens", None)
263 |         usage.pop("total_tokens", None)
264 |         response.set_usage(
265 |             input=input_tokens, output=output_tokens, details=simplify_usage_dict(usage)
266 |         )
267 | 
268 |     @staticmethod
269 |     def format_citations(citations, prefix="\n\n## Citations:\n") -> str:
270 |         if not citations:
271 |             return ""
272 | 
273 |         formatted = prefix
274 |         for i, citation in enumerate(citations, 1):
275 |             if isinstance(citation, dict) and "url" in citation:
276 |                 citation_text = citation["url"]
277 |                 if "title" in citation:
278 |                     citation_text = f"{citation['title']} - {citation_text}"
279 |                 formatted += f"[{i}] {citation_text}\n"
280 |             else:
281 |                 formatted += f"[{i}] {citation}\n"
282 |         return formatted
283 | 
284 |     def execute(self, prompt, stream, response, conversation):
285 |         if prompt.options.use_openrouter:
286 |             if not any(p["name"] == "llm-openrouter" for p in llm.get_plugins()):
287 |                 raise llm.ModelError(
288 |                     "OpenRouter support requires the llm-openrouter plugin. "
" 289 | "Install it with: llm install llm-openrouter" 290 | ) 291 | api_key = llm.get_key("openrouter", "LLM_OPENROUTER_KEY") 292 | base_url = "https://openrouter.ai/api/v1" 293 | model_id = ( 294 | f"meta-llama/llama-3.3-70b-instruct" 295 | if self.model_id == "llama-3.3-70b-instruct" 296 | else f"perplexity/{self.model_id}" 297 | ) 298 | else: 299 | api_key = self.get_key() 300 | base_url = self.base_url 301 | model_id = self.model_id 302 | 303 | client = OpenAI(api_key=api_key, base_url=base_url) 304 | 305 | kwargs = { 306 | "model": model_id, 307 | "messages": self.build_messages(prompt, conversation), 308 | "stream": stream, 309 | "max_tokens": prompt.options.max_tokens or None, 310 | } 311 | 312 | if prompt.options.top_p: 313 | kwargs["top_p"] = prompt.options.top_p 314 | else: 315 | kwargs["temperature"] = prompt.options.temperature 316 | 317 | if prompt.options.top_k: 318 | kwargs["top_k"] = prompt.options.top_k 319 | 320 | # Add search parameters for online models 321 | if prompt.options.search_recency_filter and self.capabilities.get("web_search"): 322 | kwargs["search_recency_filter"] = prompt.options.search_recency_filter 323 | 324 | if prompt.options.search_domain_filter and self.capabilities.get("web_search"): 325 | # Validate non-empty domain filter 326 | domains = [d.strip() for d in prompt.options.search_domain_filter.split(",") if d.strip()] 327 | if domains: 328 | kwargs["search_domain_filter"] = ",".join(domains) 329 | 330 | # Add options for return values 331 | if prompt.options.return_related_questions: 332 | kwargs["return_related_questions"] = prompt.options.return_related_questions 333 | 334 | if stream: 335 | completion = client.chat.completions.create(**kwargs) 336 | chunks = [] 337 | usage = None 338 | citations = None 339 | 340 | for chunk in completion: 341 | chunks.append(chunk) 342 | if hasattr(chunk, "usage") and chunk.usage: 343 | usage = chunk.usage.model_dump() 344 | if hasattr(chunk, "citations") and chunk.citations: 345 | citations = chunk.citations 346 | try: 347 | content = chunk.choices[0].delta.content 348 | except IndexError: 349 | content = None 350 | if content is not None: 351 | yield content 352 | response.response_json = remove_dict_none_values(Perplexity.combine_chunks(chunks)) 353 | 354 | if citations: 355 | yield self.format_citations(citations) 356 | 357 | else: 358 | completion = client.chat.completions.create(**kwargs) 359 | response.response_json = remove_dict_none_values(completion.model_dump()) 360 | usage = completion.usage.model_dump() 361 | yield completion.choices[0].message.content 362 | if hasattr(completion, "citations") and completion.citations: 363 | yield self.format_citations(completion.citations) 364 | self.set_usage(response, usage) 365 | response._prompt_json = {"messages": kwargs["messages"]} 366 | 367 | def __str__(self): 368 | return f"Perplexity: {self.model_id}" 369 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools>=61.0"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "llm-perplexity" 7 | version = "2025.4.1" 8 | description = "LLM access to pplx-api 3 by Perplexity Labs" 9 | readme = "README.md" 10 | authors = [{name = "hex"}] 11 | license = {text = "Apache-2.0"} 12 | dependencies = [ 13 | "llm", 14 | "openai>=1.0.0", 15 | ] 16 | requires-python = ">=3.7" 17 | 18 | [project.optional-dependencies] 19 | test = [ 20 
| "pytest>=7.0.0", 21 | "python-dotenv>=1.0.0", 22 | "pillow>=9.0.0", 23 | ] 24 | 25 | [project.urls] 26 | Homepage = "https://github.com/hex/llm-perplexity" 27 | Changelog = "https://github.com/hex/llm-perplexity/releases" 28 | 29 | [project.entry-points.llm] 30 | perplexity = "llm_perplexity" 31 | 32 | [tool.setuptools] 33 | packages = [] 34 | py-modules = ["llm_perplexity"] 35 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | testpaths = test_llm_perplexity.py 3 | python_files = test_*.py 4 | python_classes = Test* 5 | python_functions = test_* 6 | timeout = 60 7 | timeout_method = thread 8 | -------------------------------------------------------------------------------- /run_selective_tests.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | Script to selectively run pytest tests for models that are available with the current API key. 4 | """ 5 | import os 6 | import sys 7 | import subprocess 8 | import dotenv 9 | import llm 10 | 11 | # Load environment variables from .env file 12 | dotenv.load_dotenv() 13 | 14 | # Check if API key is set 15 | api_key = os.environ.get("LLM_PERPLEXITY_KEY") 16 | if not api_key: 17 | print("Error: LLM_PERPLEXITY_KEY environment variable not set.") 18 | print("Please set it in your .env file or export it in your shell.") 19 | sys.exit(1) 20 | 21 | print("Checking model availability...") 22 | working_models = [] 23 | 24 | # Models to check 25 | ALL_MODELS = [ 26 | "sonar-pro", 27 | "sonar", 28 | "sonar-pro-online", 29 | "sonar-deep-research", 30 | "sonar-reasoning-pro", 31 | "sonar-reasoning", 32 | "r1-1776" 33 | ] 34 | 35 | # Check each model by attempting a simple prompt 36 | for model_id in ALL_MODELS: 37 | print(f"Checking {model_id}...", end="", flush=True) 38 | try: 39 | model = llm.get_model(model_id) 40 | # Simple ping with minimal token usage and a short timeout 41 | response = model.prompt("Hi", stream=False) 42 | text = response.text().strip() 43 | if text: 44 | print(" ✅ Working") 45 | working_models.append(model_id) 46 | else: 47 | print(" ❌ No response") 48 | except Exception as e: 49 | print(f" ❌ Error: {str(e)}") 50 | 51 | if not working_models: 52 | print("\nNo working models found. 
53 |     sys.exit(1)
54 | 
55 | # Build pytest -k expression to run tests only for working models
56 | expressions = []
57 | 
58 | # Standard models test
59 | standard_models = list(set(working_models) & set(["sonar-pro", "sonar-small", "sonar-medium", "sonar"]))
60 | if standard_models:
61 |     patterns = [f"test_standard_models[{model}-" for model in standard_models]
62 |     expressions.extend(patterns)
63 | 
64 | # Online models test - only if we have online models working
65 | online_models = list(set(working_models) & set(["sonar-pro-online", "sonar-small-online", "sonar-medium-online"]))
66 | if online_models:
67 |     patterns = [f"test_online_models_and_filters[{model}-" for model in online_models]
68 |     expressions.extend(patterns)
69 | 
70 | # Other models test
71 | other_models = list(set(working_models) & set([
72 |     "sonar-deep-research", "sonar-reasoning-pro", "sonar-reasoning",
73 |     "mistral-7b", "codellama-34b", "llama-2-70b", "r1-1776"
74 | ]))
75 | if other_models:
76 |     patterns = [f"test_other_models[{model}-" for model in other_models]
77 |     expressions.extend(patterns)
78 | 
79 | # Add the remaining tests that don't depend on specific models
80 | if "sonar-pro" in working_models:
81 |     expressions.extend([
82 |         "test_advanced_options",
83 |         "test_image_analysis",
84 |         "test_streaming_response",
85 |         "test_invalid_options",
86 |     ])
87 | 
88 | if "sonar-pro-online" in working_models:
89 |     expressions.append("test_citations_handling")
90 | 
91 | # Check if OpenRouter key is set and add that test if so
92 | if os.environ.get("LLM_OPENROUTER_KEY") and "sonar-pro" in working_models:
93 |     expressions.append("test_openrouter_access")
94 | 
95 | # Build the pytest command; -k matches substrings of test IDs, so the
96 | # bracketed parameter prefixes above select the right parametrized tests
97 | pytest_expression = " or ".join(expressions)
98 | command = [sys.executable, "-m", "pytest", "test_llm_perplexity.py", "-k", pytest_expression, "-v"]
99 | 
100 | print("\nRunning selective tests for working models:")
101 | print(f"Working models: {', '.join(working_models)}")
102 | print(f"Command: {' '.join(command)}\n")
103 | 
104 | # Run the pytest command
105 | subprocess.run(command)
--------------------------------------------------------------------------------
/setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Setup script for llm-perplexity testing
3 | 
4 | echo "Installing llm-perplexity in development mode..."
5 | pip install -e .
6 | 
7 | echo "Installing test dependencies..."
8 | pip install pytest pytest-timeout python-dotenv pillow rich
9 | 
10 | echo "Setup complete! You can now run the tests with:"
11 | echo "pytest test_llm_perplexity.py"
12 | echo ""
13 | echo "To run specific tests:"
14 | echo "pytest test_llm_perplexity.py::test_standard_models"
15 | echo "pytest test_llm_perplexity.py::test_invalid_options"
16 | 
--------------------------------------------------------------------------------
/test_llm_perplexity.py:
--------------------------------------------------------------------------------
1 | import os
2 | import pytest
3 | import llm
4 | import tempfile
5 | from pathlib import Path
6 | from PIL import Image, ImageDraw
7 | import dotenv
8 | import traceback
9 | import sys
10 | 
11 | # Load environment variables from .env file
12 | dotenv.load_dotenv()
13 | 
14 | # Skip tests if the API key isn't set
15 | requires_api_key = pytest.mark.skipif(
16 |     not os.environ.get("LLM_PERPLEXITY_KEY"),
17 |     reason="LLM_PERPLEXITY_KEY environment variable not set",
18 | )
19 | 
20 | @pytest.fixture
21 | def temp_image():
22 |     """Create a temporary test image with text for image-based tests."""
23 |     with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp:
24 |         # Create a simple test image with text
25 |         img = Image.new('RGB', (300, 200), color='white')
26 |         draw = ImageDraw.Draw(img)
27 | 
28 |         # Add some text to the image
29 |         draw.text((50, 80), "Hello Perplexity!", fill="black")
30 |         img.save(tmp.name)
31 |         tmp_path = tmp.name
32 | 
33 |     yield tmp_path
34 | 
35 |     # Clean up the temp file
36 |     os.unlink(tmp_path)
37 | 
38 | # Test standard models with parameterization
39 | @requires_api_key
40 | @pytest.mark.parametrize(
41 |     "model_id,prompt",
42 |     [
43 |         ("sonar-pro", "Fun facts about AI"),
44 |         ("sonar", "Fun facts about walruses"),
45 |     ],
46 | )
47 | def test_standard_models(model_id, prompt):
48 |     """Test standard models listed in the README."""
49 |     try:
50 |         print(f"\nTesting model: {model_id}")
51 |         model = llm.get_model(model_id)
52 |         print(f"Model instance: {model}")
53 |         response = model.prompt(prompt, stream=False)
54 |         assert response is not None
55 |         assert len(response.text()) > 0
56 |         print(f"Test passed for {model_id}")
57 |     except Exception as e:
58 |         print(f"Error testing {model_id}: {str(e)}")
59 |         traceback.print_exc(file=sys.stdout)
60 |         raise
61 | 
62 | # Test online models and search filters
63 | @requires_api_key
64 | @pytest.mark.parametrize(
65 |     "model_id,prompt,options",
66 |     [
67 |         # Basic online model
68 |         ("sonar-pro-online", "Latest AI research in 2025", {}),
69 |     ],
70 | )
71 | def test_online_models_and_filters(model_id, prompt, options):
72 |     """Test online models with various search filters."""
73 |     try:
74 |         print(f"\nTesting online model: {model_id} with options: {options}")
75 |         model = llm.get_model(model_id)
76 |         print(f"Model instance: {model}")
77 |         response = model.prompt(prompt, stream=False, **options)
78 |         assert response is not None
79 |         assert len(response.text()) > 0
80 |         print(f"Test passed for {model_id} with options: {options}")
81 |     except Exception as e:
82 |         print(f"Error testing {model_id} with options {options}: {str(e)}")
83 |         traceback.print_exc(file=sys.stdout)
84 |         raise
85 | 
86 | # Test other models with parameterization
87 | @requires_api_key
88 | @pytest.mark.parametrize(
89 |     "model_id,prompt",
90 |     [
91 |         # Test one of each category to minimize API costs
92 |         ("sonar-deep-research", "Complex research question"),
93 |         ("sonar-reasoning-pro", "Problem solving task"),
94 |         ("sonar-reasoning", "Logical reasoning"),
95 |         ("r1-1776", "Fun facts about seals"),
96 |     ],
97 | )
98 | def test_other_models(model_id, prompt):
99 |     """Test other models mentioned in the README."""
100 |     try:
101 |         print(f"\nTesting other model: {model_id}")
102 |         model = llm.get_model(model_id)
103 |         print(f"Model instance: {model}")
104 |         # pytest-timeout (see pytest.ini) prevents this call from hanging
105 |         response = model.prompt(prompt, stream=False)
106 |         assert response is not None
107 |         assert len(response.text()) > 0
108 |         print(f"Test passed for {model_id}")
109 |     except Exception as e:
110 |         print(f"Error testing {model_id}: {str(e)}")
111 |         traceback.print_exc(file=sys.stdout)
112 |         raise
113 | 
114 | # Test advanced options with parameterization
115 | @requires_api_key
116 | @pytest.mark.parametrize(
117 |     "options,prompt",
118 |     [
119 |         ({"temperature": 0.7}, "Generate creative ideas"),
120 |         ({"top_p": 0.9}, "Generate varied responses"),
121 |         ({"top_k": 40}, "Generate focused content"),
122 |         ({"max_tokens": 100}, "Summarize this article"),
123 |         ({"return_related_questions": True}, "How does quantum computing work?"),
124 |     ],
125 | )
126 | def test_advanced_options(options, prompt):
127 |     """Test advanced options mentioned in the README."""
128 |     try:
129 |         print(f"\nTesting advanced options: {options}")
130 |         model = llm.get_model("sonar-pro")
131 |         print(f"Model instance: {model}")
132 |         response = model.prompt(prompt, stream=False, **options)
133 |         assert response is not None
134 |         assert len(response.text()) > 0
135 |         print(f"Test passed with options: {options}")
136 | 
137 |         # For max_tokens, check that response isn't too long
138 |         if "max_tokens" in options:
139 |             assert len(response.text()) < 2000  # Rough check that max_tokens is working
140 |     except Exception as e:
141 |         print(f"Error testing with options {options}: {str(e)}")
142 |         traceback.print_exc(file=sys.stdout)
143 |         raise
144 | 
145 | @requires_api_key
146 | def test_image_analysis(temp_image):
147 |     """Test analyzing an image."""
148 |     try:
149 |         print("\nTesting image analysis")
150 |         model = llm.get_model("sonar-pro")
151 |         print(f"Model instance: {model}")
152 | 
153 |         # Pass the image path as a plugin option; model.prompt() validates
154 |         # the options and returns a lazily evaluated Response
155 |         response = model.prompt(
156 |             "What can you tell me about this image?",
157 |             stream=False,
158 |             image_path=temp_image,
159 |         )
160 | 
161 |         assert response is not None
162 |         assert len(response.text()) > 0
163 | 
164 |         # The model should mention text or something about the image
165 |         text = response.text().lower()
166 |         assert any(word in text for word in ["text", "image", "white", "hello", "perplexity"])
167 |         print("Image analysis test passed")
168 |     except Exception as e:
169 |         print(f"Error testing image analysis: {str(e)}")
170 |         traceback.print_exc(file=sys.stdout)
171 |         raise
172 | 
173 | @requires_api_key
174 | def test_streaming_response():
175 |     """Test streaming responses."""
176 |     try:
177 |         print("\nTesting streaming response")
178 |         model = llm.get_model("sonar-pro")
179 |         print(f"Model instance: {model}")
180 |         response = model.prompt("Tell me about streaming data", stream=True)
181 |         chunks = list(response)  # Consume the stream by converting the iterator to a list
182 | 
183 |         assert len(chunks) > 0
184 |         assert "".join(chunks)  # Make sure we got some content
185 |         print("Streaming test passed")
186 |     except Exception as e:
187 |         print(f"Error testing streaming: {str(e)}")
188 |         traceback.print_exc(file=sys.stdout)
189 |         raise
190 | 
191 | # Test error handling for invalid options
192 | @requires_api_key
193 | @pytest.mark.parametrize(
"option_name,invalid_value,expected_exception", 195 | [ 196 | ("temperature", 3.0, ValueError), # Outside valid range 197 | ("top_p", 2.0, ValueError), # Outside valid range 198 | ("top_k", 3000, ValueError), # Outside valid range 199 | ("search_recency_filter", "invalid_filter", ValueError), # Invalid value 200 | ], 201 | ) 202 | def test_invalid_options(option_name, invalid_value, expected_exception): 203 | """Test error handling for invalid option values.""" 204 | try: 205 | print(f"\nTesting invalid option: {option_name}={invalid_value}") 206 | model = llm.get_model("sonar-pro") 207 | print(f"Model instance: {model}") 208 | with pytest.raises(expected_exception): 209 | # Create a prompt object with the invalid option 210 | prompt = llm.Prompt( 211 | "This should fail", 212 | model=model, 213 | options={option_name: invalid_value} 214 | ) 215 | model.execute(prompt) 216 | print(f"Invalid option test passed for {option_name}={invalid_value}") 217 | except Exception as e: 218 | print(f"Error testing invalid option {option_name}={invalid_value}: {str(e)}") 219 | traceback.print_exc(file=sys.stdout) 220 | raise 221 | 222 | @requires_api_key 223 | def test_citations_handling(): 224 | """Test that citations are properly handled for online models.""" 225 | try: 226 | print(f"\nTesting citations handling") 227 | model = llm.get_model("sonar-pro-online") 228 | print(f"Model instance: {model}") 229 | response = model.prompt( 230 | "What are the latest developments in generative AI?", 231 | stream=False 232 | ) 233 | assert response is not None 234 | text = response.text() 235 | assert len(text) > 0 236 | print(f"Citations test passed") 237 | except Exception as e: 238 | print(f"Error testing citations: {str(e)}") 239 | traceback.print_exc(file=sys.stdout) 240 | raise 241 | 242 | # Only run OpenRouter test if the key is set 243 | @pytest.mark.skipif( 244 | not os.environ.get("LLM_OPENROUTER_KEY"), 245 | reason="LLM_OPENROUTER_KEY environment variable not set", 246 | ) 247 | def test_openrouter_access(): 248 | """Test accessing models through OpenRouter.""" 249 | try: 250 | print(f"\nTesting OpenRouter access") 251 | model = llm.get_model("sonar-small") 252 | print(f"Model instance: {model}") 253 | 254 | # Create a model-specific prompt 255 | prompt = llm.Prompt( 256 | "Fun facts about pelicans", 257 | model=model, 258 | options={"use_openrouter": True} 259 | ) 260 | response = model.execute(prompt) 261 | 262 | assert response is not None 263 | assert len(response.text()) > 0 264 | print(f"OpenRouter test passed") 265 | except Exception as e: 266 | print(f"Error testing OpenRouter: {str(e)}") 267 | traceback.print_exc(file=sys.stdout) 268 | raise 269 | --------------------------------------------------------------------------------