├── examples ├── images │ ├── image.png │ └── upscaled.png ├── search_images.py ├── bard.py ├── downloaders.py ├── image_reverse.py ├── async_search_images.py ├── gpt.py ├── llama.py ├── mistral.py ├── async_downloaders.py ├── async_image_reverse.py ├── async_bard.py ├── gemini.py ├── upscale.py ├── async_gpt.py ├── async_llama.py ├── async_gemini.py ├── async_mistral.py ├── async_upscale.py ├── models.py ├── generate.py ├── antinsfw.py ├── gemini_vision.py ├── async_antinsfw.py ├── async_gemini_vision.py └── async_generate.py ├── lexica ├── utils.py ├── __init__.py ├── constants.py ├── core.py └── core_async.py ├── LICENSE ├── .github └── workflows │ └── python-publish.yml ├── setup.py ├── README.md └── .gitignore /examples/images/image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Qewertyy/LexicaAPI/HEAD/examples/images/image.png -------------------------------------------------------------------------------- /examples/images/upscaled.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Qewertyy/LexicaAPI/HEAD/examples/images/upscaled.png -------------------------------------------------------------------------------- /lexica/utils.py: -------------------------------------------------------------------------------- 1 | def clean_dict(value: dict) -> dict: 2 | return {key: value for key, value in value.items() if value is not None} if value else {} -------------------------------------------------------------------------------- /examples/search_images.py: -------------------------------------------------------------------------------- 1 | # Copyright 2024 Qewertyy, MIT License 2 | 3 | from lexica import Client 4 | 5 | def main(query: str) -> dict: 6 | client = Client() 7 | response = client.SearchImages(query) 8 | return response 9 | 10 | if __name__ == "__main__": 11 | print(main("akeno himejima")) -------------------------------------------------------------------------------- /examples/bard.py: -------------------------------------------------------------------------------- 1 | # Copyright 2024 Qewertyy, MIT License 2 | 3 | from lexica import Client, languageModels 4 | 5 | def main(prompt: str) -> dict: 6 | client = Client() 7 | response = client.ChatCompletion(prompt,languageModels.bard) 8 | return response 9 | 10 | if __name__ == "__main__": 11 | print(main("hello, who are you?")) -------------------------------------------------------------------------------- /examples/downloaders.py: -------------------------------------------------------------------------------- 1 | # Copyright 2024 Qewertyy, MIT License 2 | 3 | from lexica import Client 4 | 5 | def main(url: str) -> dict: 6 | client = Client() 7 | response = client.MediaDownloaders("instagram",url) 8 | return response 9 | 10 | if __name__ == "__main__": 11 | print(main("https://www.instagram.com/p/Cz2HsJ5NRDf/")) -------------------------------------------------------------------------------- /examples/image_reverse.py: -------------------------------------------------------------------------------- 1 | # Copyright 2024 Qewertyy, MIT License 2 | 3 | from lexica import Client 4 | 5 | def main(imgUrl: str) -> dict: 6 | client = Client() 7 | response = client.ImageReverse(imgUrl) 8 | return response 9 | 10 | if __name__ == "__main__": 11 | print(main("https://graph.org/file/abd8ff7611c2af0108b3d.jpg")) -------------------------------------------------------------------------------- /lexica/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2023 Qewertyy, MIT License 2 | 3 | from lexica.core import Client 4 | from lexica.core_async import AsyncClient 5 | from lexica.constants import languageModels, Messages 6 | 7 | __all__ = ["Client", "AsyncClient", "languageModels", "Messages"] 8 | 9 | __version__ = "1.6.0" 10 | __author__ = "Qewertyy " 11 | -------------------------------------------------------------------------------- /examples/async_search_images.py: -------------------------------------------------------------------------------- 1 | # Copyright 2024 Qewertyy, MIT License 2 | 3 | from lexica import AsyncClient 4 | import asyncio 5 | 6 | async def main(query: str) -> dict: 7 | client = AsyncClient() 8 | response = await client.SearchImages(query) 9 | return response 10 | 11 | if __name__ == "__main__": 12 | print(asyncio.run(main("akeno himejima"))) -------------------------------------------------------------------------------- /examples/gpt.py: -------------------------------------------------------------------------------- 1 | # Copyright 2024 Qewertyy, MIT License 2 | 3 | from lexica import Client, languageModels, Messages 4 | 5 | def main(prompt: str) -> dict: 6 | client = Client() 7 | response = client.ChatCompletion([Messages(content=prompt,role="user")],languageModels.gpt) 8 | return response 9 | 10 | if __name__ == "__main__": 11 | print(main("hello, who are you?")) -------------------------------------------------------------------------------- /examples/llama.py: -------------------------------------------------------------------------------- 1 | # Copyright 2024 Qewertyy, MIT License 2 | 3 | from lexica import Client,languageModels, Messages 4 | 5 | def main(prompt: str) -> dict: 6 | client = Client() 7 | response = client.ChatCompletion([Messages(content=prompt,role="user")],languageModels.llama) 8 | return response 9 | 10 | if __name__ == "__main__": 11 | print(main("hello, who are you?")) -------------------------------------------------------------------------------- /examples/mistral.py: -------------------------------------------------------------------------------- 1 | # Copyright 2024 Qewertyy, MIT License 2 | 3 | from lexica import Client,languageModels, Messages 4 | 5 | def main(prompt: str) -> dict: 6 | client = Client() 7 | response = client.ChatCompletion([Messages(content=prompt,role="user")],languageModels.mistral) 8 | return response 9 | 10 | if __name__ == "__main__": 11 | print(main("hello, who are you?")) -------------------------------------------------------------------------------- /examples/async_downloaders.py: -------------------------------------------------------------------------------- 1 | # Copyright 2024 Qewertyy, MIT License 2 | 3 | from lexica import AsyncClient 4 | import asyncio 5 | 6 | async def main(url: str) -> dict: 7 | client = AsyncClient() 8 | response = await client.MediaDownloaders("instagram",url) 9 | return response 10 | 11 | if __name__ == "__main__": 12 | print(asyncio.run(main("https://www.instagram.com/p/Cz2HsJ5NRDf/"))) -------------------------------------------------------------------------------- /examples/async_image_reverse.py: -------------------------------------------------------------------------------- 1 | # Copyright 2024 Qewertyy, MIT License 2 | 3 | from lexica import AsyncClient 4 | import asyncio 5 | 6 | async def main(imgUrl: str) -> dict: 7 | client = AsyncClient() 8 | response = await client.ImageReverse(imgUrl) 9 | return response 10 | 11 | if __name__ == "__main__": 12 | print(asyncio.run(main("https://graph.org/file/abd8ff7611c2af0108b3d.jpg"))) -------------------------------------------------------------------------------- /examples/async_bard.py: -------------------------------------------------------------------------------- 1 | # Copyright 2024 Qewertyy, MIT License 2 | 3 | import asyncio 4 | from lexica import AsyncClient, languageModels 5 | 6 | async def async_main(prompt: str) -> dict: 7 | client = AsyncClient() 8 | response = await client.ChatCompletion(prompt,languageModels.bard) 9 | return response 10 | 11 | if __name__ == "__main__": 12 | print(asyncio.run(async_main("hello, who are you?"))) -------------------------------------------------------------------------------- /examples/gemini.py: -------------------------------------------------------------------------------- 1 | # Copyright 2024 Qewertyy, MIT License 2 | 3 | from lexica import Client,languageModels 4 | from lexica.constants import Messages 5 | 6 | def main(prompt: str) -> dict: 7 | client = Client() 8 | response = client.ChatCompletion([Messages(content=prompt,role="user")],languageModels.gemini) 9 | return response 10 | 11 | if __name__ == "__main__": 12 | print(main("hello, who are you?")) -------------------------------------------------------------------------------- /examples/upscale.py: -------------------------------------------------------------------------------- 1 | # Copyright 2024 Qewertyy, MIT License 2 | 3 | from lexica import Client 4 | 5 | def main(image: bytes) -> bytes: 6 | client = Client() 7 | imageBytes = client.upscale(image) 8 | with open('examples/images/upscaled.png', 'wb') as f: 9 | f.write(imageBytes) 10 | 11 | if __name__ == "__main__": 12 | image = open('examples/images/image.png', 'rb').read() 13 | main(image) -------------------------------------------------------------------------------- /examples/async_gpt.py: -------------------------------------------------------------------------------- 1 | # Copyright 2024 Qewertyy, MIT License 2 | 3 | import asyncio 4 | from lexica import AsyncClient,languageModels,Messages 5 | 6 | async def async_main(prompt: str) -> dict: 7 | client = AsyncClient() 8 | response = await client.ChatCompletion([Messages(content=prompt,role="user")],languageModels.gpt) 9 | return response 10 | 11 | if __name__ == "__main__": 12 | print(asyncio.run(async_main("hello, who are you?"))) -------------------------------------------------------------------------------- /examples/async_llama.py: -------------------------------------------------------------------------------- 1 | # Copyright 2024 Qewertyy, MIT License 2 | 3 | import asyncio 4 | from lexica import AsyncClient, languageModels, Messages 5 | 6 | async def async_main(prompt: str) -> dict: 7 | client = AsyncClient() 8 | response = await client.ChatCompletion([Messages(content=prompt,role="user")],languageModels.llama) 9 | return response 10 | 11 | if __name__ == "__main__": 12 | print(asyncio.run(async_main("hello, who are you?"))) -------------------------------------------------------------------------------- /examples/async_gemini.py: -------------------------------------------------------------------------------- 1 | # Copyright 2024 Qewertyy, MIT License 2 | 3 | import asyncio 4 | from lexica import AsyncClient, languageModels, Messages 5 | 6 | async def async_main(prompt: str) -> dict: 7 | client = AsyncClient() 8 | response = await client.ChatCompletion([Messages(content=prompt,role="user")],languageModels.gemini) 9 | return response 10 | 11 | if __name__ == "__main__": 12 | print(asyncio.run(async_main("hello, who are you?"))) -------------------------------------------------------------------------------- /examples/async_mistral.py: -------------------------------------------------------------------------------- 1 | # Copyright 2024 Qewertyy, MIT License 2 | 3 | import asyncio 4 | from lexica import AsyncClient, languageModels, Messages 5 | 6 | async def async_main(prompt: str) -> dict: 7 | client = AsyncClient() 8 | response = await client.ChatCompletion([Messages(content=prompt,role="user")],languageModels.mistral) 9 | return response 10 | 11 | if __name__ == "__main__": 12 | print(asyncio.run(async_main("hello, who are you?"))) -------------------------------------------------------------------------------- /examples/async_upscale.py: -------------------------------------------------------------------------------- 1 | # Copyright 2024 Qewertyy, MIT License 2 | 3 | from lexica import AsyncClient 4 | import asyncio 5 | 6 | async def async_main(image: bytes) -> bytes: 7 | client = AsyncClient() 8 | imageBytes = await client.upscale(image) 9 | await client.close() 10 | with open('upscaled.png', 'wb') as f: 11 | f.write(imageBytes) 12 | 13 | if __name__ == "__main__": 14 | image = open('examples/images/image.png', 'rb').read() 15 | asyncio.run(async_main(image)) -------------------------------------------------------------------------------- /examples/models.py: -------------------------------------------------------------------------------- 1 | # Copyright 2024 Qewertyy, MIT License 2 | 3 | # async usage 4 | 5 | from lexica import AsyncClient 6 | import asyncio 7 | 8 | async def async_main() -> dict: 9 | client = AsyncClient() 10 | response = await client.models 11 | await client.close() 12 | return response 13 | 14 | # sync usage 15 | 16 | from lexica import Client 17 | 18 | def main() -> dict: 19 | client = Client() 20 | return client.models 21 | 22 | 23 | if __name__ == "__main__": 24 | #print(main()) 25 | print(asyncio.run(async_main())) -------------------------------------------------------------------------------- /examples/generate.py: -------------------------------------------------------------------------------- 1 | # Copyright 2024 Qewertyy, MIT License 2 | 3 | import time 4 | from lexica import Client 5 | 6 | def main(model_id:int,prompt:str,negative_prompt:str) -> dict: 7 | client = Client() 8 | resp = client.generate(model_id,prompt,negative_prompt) 9 | print(resp) 10 | task_id = resp['task_id'] 11 | request_id = resp['request_id'] 12 | time.sleep(60) # sleep for period of time to allow the task to be processed 13 | response = client.getImages(task_id,request_id) 14 | return response 15 | 16 | 17 | if __name__ == "__main__": 18 | print(main(2,"1girl, white hair, purple eyes, portrait, realistic, towel, (onsen), sidelighting, wallpaper","nsfw")) -------------------------------------------------------------------------------- /examples/antinsfw.py: -------------------------------------------------------------------------------- 1 | # Copyright 2024 Qewertyy, MIT License 2 | 3 | from lexica import Client 4 | 5 | def main(image_url: str) -> dict: 6 | client = Client() 7 | response = client.AntiNsfw(image_url,29) 8 | print(response) 9 | if 'sfw' in response['content'] and response['content']['sfw'] == True: 10 | return "This image is safe for work." 11 | elif 'isNsfw' in response['content']: 12 | if response['content']['isNsfw'] == True: 13 | return "This image is not safe for work." 14 | else: 15 | return "This image is safe for work." 16 | else: 17 | return "This image is not safe for work." 18 | 19 | if __name__ == "__main__": 20 | print(main("https://graph.org/file/de1888dd4fdfbc647c398.jpg")) -------------------------------------------------------------------------------- /examples/gemini_vision.py: -------------------------------------------------------------------------------- 1 | # Copyright 2024 Qewertyy, MIT License 2 | 3 | import base64,mimetypes 4 | from lexica import Client,languageModels 5 | 6 | def main(prompt: str,images:list) -> dict: 7 | client = Client() 8 | imageInfo = [] 9 | for image in images: 10 | with open(image,"rb") as imageFile: 11 | data = base64.b64encode(imageFile.read()).decode("utf-8") 12 | mime_type,_= mimetypes.guess_type(image) 13 | imageInfo.append({ 14 | "data": data, 15 | "mime_type": mime_type 16 | }) 17 | response = client.ChatCompletion(prompt,languageModels.geminiVision,images=imageInfo) 18 | return response 19 | 20 | if __name__ == "__main__": 21 | print(main("what's this?",["./examples/images/image.png"])) -------------------------------------------------------------------------------- /examples/async_antinsfw.py: -------------------------------------------------------------------------------- 1 | # Copyright 2024 Qewertyy, MIT License 2 | 3 | from lexica import AsyncClient 4 | import asyncio 5 | 6 | async def main(image_url: str) -> dict: 7 | client = AsyncClient() 8 | response = await client.AntiNsfw(image_url,29) 9 | await client.close() 10 | print(response) 11 | if 'sfw' in response['content'] and response['content']['sfw'] == True: 12 | return "This image is safe for work." 13 | elif 'isNsfw' in response['content']: 14 | if response['content']['isNsfw'] == True: 15 | return "This image is not safe for work." 16 | else: 17 | return "This image is safe for work." 18 | else: 19 | return "This image is not safe for work." 20 | 21 | if __name__ == "__main__": 22 | print(asyncio.run(main("https://graph.org/file/a642e642fe01a917fd5b5.jpg"))) -------------------------------------------------------------------------------- /examples/async_gemini_vision.py: -------------------------------------------------------------------------------- 1 | # Copyright 2024 Qewertyy, MIT License 2 | 3 | import asyncio 4 | import base64 5 | import mimetypes 6 | from lexica import AsyncClient, languageModels 7 | 8 | async def async_main(prompt: str,images: list) -> dict: 9 | client = AsyncClient() 10 | imageInfo = [] 11 | for image in images: 12 | with open(image,"rb") as imageFile: 13 | data = base64.b64encode(imageFile.read()).decode("utf-8") 14 | mime_type,_= mimetypes.guess_type(image) 15 | imageInfo.append({ 16 | "data": data, 17 | "mime_type": mime_type 18 | }) 19 | response = await client.ChatCompletion(prompt,languageModels.geminiVision,images=imageInfo) 20 | return response 21 | 22 | if __name__ == "__main__": 23 | print(asyncio.run(async_main("whats this?",["./examples/images/image.png"]))) -------------------------------------------------------------------------------- /examples/async_generate.py: -------------------------------------------------------------------------------- 1 | # Copyright 2024 Qewertyy, MIT License 2 | 3 | import asyncio 4 | from lexica import AsyncClient 5 | 6 | async def main(model_id:int,prompt:str,negative_prompt:str) -> dict: 7 | client = AsyncClient() 8 | resp = await client.generate(model_id,prompt,negative_prompt) 9 | print(resp) 10 | task_id = resp['task_id'] 11 | request_id = resp['request_id'] 12 | await asyncio.sleep(30) # sleep for period of time to allow the task to be processed 13 | response = await client.getImages(task_id,request_id) 14 | return response 15 | 16 | # if status is not "completed" then use this method to get the images 17 | 18 | async def getImages(task_id:str,request_id:str) -> dict: 19 | client = AsyncClient() 20 | response = await client.getImages(task_id,request_id) 21 | return response 22 | 23 | if __name__ == "__main__": 24 | asyncio.run(main(2,"1girl, white hair, purple eyes, portrait, realistic, towel,sidelighting, wallpaper","")) 25 | #print(asyncio.run(getImages("14248074744444933","f884f17b7b78856"))) # task id, request id -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Qewertyy 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /lexica/constants.py: -------------------------------------------------------------------------------- 1 | # Copyright 2024 Qewertyy, MIT License 2 | 3 | import re, os 4 | from dataclasses import dataclass 5 | from typing import Literal 6 | 7 | BASE_URL = "https://lexica.qewertyy.dev" 8 | ALTERNATE_URL = "https://api.qewertyy.dev" 9 | 10 | dirpath = os.path.dirname(os.path.abspath(__file__)) 11 | with open(dirpath + "/__init__.py") as f: 12 | match = re.search(r"""^__version__ = ['"]([^'"]*)['"]""", f.read(), re.M) 13 | if not match: 14 | raise RuntimeError("__init__.py doesn't contain __version__") 15 | version = match.groups()[0] 16 | 17 | SESSION_HEADERS = { 18 | "Host": "lexica.qewertyy.dev", 19 | "User-Agent": f"Lexica/{version}", 20 | } 21 | 22 | 23 | class languageModels(object): 24 | gemma = {"modelId": 1, "name": "Gemma"} 25 | bard = {"modelId": 20, "name": "Bard"} 26 | mistral = {"modelId": 21, "name": "LLAMA 2"} 27 | llama = {"modelId": 18, "name": "LLAMA"} 28 | gpt = {"modelId": 5, "name": "ChatGPT"} 29 | gemini = {"modelId": 23, "name": "Gemini-Pro"} 30 | geminiVision = {"modelId": 24, "name": "Gemini-Pro-Vision"} 31 | openhermes = {"modelId": 27, "name": "OpenHermes"} 32 | 33 | 34 | @dataclass 35 | class Messages: 36 | content: str 37 | role: Literal["assistant", "user"] 38 | -------------------------------------------------------------------------------- /.github/workflows/python-publish.yml: -------------------------------------------------------------------------------- 1 | # This workflow will upload a Python Package using Twine when a release is created 2 | # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries 3 | 4 | # This workflow uses actions that are not certified by GitHub. 5 | # They are provided by a third-party and are governed by 6 | # separate terms of service, privacy policy, and support 7 | # documentation. 8 | 9 | name: sdk-openapi 10 | 11 | on: 12 | push: 13 | branches: 14 | - dev 15 | 16 | permissions: 17 | contents: read 18 | 19 | jobs: 20 | deploy: 21 | 22 | runs-on: ubuntu-latest 23 | 24 | steps: 25 | - uses: actions/checkout@v3 26 | - name: Set up Python 27 | uses: actions/setup-python@v3 28 | with: 29 | python-version: '3.x' 30 | - name: Install dependencies 31 | run: | 32 | python -m pip install --upgrade pip 33 | pip install build 34 | pip install requests 35 | pip install setuptools 36 | - name: Build package 37 | run: python -m build 38 | - name: Publish package 39 | uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29 40 | with: 41 | user: __token__ 42 | password: ${{ secrets.PYPI_API_TOKEN }} 43 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # Copyright 2023 Qewertyy, MIT License 2 | 3 | from setuptools import setup,find_packages 4 | import re 5 | 6 | def get_version(): 7 | filename = "lexica/__init__.py" 8 | with open(filename) as f: 9 | match = re.search(r"""^__version__ = ['"]([^'"]*)['"]""", f.read(), re.M) 10 | if not match: 11 | raise RuntimeError("{} doesn't contain __version__".format(filename)) 12 | version = match.groups()[0] 13 | return version 14 | 15 | def get_long_description(): 16 | with open("README.md", encoding="UTF-8") as f: 17 | long_description = f.read() 18 | return long_description 19 | 20 | setup( 21 | name="lexica-api", 22 | version=get_version(), 23 | author="Qewertyy", 24 | author_email="Qewertyy.irl@gmail.com", 25 | description="The python package for api.qewertyy.dev", 26 | url="https://github.com/Qewertyy/LexicaAPI", 27 | python_requires=">=3.8", 28 | long_description=get_long_description(), 29 | long_description_content_type="text/markdown", 30 | packages=find_packages(), 31 | install_requires=[ 32 | "httpx[http2]" 33 | ], 34 | keywords="Python, API, Bard, Google Bard, Large Language Model, Chatbot API, Google API, Chatbot, Image Generations, Latent Diffusion, State of Art, Image Reverse Search, Reverse Image Search", 35 | classifiers=[ 36 | "Programming Language :: Python", 37 | "Programming Language :: Python :: 3.7", 38 | "Programming Language :: Python :: 3.8", 39 | ] 40 | ) 41 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## Here are some projects which utilizes the LexicaAPI. 2 | 3 | ### AverageAI 4 | - **Name:** [AverageAI](https://ai.qewertyy.dev) 5 | - **Description:** Image Generations and LLMs. 6 | 7 | ### AverageImages 8 | - **Name:** [AverageImages](https://images.qewertyy.dev) 9 | - **Description:** Search Images on google and bing. 10 | 11 | ### Upscale 12 | - **Name:** [Upscale](https://upscale.qewertyy.dev) 13 | - **Description:** Upscale Images. 14 | 15 | ### AverageNews 16 | - **Name:** [AverageNews](https://news.qewertyy.dev) 17 | - **Description:** News App. 18 | 19 | ### Social-DL 20 | - **Name:** [Social-DL](https://social-dl.vercel.app) 21 | - **Description:** Download Videos/Images from social media. 22 | 23 | ### TelegramBots 24 | [Miko](https://github.com/Awesome-Tofu/miko-bot), [AntiNSFWBot](https://telegram.me/ProtectYourGroupsRobot), [Sung](https://github.com/Dhruv-Tara/Sung), [GameInfoBot](https://github.com/barryspacezero/Telegram-GameInfoBot), [YaeMiko](https://github.com/Infamous-Hydra/YaeMiko), [FilterBot](https://github.com/Codeflix-Bots/AutoFilter), [News](https://github.com/SOMEH1NG/TechNewsDigest) [etc..](https://github.com/search?q=https%3A%2F%2Fapi.qewertyy.dev&type=code) 25 | ## Usages 26 | LLM's 27 | ```python 28 | from lexica import Client, languageModels 29 | 30 | def main(prompt: str) -> dict: 31 | client = Client() 32 | response = client.palm(prompt,languageModels.gemini) 33 | return response 34 | 35 | if __name__ == "__main__": 36 | print(main("hello world")) 37 | ``` 38 | 39 | Upscale an image. 40 | ```python 41 | from lexica import Client 42 | 43 | def main(image: bytes) -> bytes: 44 | client = Client() 45 | imageBytes = client.upscale(image) 46 | with open('upscaled.png', 'wb') as f: 47 | f.write(imageBytes) 48 | 49 | if __name__ == "__main__": 50 | image = open('examples/images/image.png', 'rb').read() 51 | main(image) 52 | ``` 53 | 54 | Anti-NSFW 55 | ```python 56 | from lexica import Client 57 | 58 | def main(image_url: str) -> dict: 59 | client = Client() 60 | response = client.AntiNsfw(image_url) 61 | if response['content']['sfw'] == True: 62 | return "This image is safe for work." 63 | else: 64 | return "This image is not safe for work." 65 | 66 | if __name__ == "__main__": 67 | print(main("https://graph.org/file/13e95c6cc932530823391.png")) 68 | ``` 69 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 158 | # and can be added to the global gitignore or merged into this file. For a more nuclear 159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 160 | #.idea/ 161 | .pypirc 162 | -------------------------------------------------------------------------------- /lexica/core.py: -------------------------------------------------------------------------------- 1 | # Copyright 2024 Qewertyy, MIT License 2 | 3 | import base64, httpx 4 | from lexica.constants import * 5 | from lexica.utils import * 6 | from typing import Union, Dict, List 7 | 8 | 9 | class Client: 10 | """ 11 | Sync Client 12 | """ 13 | 14 | def __init__( 15 | self: "Client", 16 | ): 17 | """ 18 | Initialize the class 19 | """ 20 | self.url = BASE_URL 21 | self.session = httpx.Client(http2=True) 22 | self.timeout = 60 23 | self.headers = SESSION_HEADERS 24 | self.models = self.getModels() 25 | 26 | def _request(self: "Client", **kwargs) -> Union[Dict, bytes]: 27 | self.headers.update(kwargs.get("headers", {})) 28 | contents = {"json": {}, "data": {}, "files": {}} 29 | for i in list(contents): 30 | if i in kwargs: 31 | contents[i] = clean_dict(kwargs.get(i)) 32 | response = self.session.request( 33 | method=kwargs.get("method", "GET"), 34 | url=kwargs.get("url"), 35 | headers=self.headers, 36 | params=kwargs.get("params"), 37 | data=contents.get("data"), 38 | json=contents.get("json"), 39 | files=contents.get("files"), 40 | timeout=self.timeout, 41 | ) 42 | if response.status_code != 200: 43 | raise Exception(f"API error {response.text}") 44 | if response.headers.get("content-type") in [ 45 | "image/png", 46 | "image/jpeg", 47 | "image/jpg", 48 | ]: 49 | return response.content 50 | rdata = response.json() 51 | if rdata["code"] == 0: 52 | raise Exception(f"API error {response.text}") 53 | return rdata 54 | 55 | def getModels(self) -> dict: 56 | resp = self._request(url=f"{self.url}/models") 57 | return resp 58 | 59 | def ChatCompletion( 60 | self: "Client", 61 | messages: Union[List[Messages], str], 62 | model: dict = languageModels.gemini, 63 | **kwargs, 64 | ) -> dict: 65 | """ 66 | Get an answer from LLMs' for the given prompt 67 | Example: 68 | >>> client = Client() 69 | >>> response = client.ChatCompletion("Hello, Who are you?",0) 70 | >>> print(response) 71 | 72 | Args: 73 | prompt (str): Input text for the query. 74 | model (dict): Model dict of the LLM defaults to palm. 75 | 76 | Returns: 77 | dict: Answer from the API in the following format: 78 | { 79 | "message": str, 80 | "content": str, 81 | "code": int 82 | } 83 | """ 84 | model_id = model.get("modelId", 1) 85 | 86 | payload = {"model_id": model_id, **kwargs} 87 | 88 | if isinstance(messages, list) and all( 89 | isinstance(m, Messages) for m in messages 90 | ): 91 | if model_id in [20, 24]: 92 | payload["prompt"] = "\n".join( 93 | [m.content for m in messages if m.role == "user"] 94 | ) 95 | else: 96 | payload["messages"] = [ 97 | {"content": m.content, "role": m.role} for m in messages 98 | ] 99 | 100 | elif isinstance(messages, str): 101 | if model_id in [20, 24]: 102 | payload["prompt"] = messages 103 | else: 104 | payload["messages"] = [ 105 | {"content": "You are a helpful assistant", "role": "assistant"}, 106 | {"content": messages, "role": "user"}, 107 | ] 108 | else: 109 | raise ValueError( 110 | "Invalid input: messages must be a list of Messages or a string." 111 | ) 112 | 113 | resp = self._request( 114 | url=f"{self.url}/models", 115 | method="POST", 116 | json=payload, 117 | headers={"content-type": "application/json"}, 118 | ) 119 | return resp 120 | 121 | def upscale( 122 | self: "Client", 123 | image: bytes = None, 124 | image_url: str = None, 125 | format: str = "binary", 126 | ) -> bytes: 127 | """ 128 | Upscale an image 129 | Example: 130 | >>> client = Client() 131 | >>> response = client.upscale(image) 132 | >>> with open('upscaled.png', 'wb') as f: 133 | f.write(response) 134 | 135 | Args: 136 | image (bytes): Image in bytes. 137 | Returns: 138 | bytes: Upscaled image in bytes. 139 | """ 140 | payload = { 141 | "format": format, 142 | } 143 | if image and not image_url: 144 | payload.setdefault("image_data", base64.b64encode(image).decode("utf-8")) 145 | elif not image and not image_url: 146 | raise Exception("Either image or image_url is required") 147 | else: 148 | payload.setdefault("image_url", image_url) 149 | content = self._request(url=f"{self.url}/upscale", method="POST", json=payload) 150 | return content 151 | 152 | def generate( 153 | self: "Client", 154 | model_id: int, 155 | prompt: str, 156 | negative_prompt: str = "", 157 | images: int = 1, 158 | ) -> dict: 159 | """ 160 | Generate image from a prompt 161 | Example: 162 | >>> client = Client() 163 | >>> response = client.generate(model_id,prompt,negative_prompt) 164 | >>> print(response) 165 | 166 | Args: 167 | prompt (str): Input text for the query. 168 | negative_prompt (str): Input text for the query. 169 | 170 | Returns: 171 | dict: Answer from the API in the following format: 172 | { 173 | "message": str, 174 | "task_id": int, 175 | "request_id": str 176 | } 177 | """ 178 | payload = { 179 | "model_id": model_id, 180 | "prompt": prompt, 181 | "negative_prompt": negative_prompt, # optional 182 | "num_images": images, # optional number of images to generate (default: 1) and max 4 183 | } 184 | resp = self._request( 185 | url=f"{self.url}/models/inference", 186 | method="POST", 187 | json=payload, 188 | headers={"content-type": "application/json"}, 189 | ) 190 | return resp 191 | 192 | def getImages(self: "Client", task_id: str, request_id: str) -> dict: 193 | """ 194 | Generate image from a prompt 195 | Example: 196 | >>> client = Client() 197 | >>> response = client.getImages(task_id,request_id) 198 | >>> print(response) 199 | 200 | Args: 201 | prompt (str): Input text for the query. 202 | negative_prompt (str): Input text for the query. 203 | 204 | Returns: 205 | dict: Answer from the API in the following format: 206 | { 207 | "message": str, 208 | "img_urls": array, 209 | "code": int 210 | } 211 | """ 212 | payload = {"task_id": task_id, "request_id": request_id} 213 | resp = self._request( 214 | url=f"{self.url}/models/inference/task", 215 | method="POST", 216 | json=payload, 217 | headers={"content-type": "application/json"}, 218 | ) 219 | return resp 220 | 221 | def ImageReverse(self: "Client", imageUrl: str, engine: str = "goole") -> dict: 222 | """ 223 | Reverse search an image 224 | Example: 225 | >>> client = Client() 226 | >>> response = client.ImageReverse(imageUrl) 227 | >>> print(response) 228 | 229 | Args: 230 | imageUrl (str): url of the image for reverse search. 231 | 232 | Returns: 233 | dict: Answer from the API in the following format: 234 | { 235 | "message": str, 236 | "content": { 237 | "bestResults": array, 238 | "relatedContent": array, #optional 239 | "others": array #optional 240 | }, 241 | "code": int 242 | } 243 | """ 244 | resp = self._request( 245 | url=f"{self.url}/image-reverse/{engine}", 246 | method="POST", 247 | params={"img_url": imageUrl}, 248 | ) 249 | return resp 250 | 251 | def MediaDownloaders(self: "Client", platform: str, url: str): 252 | """ 253 | Returns with downloadable links for the given social media url 254 | Example: 255 | >>> client = Client() 256 | >>> response = client.MediaDownloaders(platform,url) 257 | >>> print(response) 258 | 259 | Args: 260 | platform (str): social media platform name. 261 | url (str): url of the post. 262 | 263 | Returns: 264 | dict: Answer from the API in the following format: 265 | { 266 | "message": str, 267 | "content": { 268 | "url": array, 269 | "mediaUrls": array, 270 | "by": str, 271 | "title": str, 272 | }, 273 | "code": int 274 | } 275 | """ 276 | resp = self._request( 277 | url=f"{self.url}/downloaders/{platform}", method="POST", params={"url": url} 278 | ) 279 | return resp 280 | 281 | def SearchImages( 282 | self: "Client", query: str, page: int = 0, engine: str = "google" 283 | ) -> dict: 284 | """ 285 | Search for images 286 | Example: 287 | >>> client = Client() 288 | >>> response = client.SearchImages(query) 289 | >>> print(response) 290 | 291 | Args: 292 | query (str): query to perform the search. 293 | 294 | Returns: 295 | dict: Answer from the API in the following format: 296 | { 297 | "message": str, 298 | "content": [], 299 | "code": int 300 | } 301 | """ 302 | resp = self._request( 303 | url=f"{self.url}/image-search/{engine}", 304 | method="POST", 305 | params={"query": query}, 306 | ) 307 | return resp 308 | 309 | def AntiNsfw(self: "Client", imageUrl: str, modelId: int = 28) -> dict: 310 | """ 311 | Check for an image if it is safe for work or not 312 | Example: 313 | >>> client = Client() 314 | >>> response = client.AntiNsfw(imageUrl) 315 | >>> print(response) 316 | 317 | Args: 318 | imageUrl (str): url of the image for reverse search. 319 | 320 | Returns: 321 | dict: Answer from the API in the following format: 322 | { 323 | "message": str, 324 | "content": { 325 | "sfw": bool #true if sfw (safe for work) else false 326 | }, 327 | "code": int 328 | } 329 | """ 330 | resp = self._request( 331 | url=f"{self.url}/anti-nsfw", 332 | method="POST", 333 | params={"img_url": imageUrl, "model_id": modelId}, 334 | ) 335 | return resp 336 | -------------------------------------------------------------------------------- /lexica/core_async.py: -------------------------------------------------------------------------------- 1 | # Copyright 2024 Qewertyy, MIT License 2 | 3 | import base64 4 | from typing import Union, Dict, List 5 | from httpx import AsyncClient as AsyncHttpxClient 6 | from lexica.constants import * 7 | from lexica.utils import * 8 | 9 | 10 | class AsyncClient: 11 | """ 12 | Async Client 13 | """ 14 | 15 | def __init__( 16 | self: "AsyncClient", 17 | ): 18 | """ 19 | Initialize the class 20 | """ 21 | self.url = BASE_URL 22 | self.session = AsyncHttpxClient( 23 | http2=True, 24 | ) 25 | self.headers = SESSION_HEADERS 26 | self.timeout = 60 27 | # self.models = self.getModels() 28 | 29 | async def _request(self: "AsyncClient", **kwargs) -> Union[Dict, bytes]: 30 | self.headers.update(kwargs.get("headers", {})) 31 | contents = {"json": {}, "data": {}, "files": {}} 32 | # for i in list(contents): 33 | # if i in kwargs: 34 | # contents[i] = clean_dict(kwargs.get(i)) 35 | response = await self.session.request( 36 | method=kwargs.get("method", "GET"), 37 | url=kwargs.get("url"), 38 | headers=self.headers, 39 | content=kwargs.get("content"), 40 | params=kwargs.get("params"), 41 | data=kwargs.get("data"), 42 | json=kwargs.get("json"), 43 | files=kwargs.get("files"), 44 | timeout=self.timeout, 45 | ) 46 | if response.status_code != 200: 47 | raise Exception(f"API error {response.text}") 48 | if response.headers.get("content-type") in [ 49 | "image/png", 50 | "image/jpeg", 51 | "image/jpg", 52 | ]: 53 | return response.content 54 | rdata = response.json() 55 | if rdata["code"] == 0: 56 | raise Exception(f"API error {response.text}") 57 | return rdata 58 | 59 | async def getModels(self) -> dict: 60 | resp = await self._request(url=f"{self.url}/models") 61 | return resp 62 | 63 | async def __aenter__(self): 64 | return self 65 | 66 | async def close(self) -> None: 67 | """Close async session""" 68 | return await self.session.aclose() 69 | 70 | async def ChatCompletion( 71 | self: "AsyncClient", 72 | messages: Union[List[Messages], str], 73 | model: dict = languageModels.gemini, 74 | **kwargs, 75 | ) -> dict: 76 | """ 77 | Get an answer from LLMs' for the given prompt 78 | Example: 79 | >>> client = AsyncClient() 80 | >>> response = await client.ChatCompletion("Hello, Who are you?") 81 | >>> print(response) 82 | 83 | Args: 84 | prompt (str): Input text for the query. 85 | model (dict): Model dict of the LLM defaults to palm. 86 | 87 | Returns: 88 | dict: Answer from the API in the following format: 89 | { 90 | "status": str, 91 | "content": str, 92 | "code": int 93 | } 94 | """ 95 | model_id = model.get("modelId", 1) 96 | 97 | payload = {"model_id": model_id, **kwargs} 98 | 99 | if isinstance(messages, list) and all( 100 | isinstance(m, Messages) for m in messages 101 | ): 102 | if model_id in [20, 24]: 103 | payload["prompt"] = "\n".join( 104 | [m.content for m in messages if m.role == "user"] 105 | ) 106 | else: 107 | payload["messages"] = [ 108 | {"content": m.content, "role": m.role} for m in messages 109 | ] 110 | 111 | elif isinstance(messages, str): 112 | if model_id in [20, 24]: 113 | payload["prompt"] = messages 114 | else: 115 | payload["messages"] = [ 116 | {"content": "You are a helpful assistant", "role": "assistant"}, 117 | {"content": messages, "role": "user"}, 118 | ] 119 | else: 120 | raise ValueError( 121 | "Invalid input: messages must be a list of Messages or a string." 122 | ) 123 | resp = await self._request( 124 | url=f"{self.url}/models", 125 | method="POST", 126 | json=payload, 127 | headers={"content-type": "application/json"}, 128 | ) 129 | return resp 130 | 131 | async def upscale( 132 | self: "AsyncClient", 133 | image: bytes = None, 134 | image_url: str = None, 135 | format: str = "binary", 136 | ) -> bytes: 137 | """ 138 | Upscale an image 139 | Example: 140 | >>> client = AsyncClient() 141 | >>> response = await client.upscale(image) 142 | >>> with open('upscaled.png', 'wb') as f: 143 | f.write(response) 144 | 145 | Args: 146 | image (bytes): Image in bytes. 147 | Returns: 148 | bytes: Upscaled image in bytes. 149 | """ 150 | payload = { 151 | "format": format, 152 | } 153 | if image and not image_url: 154 | payload.setdefault("image_data", base64.b64encode(image).decode("utf-8")) 155 | elif not image and not image_url: 156 | raise Exception("No image or image_url provided") 157 | else: 158 | payload.setdefault("image_url", image_url) 159 | content = await self._request( 160 | url=f"{self.url}/upscale", method="POST", json=payload 161 | ) 162 | return content 163 | 164 | async def generate( 165 | self: "AsyncClient", 166 | model_id: int, 167 | prompt: str, 168 | negative_prompt: str = "", 169 | images: int = 1, 170 | ) -> dict: 171 | """ 172 | Generate image from a prompt 173 | Example: 174 | >>> client = AsyncClient() 175 | >>> response = await client.generate(model_id,prompt,negative_prompt) 176 | >>> print(response) 177 | 178 | Args: 179 | prompt (str): Input text for the query. 180 | negative_prompt (str): Input text for the query. 181 | 182 | Returns: 183 | dict: Answer from the API in the following format: 184 | { 185 | "message": str, 186 | "task_id": int, 187 | "request_id": str, 188 | "code": int 189 | } 190 | """ 191 | payload = { 192 | "model_id": model_id, 193 | "prompt": prompt, 194 | "negative_prompt": negative_prompt, # optional 195 | "num_images": images, # optional number of images to generate (default: 1) and max 4 196 | } 197 | resp = await self._request( 198 | url=f"{self.url}/models/inference", 199 | method="POST", 200 | json=payload, 201 | headers={"content-type": "application/json"}, 202 | ) 203 | return resp 204 | 205 | async def getImages(self: "AsyncClient", task_id: str, request_id: str) -> dict: 206 | """ 207 | Generate image from a prompt 208 | Example: 209 | >>> client = AsyncClient() 210 | >>> response = client.getImages(task_id,request_id) 211 | >>> print(response) 212 | 213 | Args: 214 | prompt (str): Input text for the query. 215 | negative_prompt (str): Input text for the query. 216 | 217 | Returns: 218 | dict: Answer from the API in the following format: 219 | { 220 | "message": str, 221 | "img_urls": array, 222 | "code": int 223 | } 224 | """ 225 | payload = {"task_id": task_id, "request_id": request_id} 226 | resp = await self._request( 227 | url=f"{self.url}/models/inference/task", 228 | method="POST", 229 | json=payload, 230 | headers={"content-type": "application/json"}, 231 | ) 232 | return resp 233 | 234 | async def ImageReverse( 235 | self: "AsyncClient", imageUrl: str, engine: str = "google" 236 | ) -> dict: 237 | """ 238 | Reverse search an image 239 | Example: 240 | >>> client = AsyncClient() 241 | >>> response = await client.ImageReverse(imageUrl) 242 | >>> print(response) 243 | 244 | Args: 245 | imageUrl (str): url of the image for reverse search. 246 | 247 | Returns: 248 | dict: Answer from the API in the following format: 249 | { 250 | "message": str, 251 | "content": { 252 | "bestResults": array, 253 | "relatedContent": array, #optional 254 | "others": array #optional 255 | }, 256 | "code": int 257 | } 258 | """ 259 | resp = await self._request( 260 | url=f"{self.url}/image-reverse/{engine}", 261 | method="POST", 262 | params={"img_url": imageUrl}, 263 | ) 264 | return resp 265 | 266 | async def MediaDownloaders(self: "AsyncClient", platform: str, url: str) -> dict: 267 | """ 268 | Downloadable links for the given social media url 269 | Example: 270 | >>> client = AsyncClient() 271 | >>> response = client.MediaDownloaders(platform,url) 272 | >>> print(response) 273 | 274 | Args: 275 | platform (str): social media platform name. 276 | url (str): url of the post. 277 | 278 | Returns: 279 | dict: Answer from the API in the following format: 280 | { 281 | "message": str, 282 | "content": { 283 | "url": array, 284 | "mediaUrls": array, 285 | "by": str, 286 | "title": str, 287 | }, 288 | "code": int 289 | } 290 | """ 291 | resp = await self._request( 292 | url=f"{self.url}/downloaders/{platform}", method="POST", params={"url": url} 293 | ) 294 | return resp 295 | 296 | async def SearchImages( 297 | self: "AsyncClient", query: str, page: int = 0, engine: str = "google" 298 | ) -> dict: 299 | """ 300 | Search for images 301 | Example: 302 | >>> client = AsyncClient() 303 | >>> response = await client.SearchImages(query) 304 | >>> print(response) 305 | 306 | Args: 307 | query (str): query to perform the search. 308 | 309 | Returns: 310 | dict: Answer from the API in the following format: 311 | { 312 | "message": str, 313 | "content": [], 314 | "code": int 315 | } 316 | """ 317 | resp = await self._request( 318 | url=f"{self.url}/image-search/{engine}", 319 | method="POST", 320 | params={"query": query, "page": page}, 321 | ) 322 | return resp 323 | 324 | async def AntiNsfw(self: "AsyncClient", imageUrl: str, modelId: int = 28) -> dict: 325 | """ 326 | Check for an image if it is safe for work or not 327 | Example: 328 | >>> client = AsyncClient() 329 | >>> response = await client.AntiNsfw(imageUrl) 330 | >>> print(response) 331 | 332 | Args: 333 | imageUrl (str): url of the image for anti nsfw. 334 | 335 | Returns: 336 | dict: Answer from the API in the following format: 337 | { 338 | "message": str, 339 | "content": { 340 | "sfw": bool #true if sfw (safe for work) else false 341 | }, 342 | "code": int 343 | } 344 | """ 345 | resp = await self._request( 346 | url=f"{self.url}/anti-nsfw", 347 | method="POST", 348 | params={"img_url": imageUrl, "model_id": modelId}, 349 | ) 350 | return resp 351 | --------------------------------------------------------------------------------