├── .github └── workflows │ └── python-publish.yml ├── .gitignore ├── LICENSE ├── README.md ├── aify ├── __init__.py ├── __main__.py ├── _auth.py ├── _entry.py ├── _env.py ├── _error.py ├── _logging.py ├── _program.py ├── _web_template.py ├── embeddings.py ├── embeddings_openai.py ├── embeddings_sentence_transformers.py ├── memories │ ├── __init__.py │ └── google_cloud_datastore.py └── memory.py ├── bin └── aify ├── build.sh ├── docs ├── app_template.md ├── assets │ └── images │ │ └── screenshots │ │ ├── aify_webui_new_start_1_screenshot.png │ │ ├── aify_webui_new_start_screenshot.png │ │ └── aify_webui_screenshot.png ├── customized │ └── main.html ├── deploy_to_clouds │ └── google_appengine.md ├── dive_into_apps.md ├── enhance_with_python.md ├── examples │ ├── chatbot.md │ └── llm_generation.md ├── getting_started.md ├── index.md └── rest_api.md ├── examples ├── .env ├── .gitignore ├── chatbot.yml ├── comments.yml ├── deploy-to-google-appengine │ ├── app.yaml │ └── index.yaml ├── emoji.yml ├── helpers.py ├── indie_hacker.yml ├── llama.yml └── translator.yml ├── js ├── .gitignore ├── build.sh ├── config-overrides.js ├── package-lock.json ├── package.json ├── public │ └── index.html └── src │ ├── aify.js │ ├── chat.js │ ├── index.css │ └── index.js ├── mkdocs.yml ├── requirements.txt ├── requirements_dev.txt ├── setup.py └── webui ├── .gitignore ├── __init__.py └── templates ├── auth.html ├── basic.html └── index.html /.github/workflows/python-publish.yml: -------------------------------------------------------------------------------- 1 | # This workflow will upload a Python Package using Twine when a release is created 2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python#publishing-to-package-registries 3 | 4 | # This workflow uses actions that are not certified by GitHub. 5 | # They are provided by a third-party and are governed by 6 | # separate terms of service, privacy policy, and support 7 | # documentation. 8 | 9 | name: Upload Python Package 10 | 11 | on: 12 | release: 13 | types: [published] 14 | 15 | permissions: 16 | contents: read 17 | 18 | jobs: 19 | deploy: 20 | 21 | runs-on: ubuntu-latest 22 | 23 | permissions: 24 | # IMPORTANT: this permission is mandatory for trusted publishing 25 | id-token: write 26 | 27 | steps: 28 | - uses: actions/checkout@v3 29 | - name: Set up Python 30 | uses: actions/setup-python@v3 31 | with: 32 | python-version: '3.x' 33 | - name: Install dependencies 34 | run: | 35 | python -m pip install --upgrade pip 36 | pip install build 37 | - name: Build package 38 | run: python -m build 39 | - name: Publish package 40 | uses: pypa/gh-action-pypi-publish@release/v1 41 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | .vscode 3 | .env 4 | __pycache__ 5 | *.egg-info 6 | build 7 | dist 8 | site 9 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Wang Shenggong 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # 🚀 aify 2 | 3 | ### Build your AI-native application in seconds. 4 | 5 | [Home](https://aify.run/) | [Documentation](https://docs.aify.run/) | [Feedback](https://github.com/shellc/aify/issues) 6 | 7 | 🛠️ AI-native application framework and runtime. Simply write a YAML file. 8 | 9 | 🤖 Ready-to-use AI chatbot UI. 10 | 11 | **Dependencies** 12 | 13 | * [microsoft/guidance](https://github.com/microsoft/guidance) as the core prompt engine 14 | * [Uvicorn](https://www.uvicorn.org/), [Starlette](https://www.starlette.io/), [FastAPI](https://fastapi.tiangolo.com/) as the server 15 | 16 | **Features** 17 | 18 | * Models: The LLMs/transformers models supported by guidance. 19 | * Memory storage: Local file / Google Cloud Datastore / User-defined 20 | * Embeddings: OpenAI / User-defined 21 | * Vector storage and search: Local CSV files, Pandas DataFrame and Numpy in memory / User-defined 22 | * Deployment: Local / [Google Cloud App engine](https://docs.aify.run/deploy_to_clouds/google_appengine/) 23 | * UI: Chatbot webui 24 | * API: RESTful API / Python 25 | 26 | ## Getting started 27 | 28 | Welcome to Aify, the AI-native application framework and runtime that allows you to ship your AI applications in seconds! With Aify, you can easily build and deploy AI-powered applications using a simple YAML file. In this guide, we will walk you through the steps to get started with Aify and create your first AI application. 29 | 30 | 31 | ### Installation 32 | 33 | To begin, make sure you have the following prerequisites installed on your system: 34 | 35 | * Python 3.8 or higher 36 | * Pip package manager 37 | 38 | Once you have the prerequisites, you can install Aify by running the following command in your terminal: 39 | 40 | ```bash 41 | pip install aify 42 | ``` 43 | 44 | ### Create your first app 45 | 46 | You need to prepare a directory for your applications: 47 | 48 | ```bash 49 | mkdir ./apps 50 | ``` 51 | 52 | Now you can start the aify service and then access [http://localhost:2000](http://localhost:2000) using a browser, and aify will greet you. 53 | 54 | ```bash 55 | aify run ./apps 56 | ``` 57 | 58 | ![aify screenshot](https://docs.aify.run/assets/images/screenshots/aify_webui_new_start_screenshot.png) 59 | 60 | Now it's just a blank application, you can't use it for anything. Next, we will create a chatbot. 61 | 62 | Creating a YAML file aify uses a YAML file to define your AI application. This file contains all the necessary configurations and settings for your application. Here's an example of a basic YAML file: 63 | 64 | ```yaml 65 | title: Chatbot 66 | 67 | model: 68 | vendor: openai 69 | name: gpt-3.5-turbo 70 | params: 71 | api_key: 72 | 73 | prompt: | 74 | {{#system~}} 75 | You are a helpful and terse assistant. 76 | {{~/system}} 77 | 78 | {{#each (memory.read program_name session_id n=3)}} 79 | {{~#if this.role == 'user'}} 80 | {{#user~}} 81 | {{this.content}} 82 | {{~/user}} 83 | {{/if~}} 84 | {{~#if this.role == 'assistant'}} 85 | {{#assistant~}} 86 | {{this.content}} 87 | {{~/assistant}} 88 | {{/if~}} 89 | {{~/each}} 90 | 91 | {{#user~}} 92 | {{prompt}} 93 | {{memory.save program_name session_id 'user' prompt}} 94 | {{~/user}} 95 | 96 | {{#assistant~}} 97 | {{gen 'answer' temperature=0 max_tokens=2000}} 98 | {{memory.save program_name session_id 'assistant' answer}} 99 | {{~/assistant}} 100 | 101 | variables: 102 | - name: prompt 103 | type: input 104 | - name: answer 105 | type: output 106 | ``` 107 | 108 | Here are some simple explanations about this YAML file: 109 | 110 | * The ***title*** represents the name of this application. 111 | * The ***model*** section defines the AI model used by this application and the runtime parameters required by the model. 112 | * The ***prompt*** section is used to drive the application's execution. Aify uses the guidance software package provided by Microsoft to drive the execution of the AI program. Guidance provides a way to operate as a Chain of Thought. Since guidance uses the Handlebars template system, the format of this section is actually a Handlebars template.The prompt section contains some helper functions that allow the AI model to dynamically change its runtime behavior, helping us achieve more complex functionality. These functions are built-in to aify, but you can also write your own helper functions in Python to accomplish specific tasks. 113 | * The terms "system," "user," and "assistant" are used to define the roles in an LLM-based chat task. 114 | * "memory.read" and "memory.write" are built-in helper functions in Aify, used to save and load the conversation history of users and AI. 115 | * "each" and "if" are branch control statements provided by Handlebars. 116 | * "gen" is the function provided by "guidance" to indicate the execution of LLM generation tasks. 117 | * The ***variables*** section defines the input and output variables of the application, which are used for external systems to access the data generated by AI through an API. 118 | 119 | ### Play with your AI app 120 | 121 | Now go back to your browser and refresh the page. You will see the application you just created. You can have some conversations with it, just like ChatGPT. 122 | 123 | ![aify screenshot](https://docs.aify.run/assets/images/screenshots/aify_webui_new_start_1_screenshot.png) 124 | 125 | ### aify is not a chatbot 126 | 127 | Although aify provides a chatbot interface, its main purpose is not to provide a replacement for ChatGPT or a competitive conversation application. 128 | 129 | The chatbot UI is only for convenient debugging of AI applications. Of course, you can indeed use it as a chatbot for daily use. 130 | 131 | The main goal of aify is to provide an efficient framework for developing and deploying AI applications. 132 | 133 | If your goal is to develop your own complex AI applications, you should pay more attention to the APIs and extension mechanisms provided by aify. 134 | 135 | 📝 More examples: https://github.com/shellc/aify/tree/main/examples 136 | 137 | ### Webui screenshot 138 | 139 | ![Webui screenshot](https://docs.aify.run/assets/images/screenshots/aify_webui_screenshot.png) -------------------------------------------------------------------------------- /aify/__init__.py: -------------------------------------------------------------------------------- 1 | __version__ = '0.1.23' 2 | 3 | from ._logging import logger 4 | from ._entry import entry, api 5 | from ._web_template import apps_render as render 6 | from ._program import programs 7 | from ._auth import TokenManager 8 | -------------------------------------------------------------------------------- /aify/__main__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import os 3 | import sys 4 | import argparse 5 | import uvicorn 6 | from uvicorn.config import LOGGING_CONFIG 7 | 8 | import aify 9 | import aify.embeddings 10 | 11 | logger = {"handlers": ["default"], "level": "INFO", "propagate": False} 12 | LOGGING_CONFIG['loggers'][''] = logger 13 | LOGGING_CONFIG['loggers']['aify'] = logger 14 | 15 | AIFY_LIB_DIR = os.path.abspath(os.path.join( 16 | os.path.dirname(__file__), os.pardir)) 17 | sys.path.insert(0, AIFY_LIB_DIR) 18 | 19 | 20 | def runserver(args): 21 | """ 22 | Initialize and start the uvicorn service process. 23 | """ 24 | apps_dir = args.apps_dir 25 | if apps_dir: 26 | apps_dir = os.path.abspath(apps_dir) 27 | os.environ['AIFY_APPS_DIR'] = apps_dir 28 | 29 | if args.reload: 30 | if not args.reload_dirs: 31 | args.reload_dirs = [] 32 | args.reload_dirs.append(AIFY_LIB_DIR) 33 | args.reload_dirs.append(apps_dir) 34 | 35 | uvicorn.run('aify:entry', 36 | host=args.host, 37 | port=args.port, 38 | workers=args.workers, 39 | reload=args.reload, 40 | h11_max_incomplete_event_size=0, 41 | log_config=LOGGING_CONFIG, 42 | reload_dirs=args.reload_dirs, 43 | reload_includes="**/*.[py][yam]*" 44 | ) 45 | 46 | 47 | parser = argparse.ArgumentParser() 48 | parser.add_argument('--env-file', default='.env', 49 | help="environment configuration file") 50 | 51 | subparser = parser.add_subparsers( 52 | title="commands", help="type command --help to print help message") 53 | 54 | # run command 55 | parser_run = subparser.add_parser('run', help="run aify server") 56 | parser_run.add_argument('-H', '--host', default='0.0.0.0', 57 | help="bind socket to this host. default: 0.0.0.0") 58 | parser_run.add_argument('-p', '--port', default=2000, 59 | type=int, help="bind socket to this port, default: 2000") 60 | parser_run.add_argument('-w', '--workers', default=1, type=int, 61 | help="number of worker processes, default: 1") 62 | parser_run.add_argument('-r', '--reload', default=False, 63 | action='store_true', help="enable auto-reload") 64 | parser_run.add_argument('--reload-dirs', default=None, 65 | help="set reload directories explicitly, default is applications directory") 66 | 67 | parser_run.add_argument("apps_dir", nargs='?', 68 | default=None, help="applications directory") 69 | parser_run.set_defaults(func=runserver) 70 | 71 | # embed command 72 | parser_embed = subparser.add_parser( 73 | 'embed', help="build embeddings from a CSV dataset") 74 | parser_embed.add_argument("from_csv_file", help="read data from this CSV file") 75 | parser_embed.add_argument( 76 | "to_csv_file", help="write embeddings to this CSV file") 77 | parser_embed.add_argument('--vendor', default=None, 78 | help="specify the model vendor name, default is openai, options: openai, sentence-transforers",) 79 | parser_embed.add_argument('--model-name', default=None, 80 | help="speicify the model name") 81 | parser_embed.set_defaults(func=lambda args: aify.embeddings.build_csv( 82 | args.from_csv_file, args.to_csv_file, vendor=args.vendor, model_name=args.model_name)) 83 | 84 | 85 | def main(): 86 | """ 87 | Main function to start Aify. 88 | """ 89 | args = parser.parse_args(sys.argv[1:]) 90 | 91 | if os.path.exists(args.env_file): 92 | from dotenv import load_dotenv 93 | load_dotenv(args.env_file) 94 | 95 | if hasattr(args, 'func'): 96 | args.func(args) 97 | else: 98 | parser.print_help() 99 | 100 | 101 | # Fire 102 | main() 103 | -------------------------------------------------------------------------------- /aify/_auth.py: -------------------------------------------------------------------------------- 1 | import os 2 | import importlib 3 | from typing import Optional, List 4 | 5 | from starlette.authentication import ( 6 | AuthCredentials, AuthenticationBackend, BaseUser 7 | ) 8 | 9 | from . import _env 10 | 11 | class TokenManager: 12 | def get_token(self): 13 | raise NotImplementedError() 14 | 15 | class LocalTokenManager(TokenManager): 16 | def __init__(self) -> None: 17 | super().__init__() 18 | self._auth_required = True 19 | self._tokens = {} 20 | 21 | self._load_tokens() 22 | 23 | def get_token(self, token: str): 24 | return self._tokens.get(token) 25 | 26 | def _load_tokens(self): 27 | 28 | tokens_file = os.path.join(_env.apps_dir(), '.tokens') 29 | if not os.path.exists(tokens_file): 30 | self._auth_required = False 31 | return 32 | 33 | with open(tokens_file) as f: 34 | for line in f.readlines(): 35 | segs = line.split('|') 36 | token = segs[0].strip() 37 | if len(token) > 0: 38 | self._tokens[token] = { 39 | 'username': segs[1] if len(segs) > 1 else None, 40 | 'permissions': [x.strip() for x in segs[2].split(',')] if len(segs) > 2 else [] 41 | } 42 | 43 | 44 | class AuthenticatedUser(BaseUser): 45 | def __init__(self, username: Optional[str] = "anonymous", permissions: Optional[List[str]] = []) -> None: 46 | self._username = username 47 | self._permissions = permissions 48 | 49 | @property 50 | def identity(self) -> str: 51 | return self._username 52 | 53 | @property 54 | def username(self) -> str: 55 | return self._username 56 | 57 | @property 58 | def is_authenticated(self) -> bool: 59 | return True 60 | 61 | @property 62 | def permissions(self) -> Optional[List[str]]: 63 | return self._permissions 64 | 65 | 66 | class BasicAuthBackend(AuthenticationBackend): 67 | def __init__(self) -> None: 68 | super().__init__() 69 | 70 | self._inited = False 71 | self._auth_required = True 72 | self._token_manager = None 73 | 74 | def _init(self): 75 | if 'TOKEN_MANAGER' in os.environ: 76 | mgr_ref = os.environ['TOKEN_MANAGER'] 77 | idx = mgr_ref.rfind(".") 78 | pkg_name = mgr_ref[:idx] 79 | class_name = mgr_ref[idx+1:] 80 | 81 | module = importlib.import_module(pkg_name) 82 | klass = getattr(module, class_name) 83 | self._token_manager = klass() 84 | else: 85 | self._token_manager = LocalTokenManager() 86 | self._auth_required = self._token_manager._auth_required 87 | 88 | async def authenticate(self, request): 89 | if not self._inited: 90 | self._init() 91 | 92 | if not self._auth_required: 93 | return AuthCredentials(scopes=['authenticated', 'write']), AuthenticatedUser() 94 | 95 | token = None 96 | if 'token' in request.query_params: 97 | token = request.query_params.get('token') 98 | 99 | if not token and "Authorization" in request.headers: 100 | auth = request.headers["Authorization"] 101 | 102 | scheme, token_str = auth.split() 103 | if scheme.lower() == 'bearer': 104 | token = token_str 105 | 106 | if not token: 107 | token = request.cookies.get('token') 108 | 109 | if token: 110 | token_stored = self._token_manager.get_token(token) 111 | if token_stored: 112 | username = token_stored['username'] 113 | permissions = token_stored['permissions'] 114 | user = AuthenticatedUser( 115 | username=username, permissions=permissions) 116 | 117 | scopes = ["authenticated"] 118 | scopes.extend(permissions) 119 | credentials = AuthCredentials(scopes=scopes) 120 | 121 | return credentials, user 122 | -------------------------------------------------------------------------------- /aify/_entry.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import json 4 | import importlib 5 | import contextlib 6 | import hashlib 7 | import datetime 8 | from starlette.applications import Starlette 9 | from starlette.routing import Mount, Route 10 | from starlette.staticfiles import StaticFiles 11 | from starlette.responses import JSONResponse, StreamingResponse, RedirectResponse 12 | from starlette.middleware import Middleware 13 | from starlette.middleware.authentication import AuthenticationMiddleware 14 | from starlette.authentication import requires 15 | from starlette.exceptions import HTTPException 16 | from fastapi import FastAPI, Request 17 | from . import _env 18 | from . import _program 19 | from . import _auth 20 | from ._logging import logger 21 | from ._web_template import render 22 | 23 | # FastAPI 24 | api = FastAPI() 25 | 26 | def get_program(name: str) -> _program.Program: 27 | """A program is an application defined by the user. 28 | 29 | Parameters 30 | ---------- 31 | name : string 32 | The name of the program, typically the filename of the user-defined application template. 33 | """ 34 | program = None 35 | try: 36 | program = _program.get(name) 37 | if not program: 38 | raise ValueError() 39 | except Exception as e: 40 | logger.error(e, exc_info=e) 41 | raise HTTPException( 42 | status_code=404, detail=f"Not a valid app: {e}") 43 | return program 44 | 45 | def _validate_sessions_id(sessions_id:str, request: Request): 46 | if sessions_id.startswith('p-'): 47 | auth_header = request.headers.get("Authorization") 48 | if not auth_header: 49 | raise HTTPException(status_code=403, detail="Forbidden") 50 | 51 | token = "".join(auth_header.split(' ')[-1:]) 52 | token_hash = hashlib.sha256(token.encode('utf-8')).hexdigest() 53 | 54 | ss = "".join(sessions_id.split('-')[1:2]) 55 | 56 | if token_hash != ss: 57 | raise HTTPException(status_code=403, detail="Forbidden") 58 | 59 | @api.put('/apps/{name}/{session_id}') 60 | @requires(['authenticated', 'write']) 61 | async def execute_program(request: Request, name: str, session_id: str): 62 | """Execute the program identified by the name.""" 63 | program = get_program(name) 64 | 65 | kwargs = {} 66 | try: 67 | kwargs = await request.json() 68 | except Exception as e: 69 | raise HTTPException(status_code=400, detail="Bad request body.") 70 | 71 | _validate_sessions_id(sessions_id=session_id, request=request) 72 | 73 | # update user info 74 | kwargs['user_info'] = json.dumps({ 75 | 'language': request.headers.get('Accept-Language'), 76 | 'now': request.headers.get('Date') if 'Date' in request.headers else datetime.datetime.now().isoformat() 77 | }) 78 | 79 | kwargs['program_name'] = name 80 | kwargs['session_id'] = session_id 81 | 82 | filter_variable = request.query_params.get('variable') 83 | if filter_variable and not (filter_variable in program.input_variable_names or filter_variable in program.output_variable_names): 84 | raise HTTPException(status_code=400, detail="invalid variable.") 85 | 86 | # Check if the Server-Sent Event enabled. 87 | sse = 'sse' in request.query_params 88 | 89 | # https://github.com/microsoft/guidance/discussions/129 90 | async def _aiter(): 91 | pos = dict([(vname, 0) for vname in program.output_variable_names]) 92 | catched = False 93 | 94 | kwargs['stream'] = True 95 | kwargs['async_mode'] = True 96 | kwargs['silent'] = True 97 | 98 | async for t in program.run(**kwargs): 99 | if t._exception: 100 | if catched: 101 | return 102 | catched = True 103 | 104 | e = { 105 | "error": str(t._exception) 106 | } 107 | if sse: 108 | yield "event: error\ndata: %s\n\n" % json.dumps(e) 109 | else: 110 | yield json.dumps(e) 111 | else: 112 | for vname in program.output_variable_names: 113 | 114 | if filter_variable and vname != filter_variable: 115 | continue 116 | 117 | generated = t.get(vname) 118 | if generated: 119 | diff = generated[pos[vname]:] 120 | pos[vname] = len(generated) 121 | if len(diff) > 0: 122 | if sse: 123 | e = { 124 | "variable": vname, 125 | "diff": diff 126 | } 127 | 128 | yield "event: message\ndata: %s\n\n" % json.dumps(e) 129 | else: 130 | yield diff 131 | 132 | content_type = 'text/event-stream' if sse else 'text/plain' 133 | 134 | try: 135 | it = _aiter() 136 | except Exception as e: 137 | raise HTTPException(status_code=400, detail=f"Something wrong: {e}") 138 | 139 | return StreamingResponse(it, headers={'Content-Type': content_type, 'X-Accel-Buffering': 'no'}) 140 | 141 | @api.get('/apps/{name}/{session_id}/memories') 142 | @requires(['authenticated']) 143 | async def get_memories(request: Request, name: str, session_id: str, limit: int=1000): 144 | """Get the memory content of the specified application's current session.""" 145 | _validate_sessions_id(sessions_id=session_id, request=request) 146 | 147 | memories = [] 148 | program = get_program(name) 149 | memory = program.modules.get('memory') 150 | if memory: 151 | m = memory.read(name, session_id, max_len=2040*1024, n=limit) 152 | if m: 153 | memories = m 154 | return JSONResponse(memories) 155 | 156 | @api.get('/apps') 157 | @requires(['authenticated']) 158 | async def list_apps(request: Request): 159 | """List applications""" 160 | progs = [] 161 | 162 | for name, prog in _program.programs().items(): 163 | progs.append({ 164 | 'name': name, 165 | 'title': prog.template.get('title'), 166 | 'description': prog.template.get('description'), 167 | 'icon_emoji': prog.template.get('icon_emoji'), 168 | 'is_public': prog.template.get('is_public'), 169 | 'variables': prog.template.get('variables') 170 | }) 171 | 172 | return JSONResponse(progs) 173 | 174 | @api.get('/sessions') 175 | @requires(['authenticated']) 176 | async def list_sessions(request: Request): 177 | """List sessions""" 178 | 179 | sessions = [] 180 | 181 | progs = [] 182 | 183 | for name, prog in _program.programs().items(): 184 | memory = prog.modules.get('memory') 185 | sessions.extend(memory.sessions(name)) 186 | 187 | sessions = sorted(sessions, key=lambda x: x.get('last_modified'), reverse=True) 188 | 189 | return JSONResponse(sessions) 190 | 191 | async def auth(request: Request): 192 | response = await render('auth.html')(request=request) 193 | if request.method == 'POST': 194 | next = request.query_params.get('next') 195 | if next: 196 | response = RedirectResponse(next, status_code=302) 197 | 198 | async with request.form() as form: 199 | token = form.get('token') 200 | response.set_cookie('token', token, max_age=7*24*3600) 201 | return response 202 | 203 | @api.get('/user') 204 | @requires(['authenticated']) 205 | async def get_user(request: Request): 206 | user = { 207 | 'username': request.user.username, 208 | "permissions": request.user.permissions 209 | } 210 | return JSONResponse(user) 211 | 212 | # Routes 213 | routes = [ 214 | Mount( 215 | '/api', 216 | name='api', 217 | app=api 218 | ), 219 | Mount( 220 | '/static', 221 | name='static', 222 | app=StaticFiles(directory=os.path.join(_env.webui_dir(), 'static'), check_dir=False), 223 | ), 224 | Route( 225 | '/', 226 | name='home', 227 | endpoint=requires(scopes=['authenticated'], redirect='auth')(render('index.html')) 228 | ), 229 | Route( 230 | '/auth', 231 | name='auth', 232 | methods=['POST', 'GET'], 233 | endpoint=auth 234 | ) 235 | ] 236 | 237 | apps_static_dir = os.path.join(_env.apps_dir(), 'static') 238 | if os.path.exists(apps_static_dir): 239 | routes.append(Mount( 240 | '/apps/static', 241 | name='apps_static', 242 | app=StaticFiles(directory=apps_static_dir, check_dir=False), 243 | )) 244 | 245 | # Middlewares 246 | middleware = [ 247 | Middleware(AuthenticationMiddleware, backend=_auth.BasicAuthBackend()) 248 | ] 249 | 250 | def import_entry(): 251 | sys.path.append(_env.apps_dir()) 252 | entry_py = os.path.join(_env.apps_dir(), 'entry.py') 253 | if os.path.exists(entry_py): 254 | importlib.import_module('entry') 255 | 256 | @contextlib.asynccontextmanager 257 | async def lifespan(app): 258 | import_entry() 259 | yield 260 | 261 | entry = Starlette(debug=True, routes=routes, middleware=middleware, lifespan=lifespan) 262 | 263 | env_file = os.path.join(_env.apps_dir(), '.env') 264 | if os.path.exists(env_file): 265 | from dotenv import load_dotenv 266 | load_dotenv(env_file) -------------------------------------------------------------------------------- /aify/_env.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | _here = os.path.abspath(os.path.join(os.path.dirname(__file__))) 4 | 5 | 6 | def apps_dir(): 7 | """Returns the directory where user applications are stored.""" 8 | apps_dir = os.environ['AIFY_APPS_DIR'] if 'AIFY_APPS_DIR' in os.environ else None 9 | return apps_dir if apps_dir else '.' 10 | 11 | 12 | def webui_dir(): 13 | "Returns the directory where webuid resources ared stored." 14 | return os.path.join(_here, '../webui') 15 | -------------------------------------------------------------------------------- /aify/_error.py: -------------------------------------------------------------------------------- 1 | class AifyError(Exception): 2 | pass 3 | -------------------------------------------------------------------------------- /aify/_logging.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | logger = logging.getLogger('aify') 4 | -------------------------------------------------------------------------------- /aify/_program.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import glob 4 | import yaml 5 | import importlib 6 | from typing import Dict 7 | from string import Template 8 | try: 9 | from yaml import CLoader as Loader, CDumper as Dumper 10 | except: 11 | from yaml import Loader, Dumper 12 | 13 | import guidance 14 | 15 | from . import _env 16 | from ._error import AifyError 17 | from ._logging import logger 18 | 19 | 20 | class CompileError(AifyError): 21 | pass 22 | 23 | 24 | class Program: 25 | """ 26 | The Program class represents an executable program driven by a LLM. 27 | """ 28 | 29 | def __init__(self, template: str) -> None: 30 | self._template = None 31 | self._runner = None 32 | self._modules = {} 33 | self._input_variables = [] 34 | self._output_variables = [] 35 | 36 | try: 37 | self._compile(template=template) 38 | except Exception as e: 39 | raise CompileError(e) 40 | 41 | def _compile(self, template: str): 42 | try: 43 | # replace env variables 44 | t = Template(template=template) 45 | t = t.substitute(os.environ) 46 | self._template = yaml.load(t, Loader=Loader) 47 | except Exception as e: 48 | raise ValueError(f"parse template faild, {e}") 49 | 50 | model = None 51 | model_settings = self._template.get('model') 52 | if not model_settings: 53 | raise ValueError("missing model section in the template.") 54 | 55 | model_name = model_settings.get('name') 56 | if not model_name: 57 | raise ValueError("missing model name.") 58 | 59 | model_params = model_settings.get('params', {}) 60 | 61 | if 'type' not in model_settings or model_settings['type'] == 'llm': 62 | if model_settings['vendor'].lower() == 'openai': 63 | model = guidance.llms.OpenAI(model_name, **model_params) 64 | else: 65 | raise ValueError( 66 | f"the model vendor `{model_settings['vendor']}` is not support yet.") 67 | elif model_settings['type'] == 'transformers': 68 | model = guidance.llms.Transformers(model=model_name, **model_params) 69 | else: 70 | raise ValueError( 71 | f"the model type `{model_settings['type']}` is not support yet.") 72 | 73 | prompt = self._template.get('prompt') 74 | if not prompt or not isinstance(prompt, str): 75 | raise ValueError("missing prompt text.") 76 | 77 | self._runner = guidance(prompt, llm=model, silent=True) 78 | 79 | self._import_modules() 80 | 81 | variables = self._template.get('variables', []) 82 | for var in variables: 83 | name = var.get('name') 84 | if not name or not isinstance(name, str): 85 | raise ValueError(f'invalid variable') 86 | typ = var.get('type') 87 | if not typ or typ == 'output': 88 | self._output_variables.append(var) 89 | else: 90 | self._input_variables.append(var) 91 | 92 | def _import_modules(self): 93 | module_names = self.template.get("modules", {}) 94 | for name, module_name in module_names.items(): 95 | try: 96 | module = importlib.import_module(module_name) 97 | self._modules[name] = module 98 | except Exception as e: 99 | raise ValueError(f"import moudle `{module_name}` failed, {e}") 100 | 101 | if 'memory' not in self._modules: 102 | import aify.memory 103 | self._modules['memory'] = aify.memory 104 | 105 | if 'embeddings' not in self._modules: 106 | import aify.embeddings 107 | self._modules['embeddings'] = aify.embeddings 108 | 109 | def run(self, **kwargs): 110 | """Run this program.""" 111 | kwargs.update(self._modules) 112 | r = self._runner(**kwargs) 113 | r.update_display.throttle_limit = 0 114 | return r 115 | 116 | @property 117 | def modules(self): 118 | return self._modules 119 | 120 | @property 121 | def template(self): 122 | return self._template 123 | 124 | @property 125 | def input_variable_names(self): 126 | return [x['name'] for x in self._input_variables] 127 | 128 | @property 129 | def output_variable_names(self): 130 | return [x['name'] for x in self._output_variables] 131 | 132 | 133 | def _load_template(url: str): 134 | template = None 135 | if url.startswith("http://") or url.startswith("https://"): 136 | pass 137 | else: 138 | with open(url) as f: 139 | template = f.read() 140 | 141 | return template 142 | 143 | 144 | _programs = {} 145 | 146 | 147 | def _reload(apps_dir: str = None, skip_error=False): 148 | """Load programs from the user's application directory.""" 149 | global _programs 150 | _programs = {} 151 | 152 | if not apps_dir: 153 | apps_dir = _env.apps_dir() 154 | 155 | sys.path.append(apps_dir) 156 | 157 | exts = ['*.yml', '*.yaml'] 158 | files = [] 159 | for e in exts: 160 | files.extend(glob.glob(os.path.join(apps_dir, e))) 161 | for f in files: 162 | template = _load_template(f) 163 | try: 164 | program_name = os.path.basename(f).split('.')[0] 165 | program = Program(template=template) 166 | _programs[program_name] = program 167 | except Exception as e: 168 | if not skip_error: 169 | raise e 170 | else: 171 | logger.warn(f"Compile program ({program_name}) error: {e}") 172 | 173 | def programs() -> Dict[str, Program]: 174 | if len(_programs) == 0: 175 | _reload(skip_error=True) 176 | return _programs 177 | 178 | def get(name: str): 179 | """Retrieve a specific program from the user's application directory.""" 180 | global _programs 181 | if name not in _programs: 182 | apps_dir = _env.apps_dir() 183 | if os.path.exists(os.path.join(apps_dir, f'{name}.yml')) or os.path.exists(os.path.join(apps_dir, f'{name}.yaml')): 184 | _reload(skip_error=True) 185 | 186 | return _programs.get(name) 187 | -------------------------------------------------------------------------------- /aify/_web_template.py: -------------------------------------------------------------------------------- 1 | import os 2 | from starlette.templating import Jinja2Templates 3 | from ._env import webui_dir, apps_dir 4 | 5 | templates = Jinja2Templates(directory=os.path.join(webui_dir(), 'templates')) 6 | 7 | apps_templates = Jinja2Templates(directory=os.path.join(apps_dir(), 'templates')) 8 | 9 | def render(template_name, context = {}, is_apps_tempates=False): 10 | """Render a template.""" 11 | async def _request(request): 12 | context['request'] = request 13 | tpls = apps_templates if is_apps_tempates else templates 14 | return tpls.TemplateResponse(template_name, context=context) 15 | return _request 16 | 17 | def apps_render(template_name, context = {}): 18 | return render(template_name=template_name, context=context, is_apps_tempates=True) -------------------------------------------------------------------------------- /aify/embeddings.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pandas as pd 3 | import numpy as np 4 | 5 | from . import embeddings_openai 6 | from . import _env 7 | 8 | 9 | def embed(text: str, max_length=None, vendor=None, model_name=None): 10 | """Genereate embeddings. 11 | """ 12 | text = text if not max_length else text[:max_length] 13 | 14 | if vendor == 'sentence-transformers': 15 | from . import embeddings_sentence_transformers 16 | return embeddings_sentence_transformers.embed(text=text, model=model_name) 17 | 18 | return embeddings_openai.embed(text=text) 19 | 20 | async def aembed(text: str, max_length=None, vendor=None, model_name=None): 21 | """Genereate embeddings. 22 | 23 | Only OpenAI is currently supported. 24 | """ 25 | text = text if not max_length else text[:max_length] 26 | 27 | if vendor == 'sentence-transformers': 28 | from . import embeddings_sentence_transformers 29 | return await embeddings_sentence_transformers.aembed(text=text, model=model_name) 30 | 31 | return await embeddings_openai.aembed(text=text if not max_length else text[:max_length]) 32 | 33 | def cosine_similarity(a, b): 34 | """This is the Cosine Similarity algorithm.""" 35 | return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)) 36 | 37 | def _cal_score(collection_name, embeds, limit): 38 | df = _load_collections(collection_name).copy(deep=False) 39 | 40 | df['score'] = df['embedding'].apply( 41 | lambda x: cosine_similarity(x, embeds)) 42 | res = df.sort_values('score', ascending=False).head( 43 | limit).drop(['embedding'], axis=1) 44 | #return res.values.todict() 45 | return res.to_json(orient="records", force_ascii=False, double_precision=4) 46 | 47 | def search(collection_name: str, text: str, n=3, vendor=None, model_name=None): 48 | """Searches the specified collection.""" 49 | 50 | embeds = embed(text, vendor=vendor, model_name=model_name) 51 | 52 | return _cal_score(collection_name=collection_name, embeds=embeds, limit=n) 53 | 54 | async def asearch(collection_name: str, text: str, n=3, vendor=None, model_name=None): 55 | """Searches the specified collection.""" 56 | 57 | embeds = await aembed(text, vendor=vendor, model_name=model_name) 58 | 59 | return _cal_score(collection_name=collection_name, embeds=embeds, limit=n) 60 | 61 | def build_csv(from_file: str, to_file: str, vendor=None, model_name=None): 62 | """Builds word embeddings for a CSV file""" 63 | try: 64 | df = pd.read_csv(from_file) 65 | df['embedding'] = df.apply(lambda x: embed(x.to_string(), vendor=vendor, model_name=model_name), axis=1) 66 | df.to_csv(to_file, index=False) 67 | except Exception as e: 68 | raise e 69 | 70 | 71 | _embeds = {} 72 | 73 | def _load_csv_file(name: str): 74 | csv_file = os.path.join(_env.apps_dir(), 'embeddings', f'{name}.csv') 75 | if os.path.exists(f"{csv_file}.gz"): 76 | csv_file = f"{csv_file}.gz" 77 | elif not os.path.exists(csv_file): 78 | return 79 | 80 | df = pd.read_csv(csv_file) 81 | df['embedding'] = df['embedding'].apply(eval).apply(np.array) 82 | 83 | return df 84 | 85 | def _load_pickle_file(name: str): 86 | pickle_file = os.path.join(_env.apps_dir(), 'embeddings', f'{name}.pkl') 87 | if os.path.exists(f"{pickle_file}.gz"): 88 | pickle_file = f"{pickle_file}.gz" 89 | elif not os.path.exists(pickle_file): 90 | return 91 | 92 | df = pd.read_pickle(pickle_file) 93 | 94 | return df 95 | 96 | def _load_collections(name: str): 97 | global _embeds 98 | if name not in _embeds: 99 | df = _load_pickle_file(name=name) 100 | if df is None: 101 | df = _load_csv_file(name=name) 102 | 103 | if df is not None: 104 | _embeds[name] = df 105 | return _embeds.get(name) 106 | -------------------------------------------------------------------------------- /aify/embeddings_openai.py: -------------------------------------------------------------------------------- 1 | import os 2 | import openai 3 | 4 | 5 | def _set_env(): 6 | if "OPENAI_API_KEY" in os.environ: 7 | openai.api_key = os.environ["OPENAI_API_KEY"] 8 | if "OPENAI_API_BASE" in os.environ: 9 | openai.api_base = os.environ["OPENAI_API_BASE"] 10 | 11 | 12 | def embed(text: str, model='text-embedding-ada-002'): 13 | """Generate embeddings.""" 14 | _set_env() 15 | return openai.Embedding.create( 16 | input=[text], 17 | model=model 18 | )['data'][0]['embedding'] 19 | 20 | 21 | async def aembed(text: str, model='text-embedding-ada-002'): 22 | """Generate embeddings.""" 23 | _set_env() 24 | res = await openai.Embedding.acreate( 25 | input=[text], 26 | model=model 27 | ) 28 | return res['data'][0]['embedding'] 29 | -------------------------------------------------------------------------------- /aify/embeddings_sentence_transformers.py: -------------------------------------------------------------------------------- 1 | from sentence_transformers import SentenceTransformer 2 | 3 | _models = {} 4 | 5 | def _get_model(model_name): 6 | if model_name not in _models: 7 | _models[model_name] = SentenceTransformer(model_name) 8 | return _models[model_name] 9 | 10 | def embed(text: str, model=None): 11 | model_name = model if model is not None else "all-MiniLM-L6-v2" 12 | return _get_model(model_name).encode([text], convert_to_tensor=False)[0].tolist() 13 | 14 | async def aembed(text: str, model=None): 15 | model_name = model if model is not None else "all-MiniLM-L6-v2" 16 | return _get_model(model_name).encode([text], convert_to_tensor=False)[0].tolist() -------------------------------------------------------------------------------- /aify/memories/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shellc/aify/1b6062461098c438ae554547712ae62d9f2bacfe/aify/memories/__init__.py -------------------------------------------------------------------------------- /aify/memories/google_cloud_datastore.py: -------------------------------------------------------------------------------- 1 | import time 2 | from typing import List, Dict 3 | import uuid as _uuid 4 | from google.cloud import datastore 5 | 6 | namespace = _uuid.uuid1() 7 | 8 | 9 | def uuid(): 10 | return _uuid.uuid5(namespace, _uuid.uuid1().hex).hex 11 | 12 | 13 | datastore_client = datastore.Client() 14 | 15 | 16 | def save(program_name: str, session_id: str, role: str, content: str) -> None: 17 | """Save memory for the specified session of the application.""" 18 | 19 | m = { 20 | 'role': role, 21 | 'content': content, 22 | 'created': time.time() 23 | } 24 | 25 | entity = datastore.Entity( 26 | key=datastore_client.key('memories', f'{program_name}_{session_id}', 'session_memories', uuid()), 27 | exclude_from_indexes=("content",) 28 | ) 29 | entity.update(m) 30 | datastore_client.put(entity) 31 | 32 | session_key = datastore_client.key( 33 | 'sessions', program_name, 'session_id', session_id 34 | ) 35 | session_entity = datastore.Entity( 36 | key=session_key, 37 | exclude_from_indexes=("content",) 38 | ) 39 | session_entity.update(m) 40 | session_entity.update({ 41 | 'program_name': program_name, 42 | 'session_id': session_id 43 | }) 44 | 45 | datastore_client.put(session_entity) 46 | 47 | 48 | def read(program_name: str, session_id: str, n=10, max_len=4096) -> List[Dict]: 49 | query = datastore_client.query( 50 | kind='session_memories', ancestor=datastore_client.key('memories', f'{program_name}_{session_id}')) 51 | query.order = ["-created"] 52 | 53 | memories = [x for x in query.fetch(limit=n)] 54 | 55 | return memories[::-1] 56 | 57 | 58 | def sessions(program_name: str) -> List[Dict]: 59 | res = [] 60 | 61 | key = datastore_client.key('sessions', program_name) 62 | query = datastore_client.query(kind='session_id', ancestor=key) 63 | # query.order = ["-created"] 64 | 65 | sessions = query.fetch() 66 | 67 | for s in sessions: 68 | res.append({ 69 | 'name': s['program_name'], 70 | 'session_id': s['session_id'], 71 | 'last_modified': s['created'], 72 | 'latest': [dict(s)] 73 | }) 74 | return res 75 | -------------------------------------------------------------------------------- /aify/memory.py: -------------------------------------------------------------------------------- 1 | import os 2 | import glob 3 | import json 4 | from typing import List, Dict 5 | from ._env import apps_dir 6 | 7 | def _memories_dirs(): 8 | memories_dir = os.path.join(apps_dir(), 'memories') 9 | if not os.path.exists(memories_dir): 10 | os.mkdir(memories_dir) 11 | 12 | return memories_dir 13 | 14 | def save(program_name:str, session_id: str, role: str, content: str) -> None: 15 | """Save memory for the specified session of the application.""" 16 | 17 | prog_session_dir = os.path.join(_memories_dirs(), program_name) 18 | if not os.path.exists(prog_session_dir): 19 | os.mkdir(prog_session_dir) 20 | 21 | with open(os.path.join(prog_session_dir, session_id), 'a') as f: 22 | f.write(json.dumps({ 23 | 'role': role, 24 | 'content': content 25 | })) 26 | f.write('\n') 27 | 28 | def read(program_name: str, session_id: str, n = 10, max_len = 4096) -> List[Dict]: 29 | """Read memories from the specified session of the application.""" 30 | fname = os.path.join(_memories_dirs(), program_name, session_id) 31 | if not os.path.exists(fname): 32 | return [] 33 | 34 | fsize = os.path.getsize(fname) 35 | 36 | pos = fsize - max_len if fsize > max_len else 0 37 | lines = [] 38 | with open(fname) as f: 39 | f.seek(pos) 40 | lines = f.readlines() 41 | lines = lines[n * -1:] 42 | 43 | memories = [] 44 | for line in lines: 45 | try: 46 | m = json.loads(line.strip()) 47 | memories.append(m) 48 | except Exception as e: 49 | pass 50 | return memories 51 | 52 | def sessions(program_name: str) -> List[Dict]: 53 | """Get the session list of the specified application.""" 54 | res = [] 55 | for f in glob.glob(os.path.join(_memories_dirs(), program_name, "*")): 56 | mtime = os.path.getmtime(f) 57 | p = os.path.abspath(f).split('/') 58 | session_id = p[-1] 59 | 60 | latest = [] 61 | memories = read(program_name=program_name, session_id=session_id, n=1) 62 | if memories and len(memories) > 0: 63 | latest = memories[-1:] 64 | 65 | res.append({ 66 | 'name': program_name, 67 | 'session_id': session_id, 68 | 'last_modified': mtime, 69 | 'latest': latest 70 | }) 71 | return res 72 | -------------------------------------------------------------------------------- /bin/aify: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | python -m aify $@ -------------------------------------------------------------------------------- /build.sh: -------------------------------------------------------------------------------- 1 | cd js 2 | ./build.sh 3 | cd .. 4 | 5 | python -m build 6 | python -m twine upload --repository pypi dist/*$1*.whl 7 | -------------------------------------------------------------------------------- /docs/app_template.md: -------------------------------------------------------------------------------- 1 | # # App template 2 | 3 | Aify uses YAML files to define applications. Applications are reusable, and we also call YAML-formatted applications templates. Template files consist of the following parts: 4 | 5 | * Application descriptions 6 | * Model settings 7 | * Modules 8 | * Prompt 9 | * Variables 10 | 11 | ## Application description 12 | 13 | The application description contains attribute fields used to explain basic information about the application: 14 | 15 | * `title`: The title of the application, used for display and identification in the interface 16 | * `description`: A text that explains the functionality of the application, generally not too long. The built-in chatbot in Aify will display this text on the application list card, not required 17 | * `version`: Used to indicate the current version of the application, not required 18 | * `author`: The author of the application, not required 19 | * `website`: The website of the application author, not required 20 | * `email`: The author's email, not required 21 | * `license`: The open-source license of the author, not required 22 | 23 | ## Model 24 | 25 | The model specifies the AI model used by the application, mainly LLMs and Transformers models. An example of model settings is as follows: 26 | 27 | ``` 28 | model: 29 | type: llm 30 | vendor: openai 31 | name: gpt-3.5-turbo 32 | params: 33 | api_key: sk-xxxxx 34 | ``` 35 | 36 | Supported model list: 37 | 38 | * openai 39 | * llm 40 | * models: 41 | * gpt-3.5-turbo* 42 | * gpt-4* 43 | * params: 44 | * api_key 45 | * api_base 46 | 47 | ## Modules 48 | 49 | Modules are Python modules loaded during the runtime of the application, which can extend the capabilities of the application. For example: 50 | 51 | ``` 52 | modules: 53 | memory: $AIFY_MEMORY_STORAGE 54 | my_memory_storate: my.memory_storage 55 | helpers: helpers 56 | ``` 57 | 58 | Several things happen here: 59 | 60 | 1. We override the default memory implementation with the value represented by the environment variable AIFY_MEMORY_STORAGE. 61 | 2. We define our own my.memory_storage module, using our own backend storage to store memories. 62 | 3. We define the helpers module, which contains some useful functions. 63 | 64 | The modules defined here can be called during the runtime of the application. 65 | 66 | ## Prompt 67 | 68 | The prompt defines the flow of the program. Since Aify uses guidance as the engine, the definition and format of this part are completely the same as guidance. We include the prompt definition in our application template to make it part of our application. The benefit is that we can directly call extension modules in the prompt to enhance the program's capabilities. 69 | 70 | The prompt template syntax of guidance is Handlebars. For details about guidance and Handlebars, please refer to their documentation: 71 | 72 | * Handlebars template syntax: [https://handlebarsjs.com/](https://handlebarsjs.com/) 73 | * guidance extension functions: [https://guidance.readthedocs.io/en/latest/api.html#program-creation-a-k-a-guidance-program-string](https://guidance.readthedocs.io/en/latest/api.html#program-creation-a-k-a-guidance-program-string) 74 | 75 | 76 | ## Variables 77 | 78 | Variables define the input and output of the application, making it easier for higher-level applications to handle and interact with the application. The RESTful API and built-in chatbot only output the defined variables. 79 | 80 | The format for defining variables is as follows: 81 | 82 | ``` 83 | variables: 84 | - name: prompt 85 | type: input 86 | data_type: string 87 | required: true 88 | - name: answer 89 | type: output 90 | data_type: string 91 | required: true 92 | ``` 93 | 94 | These definitions can be retrieved through the API for higher-level applications to customize the interface. -------------------------------------------------------------------------------- /docs/assets/images/screenshots/aify_webui_new_start_1_screenshot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shellc/aify/1b6062461098c438ae554547712ae62d9f2bacfe/docs/assets/images/screenshots/aify_webui_new_start_1_screenshot.png -------------------------------------------------------------------------------- /docs/assets/images/screenshots/aify_webui_new_start_screenshot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shellc/aify/1b6062461098c438ae554547712ae62d9f2bacfe/docs/assets/images/screenshots/aify_webui_new_start_screenshot.png -------------------------------------------------------------------------------- /docs/assets/images/screenshots/aify_webui_screenshot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shellc/aify/1b6062461098c438ae554547712ae62d9f2bacfe/docs/assets/images/screenshots/aify_webui_screenshot.png -------------------------------------------------------------------------------- /docs/customized/main.html: -------------------------------------------------------------------------------- 1 | {% extends "base.html" %} 2 | 3 | {% block extrahead %} 4 | 6 | {% endblock extrahead %} -------------------------------------------------------------------------------- /docs/deploy_to_clouds/google_appengine.md: -------------------------------------------------------------------------------- 1 | # Deploy to google app engine 2 | 3 | You need to install and configure your Google Cloud CLI before deploying your apps to Google Cloud App Engine. [https://cloud.google.com/sdk/docs/install](https://cloud.google.com/sdk/docs/install) 4 | 5 | Examples: [https://github.com/shellc/aify/tree/main/examples](https://github.com/shellc/aify/tree/main/examples) 6 | 7 | ``` 8 | 9 | cd examples 10 | 11 | cp deploy-to-google-appengine/requirements.txt . 12 | 13 | gcloud app deploy ./deploy-to-google-appengine/index.yaml 14 | 15 | gcloud app deploy --appyaml ./deploy-to-google-appengine/app.yaml 16 | 17 | ``` 18 | 19 | Make sure that the memory module in your app template is specified as `aify.memories.google_cloud_datastore`. You can also set it as an environment variable in the .env file. 20 | 21 | .env: 22 | ```bash 23 | 24 | AIFY_MEMORY_STORAGE=aify.memories.google_cloud_datastore 25 | 26 | ``` 27 | 28 | your_app_template.yaml: 29 | ``` 30 | 31 | modules: 32 | memory: $AIFY_MEMORY_STORAGE 33 | 34 | ``` 35 | 36 | Notice: Google Cloud App Engine does not support streaming responses, so aify does not either when it is deployed on Google Cloud App Engine. 37 | 38 | * [https://cloud.google.com/appengine/docs/standard/how-requests-are-handled?tab=python#streaming_responses](https://cloud.google.com/appengine/docs/standard/how-requests-are-handled?tab=python#streaming_responses) 39 | * [https://cloud.google.com/appengine/docs/flexible/how-requests-are-handled?hl=zh-cn&tab=python#x-accel-buffering](https://cloud.google.com/appengine/docs/flexible/how-requests-are-handled?hl=zh-cn&tab=python#x-accel-buffering) -------------------------------------------------------------------------------- /docs/dive_into_apps.md: -------------------------------------------------------------------------------- 1 | # Dive into apps 2 | 3 | User apps are stored in a directory. The files that comprise the application program include YAML files, Python code, environment variables, authentication tokens, and other static resources(such as word embeddings). 4 | 5 | The directory also contains data generated during the program's execution 6 | 7 | ## Directory structure 8 | 9 | ``` 10 | your_apps_dir/ 11 | .evn 12 | .tokens 13 | your_app_1.yaml 14 | your_app_2.yaml 15 | memories/ 16 | embeddings/ 17 | static/ 18 | helpers.py 19 | ``` 20 | 21 | * .env The file contains environment variable settings, such as OPENAI_API_KEY. 22 | * .token The file contains authentication information for users and permissions, used for API authentication. 23 | * your_app_*.yaml The file defines the application, and a folder can contain multiple applications. 24 | * memories The folder is automatically generated during program execution and is used to store data generated by the program. 25 | * embeddings The folder is used to store text embedding data, and the application performs similarity searches during runtime. 26 | * static The folder stores static resources required by the application. 27 | * helpers.py is a collection of custom Python functions that can be invoked in the application. You can define multiple pypthon files and functions of any complexity. 28 | 29 | ## Set environment variables 30 | 31 | Example: 32 | 33 | ``` 34 | OPENAI_API_KEY=sk-YOUR_OPENAI_API_KEY 35 | OPENAI_API_BASE=https://YOU_OPENAI_API_BASE 36 | ``` 37 | 38 | ## Authorization 39 | 40 | Example: 41 | 42 | ``` 43 | YOUR_GENEREATED_TOKEN_STR_FOR_AUTH|username|write 44 | ANATHORE_TOKEN_STR|🦸🏻‍♀️| 45 | ``` 46 | 47 | The last column is for user permissions. "write" indicates that the user has write permission. Executing an application requires write permission, otherwise the user only has read permission. 48 | 49 | The ".tokens" file is not necessary. If there is no ".tokens" file in the application folder, it means that authentication is not required and all users can read and write anonymously. 50 | 51 | ## Memories 52 | 53 | Chatbot applications typically need to save conversation context data. We refer to this data as "memories." You can also implement your own memory storage method. Aify is a highly scalable framework. 54 | 55 | ## Embeddings 56 | 57 | The "embeddings" directory contains some CSV files that store word embeddings. It is used to retrieve data based on similarity during program execution, allowing the application to dynamically utilize external data. Similarly, you can also implement your own embedding retrieval method. 58 | 59 | ## Helper functions 60 | 61 | Helper functions is a collection of custom Python functions that can be invoked in the application. You can define multiple pypthon files and functions of any complexity. 62 | 63 | This can be used to enable communication between applications and external systems. For example, it allows for real-time access to a search engine's interface to retrieve data. 64 | -------------------------------------------------------------------------------- /docs/enhance_with_python.md: -------------------------------------------------------------------------------- 1 | # Enhance with Python 2 | 3 | By utilizing Python, the application can be enhanced to enable collaboration between AI models and Python code. 4 | 5 | There are two main types of extension methods. One is to write your own helper functions and call them in the application template to enhance the flexibility of the AI model. For example, using custom functions to format input and output, access external data sources in real-time, customize memory storage, customize embeddings methods, call other AI models, and so on. The other is to add some APIs or even web pages to enrich the capabilities of the application. The reason why this can be done is that aify is actually both a development framework and a complete server. Can provide general web service capabilities. 6 | 7 | ## Helper functions 8 | 9 | ## Web API 10 | 11 | Coming soon.. -------------------------------------------------------------------------------- /docs/examples/chatbot.md: -------------------------------------------------------------------------------- 1 | # Chatbot 2 | 3 | Coming soon... -------------------------------------------------------------------------------- /docs/examples/llm_generation.md: -------------------------------------------------------------------------------- 1 | # LLM Generation 2 | 3 | Coming soon... -------------------------------------------------------------------------------- /docs/getting_started.md: -------------------------------------------------------------------------------- 1 | # Getting started 2 | 3 | Welcome to Aify, the AI-native application framework and runtime that allows you to ship your AI applications in seconds! With Aify, you can easily build and deploy AI-powered applications using a simple YAML file. In this guide, we will walk you through the steps to get started with Aify and create your first AI application. 4 | 5 | 6 | ## Installation 7 | 8 | To begin, make sure you have the following prerequisites installed on your system: 9 | 10 | * Python 3.8 or higher 11 | * Pip package manager 12 | 13 | Once you have the prerequisites, you can install Aify by running the following command in your terminal: 14 | 15 | ```bash 16 | pip install aify 17 | ``` 18 | 19 | ## Create your first app 20 | 21 | You need to prepare a directory for your applications: 22 | 23 | ```bash 24 | mkdir ./apps 25 | ``` 26 | 27 | Now you can start the aify service and then access [http://localhost:2000](http://localhost:2000) using a browser, and aify will greet you. 28 | 29 | ```bash 30 | aify run ./apps 31 | ``` 32 | 33 | ![aify screenshot](./assets/images/screenshots/aify_webui_new_start_screenshot.png) 34 | 35 | Now it's just a blank application, you can't use it for anything. Next, we will create a chatbot. 36 | 37 | Creating a YAML file aify uses a YAML file to define your AI application. This file contains all the necessary configurations and settings for your application. Here's an example of a basic YAML file: 38 | 39 | ```yaml 40 | title: Chatbot 41 | 42 | model: 43 | vendor: openai 44 | name: gpt-3.5-turbo 45 | params: 46 | api_key: 47 | 48 | prompt: | 49 | {{#system~}} 50 | You are a helpful and terse assistant. 51 | {{~/system}} 52 | 53 | {{#each (memory.read program_name session_id n=3)}} 54 | {{~#if this.role == 'user'}} 55 | {{#user~}} 56 | {{this.content}} 57 | {{~/user}} 58 | {{/if~}} 59 | {{~#if this.role == 'assistant'}} 60 | {{#assistant~}} 61 | {{this.content}} 62 | {{~/assistant}} 63 | {{/if~}} 64 | {{~/each}} 65 | 66 | {{#user~}} 67 | {{prompt}} 68 | {{memory.save program_name session_id 'user' prompt}} 69 | {{~/user}} 70 | 71 | {{#assistant~}} 72 | {{gen 'answer' temperature=0 max_tokens=2000}} 73 | {{memory.save program_name session_id 'assistant' answer}} 74 | {{~/assistant}} 75 | 76 | variables: 77 | - name: prompt 78 | type: input 79 | - name: answer 80 | type: output 81 | ``` 82 | 83 | Here are some simple explanations about this YAML file: 84 | 85 | * The ***title*** represents the name of this application. 86 | * The ***model*** section defines the AI model used by this application and the runtime parameters required by the model. 87 | * The ***prompt*** section is used to drive the application's execution. Aify uses the guidance software package provided by Microsoft to drive the execution of the AI program. Guidance provides a way to operate as a Chain of Thought. Since guidance uses the Handlebars template system, the format of this section is actually a Handlebars template.The prompt section contains some helper functions that allow the AI model to dynamically change its runtime behavior, helping us achieve more complex functionality. These functions are built-in to aify, but you can also write your own helper functions in Python to accomplish specific tasks. 88 | * The terms "system," "user," and "assistant" are used to define the roles in an LLM-based chat task. 89 | * "memory.read" and "memory.write" are built-in helper functions in Aify, used to save and load the conversation history of users and AI. 90 | * "each" and "if" are branch control statements provided by Handlebars. 91 | * "gen" is the function provided by "guidance" to indicate the execution of LLM generation tasks. 92 | * The ***variables*** section defines the input and output variables of the application, which are used for external systems to access the data generated by AI through an API. 93 | 94 | ## Play with your AI app 95 | 96 | Now go back to your browser and refresh the page. You will see the application you just created. You can have some conversations with it, just like ChatGPT. 97 | 98 | ![aify screenshot](./assets/images/screenshots/aify_webui_new_start_1_screenshot.png) 99 | 100 | ## aify is not a chatbot 101 | 102 | Although aify provides a chatbot interface, its main purpose is not to provide a replacement for ChatGPT or a competitive conversation application. 103 | 104 | The chatbot UI is only for convenient debugging of AI applications. Of course, you can indeed use it as a chatbot for daily use. 105 | 106 | The main goal of aify is to provide an efficient framework for developing and deploying AI applications. 107 | 108 | If your goal is to develop your own complex AI applications, you should pay more attention to the APIs and extension mechanisms provided by aify. -------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 | # Home 2 | 3 | ** Build your AI-native application in seconds. ** 4 | 5 | 🛠️ AI-native application framework and runtime. Simply write a YAML file. 6 | 7 | 🤖 Ready-to-use AI chatbot UI. 8 | 9 | ## Screenshots 10 | 11 | ![Webui screenshot](assets/images/screenshots/aify_webui_screenshot.png) -------------------------------------------------------------------------------- /docs/rest_api.md: -------------------------------------------------------------------------------- 1 | # RESTful API 2 | 3 | Coming soon... 4 | 5 | ## Execute program 6 | 7 | ``` 8 | PUT /api/apps/{program_name}/{session_id} 9 | ``` 10 | 11 | ## Get memories 12 | 13 | ``` 14 | GET /api/apps/{program_name}/{session_id}/memories 15 | ``` 16 | 17 | ## List apps 18 | 19 | ``` 20 | GET /api/apps 21 | ``` 22 | 23 | ## List sessions 24 | 25 | ``` 26 | GET /api/sessions 27 | ``` 28 | 29 | ## Get user 30 | 31 | ``` 32 | GET /api/user 33 | ``` -------------------------------------------------------------------------------- /examples/.env: -------------------------------------------------------------------------------- 1 | #AIFY_MEMORY_STORAGE=aify.memory 2 | OPENAI_API_KEY= 3 | -------------------------------------------------------------------------------- /examples/.gitignore: -------------------------------------------------------------------------------- 1 | dataset 2 | embeddings 3 | memories 4 | .env 5 | .tokens 6 | .gcloudignore 7 | requirements.txt -------------------------------------------------------------------------------- /examples/chatbot.yml: -------------------------------------------------------------------------------- 1 | title: Chatbot 2 | description: Chatbot like ChatGPT-3.5 3 | version: 1.0 4 | author: shellc 5 | website: https://aify.run 6 | email: shenggong.wang@gmail.com 7 | 8 | model: 9 | type: llm 10 | vendor: openai 11 | name: gpt-3.5-turbo 12 | #params: 13 | # api_key: sk- 14 | # api_base: https:// 15 | 16 | modules: 17 | memory: $AIFY_MEMORY_STORAGE 18 | 19 | prompt: | 20 | {{#system~}} 21 | You are a helpful and terse assistant. 22 | {{~/system}} 23 | 24 | {{#each (memory.read program_name session_id n=3)}} 25 | {{~#if this.role == 'user'}} 26 | {{#user~}} 27 | {{this.content}} 28 | {{~/user}} 29 | {{/if~}} 30 | {{~#if this.role == 'assistant'}} 31 | {{#assistant~}} 32 | {{this.content}} 33 | {{~/assistant}} 34 | {{/if~}} 35 | {{~/each}} 36 | 37 | {{#user~}} 38 | {{prompt}} 39 | {{memory.save program_name session_id 'user' prompt}} 40 | {{~/user}} 41 | 42 | {{#assistant~}} 43 | {{gen 'answer' temperature=0 max_tokens=2000}} 44 | {{memory.save program_name session_id 'assistant' answer}} 45 | {{~/assistant}} 46 | 47 | variables: 48 | - name: prompt 49 | type: input 50 | - name: answer 51 | type: output 52 | -------------------------------------------------------------------------------- /examples/comments.yml: -------------------------------------------------------------------------------- 1 | title: Code comments 2 | icon_emoji: 💻 3 | description: | 4 | Translate code comments. 5 | model: 6 | type: llm 7 | vendor: openai 8 | name: gpt-3.5-turbo 9 | modules: 10 | memory: $AIFY_MEMORY_STORAGE 11 | prompt: | 12 | {{#system~}} 13 | You are a helpful and terse assistant. 14 | Your task is to detect the language of the given text and translate it into English suitable for use as program comments or documentation. 15 | Ensure it remains clear and concise. If the given text is in English, please check the spelling and improve it. 16 | {{~/system}} 17 | 18 | {{#user~}} 19 | {{prompt}} 20 | {{memory.save program_name session_id 'user' prompt}} 21 | {{~/user}} 22 | 23 | {{#assistant~}} 24 | {{gen 'answer' temperature=0 max_tokens=2000}} 25 | {{memory.save program_name session_id 'assistant' answer}} 26 | {{~/assistant}} 27 | 28 | variables: 29 | - name: answer -------------------------------------------------------------------------------- /examples/deploy-to-google-appengine/app.yaml: -------------------------------------------------------------------------------- 1 | # https://cloud.google.com/appengine/docs/standard/reference/app-yaml?tab=python 2 | runtime: python311 # or another supported version 3 | 4 | instance_class: F2 5 | 6 | entrypoint: python -m aify run --port $PORT 7 | 8 | handlers: 9 | - url: /apps/static 10 | static_dir: static 11 | 12 | - url: /.* 13 | secure: always 14 | redirect_http_response_code: 301 15 | script: auto 16 | -------------------------------------------------------------------------------- /examples/deploy-to-google-appengine/index.yaml: -------------------------------------------------------------------------------- 1 | indexes: 2 | - kind: session_memories 3 | ancestor: yes 4 | properties: 5 | - name: created 6 | direction: desc 7 | -------------------------------------------------------------------------------- /examples/emoji.yml: -------------------------------------------------------------------------------- 1 | title: Best emoji 2 | description: Find the top 10 emojis that best match the content entered by the user. 3 | icon_emoji: 😈 4 | model: 5 | type: llm 6 | vendor: openai 7 | name: gpt-3.5-turbo 8 | modules: 9 | memory: $AIFY_MEMORY_STORAGE 10 | prompt: | 11 | {{#system~}} 12 | You are a helpful and terse assistant. 13 | 14 | Find the top 10 emojis that best match the content entered by the user. Output these emojis as a list with short description. 15 | {{~/system}} 16 | 17 | {{#user~}} 18 | {{prompt}} 19 | {{memory.save program_name session_id 'user' prompt}} 20 | {{~/user}} 21 | 22 | {{#assistant~}} 23 | {{gen 'answer' temperature=0 max_tokens=2000}} 24 | {{memory.save program_name session_id 'assistant' answer}} 25 | {{~/assistant}} 26 | variables: 27 | - name: answer -------------------------------------------------------------------------------- /examples/helpers.py: -------------------------------------------------------------------------------- 1 | import tiktoken 2 | 3 | def count_tokens(text: str, encoding_name='cl100k_base'): 4 | encoding = tiktoken.get_encoding(encoding_name) 5 | return len(encoding.encode(text)) -------------------------------------------------------------------------------- /examples/indie_hacker.yml: -------------------------------------------------------------------------------- 1 | title: Indie Hacker 2 | icon_emoji: 🚀 3 | description: | 4 | Contributed by: shellc@github 5 | model: 6 | type: llm 7 | vendor: openai 8 | name: gpt-3.5-turbo 9 | modules: 10 | memory: $AIFY_MEMORY_STORAGE 11 | prompt: | 12 | {{#system~}} 13 | You are a helpful and terse assistant. 14 | 15 | Act as an successful indie hacker who build popular saas and mobile apps. 16 | Provide commercial and technical consultation for individual developers, aiding them in achieving success in creative ideation and product-market fit. 17 | {{~/system}} 18 | 19 | {{#user~}} 20 | {{prompt}} 21 | {{memory.save program_name session_id 'user' prompt}} 22 | {{~/user}} 23 | 24 | {{#assistant~}} 25 | {{gen 'answer' temperature=0 max_tokens=2000}} 26 | {{memory.save program_name session_id 'assistant' answer}} 27 | {{~/assistant}} 28 | 29 | variables: 30 | - name: answer -------------------------------------------------------------------------------- /examples/llama.yml: -------------------------------------------------------------------------------- 1 | title: LLMA Chatbot 2 | description: LLMA-2-7b 3 | version: 1.0 4 | author: shellc 5 | website: https://aify.run 6 | email: shenggong.wang@gmail.com 7 | 8 | model: 9 | type: transformers 10 | name: meta-llama/Llama-2-7b-chat-hf 11 | 12 | modules: 13 | memory: $AIFY_MEMORY_STORAGE 14 | 15 | prompt: | 16 | {{#system~}} 17 | You are a helpful and terse assistant. 18 | {{~/system}} 19 | 20 | {{#each (memory.read program_name session_id n=3)}} 21 | {{~#if this.role == 'user'}} 22 | {{#user~}} 23 | {{this.content}} 24 | {{~/user}} 25 | {{/if~}} 26 | {{~#if this.role == 'assistant'}} 27 | {{#assistant~}} 28 | {{this.content}} 29 | {{~/assistant}} 30 | {{/if~}} 31 | {{~/each}} 32 | 33 | {{#user~}} 34 | {{prompt}} 35 | {{memory.save program_name session_id 'user' prompt}} 36 | {{~/user}} 37 | 38 | {{#assistant~}} 39 | {{gen 'answer' temperature=0 max_tokens=2000}} 40 | {{memory.save program_name session_id 'assistant' answer}} 41 | {{~/assistant}} 42 | 43 | variables: 44 | - name: prompt 45 | type: input 46 | - name: answer 47 | type: output 48 | -------------------------------------------------------------------------------- /examples/translator.yml: -------------------------------------------------------------------------------- 1 | title: English Translator 2 | icon_emoji: 🇬🇧 3 | description: | 4 | English Translator and Improver. 5 | Contributed by: f@github 6 | model: 7 | type: llm 8 | vendor: openai 9 | name: gpt-3.5-turbo 10 | modules: 11 | memory: $AIFY_MEMORY_STORAGE 12 | prompt: | 13 | {{#system~}} 14 | You are a helpful and terse assistant. 15 | {{~/system}} 16 | 17 | {{#user~}} 18 | I want you to act as an English translator, spelling corrector and improver. 19 | I will speak to you in any language and you will detect the language, 20 | translate it and answer in the corrected and improved version of my text, in English. 21 | 22 | I want you to only reply the correction, the improvements and nothing else, do not write explanations. 23 | My first sentence is “{{prompt}}” 24 | {{memory.save program_name session_id 'user' prompt}} 25 | {{~/user}} 26 | 27 | {{#assistant~}} 28 | {{gen 'answer' temperature=0 max_tokens=2000}} 29 | {{memory.save program_name session_id 'assistant' answer}} 30 | {{~/assistant}} 31 | 32 | variables: 33 | - name: answer -------------------------------------------------------------------------------- /js/.gitignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | build -------------------------------------------------------------------------------- /js/build.sh: -------------------------------------------------------------------------------- 1 | npm run build 2 | 3 | mkdir -p ../webui/static/aify 4 | cp build/* ../webui/static/aify -------------------------------------------------------------------------------- /js/config-overrides.js: -------------------------------------------------------------------------------- 1 | //const path = require('path'); 2 | 3 | const entries = require('react-app-rewire-multiple-entry')([ 4 | { 5 | entry: 'src/index.js', 6 | }, 7 | ]) 8 | 9 | const MiniCssExtractPlugin = require("mini-css-extract-plugin"); 10 | 11 | module.exports = { 12 | webpack: function(config, env) { 13 | 14 | config.plugins=[ 15 | new MiniCssExtractPlugin({ 16 | filename: "aify.css", 17 | }) 18 | ], 19 | config.output.library = 'aify'; 20 | config.output.libraryExport = 'default'; 21 | config.output.libraryTarget = 'umd'; 22 | //config.output.path=path.join(__dirname, '../webui/static/aify/js'), 23 | //config.output.filename = 'aify-[chunkhash].js'; 24 | config.output.filename = 'aify.js'; 25 | //entries.addMultiEntry(config); 26 | return config; 27 | }, 28 | 29 | 30 | 31 | 32 | 33 | devServer: function (configFunction) { 34 | return function(proxy, allowedHost) { 35 | const config = configFunction(proxy, allowedHost); 36 | //https://github.com/chimurai/http-proxy-middleware/issues/371 37 | config.compress = false; 38 | return config; 39 | } 40 | } 41 | } -------------------------------------------------------------------------------- /js/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "aify", 3 | "version": "0.1.0", 4 | "private": true, 5 | "dependencies": { 6 | "@ant-design/icons": "^5.1.4", 7 | "@microsoft/fetch-event-source": "^2.0.1", 8 | "antd": "^5.7.0", 9 | "bootstrap": "^5.3.0", 10 | "react": "^18.2.0", 11 | "react-dom": "^18.2.0", 12 | "react-markdown": "^8.0.7", 13 | "react-scripts": "^5.0.1", 14 | "remark-gfm": "^3.0.1" 15 | }, 16 | "scripts": { 17 | "start": "react-app-rewired start", 18 | "build": "react-app-rewired build", 19 | "test": "react-app-rewired test", 20 | "eject": "react-scripts eject" 21 | }, 22 | "eslintConfig": { 23 | "extends": [ 24 | "react-app", 25 | "react-app/jest" 26 | ] 27 | }, 28 | "browserslist": { 29 | "production": [ 30 | ">0.2%", 31 | "not dead", 32 | "not op_mini all" 33 | ], 34 | "development": [ 35 | "last 1 chrome version", 36 | "last 1 firefox version", 37 | "last 1 safari version" 38 | ] 39 | }, 40 | "proxy": "http://127.0.0.1:2000", 41 | "devDependencies": { 42 | "mini-css-extract-plugin": "^2.7.6", 43 | "react-app-rewire-multiple-entry": "^2.2.3", 44 | "react-app-rewired": "^2.2.1" 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /js/public/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | Aify 8 | 9 | 10 | 11 | 12 |
13 | 14 | 19 | 20 | -------------------------------------------------------------------------------- /js/src/aify.js: -------------------------------------------------------------------------------- 1 | import React, { useEffect } from 'react'; 2 | import ReactDOM from 'react-dom/client'; 3 | import { useState } from 'react' 4 | import { Layout, List, Avatar, Space, Typography, Button, Card } from 'antd' 5 | import { MenuUnfoldOutlined, MenuFoldOutlined, MessageFilled } from '@ant-design/icons' 6 | import { 7 | 8 | } from '@ant-design/icons'; 9 | import { ReactMarkdown } from 'react-markdown/lib/react-markdown'; 10 | import remarkGfm from 'remark-gfm' 11 | import { Chat } from './chat' 12 | import Link from 'antd/es/typography/Link'; 13 | 14 | const { Sider } = Layout 15 | const { Text } = Typography 16 | const { Meta } = Card 17 | 18 | let welcome_message = ` 19 | # Welcome to aify.run 20 | 21 | ##### Build your AI-native application in seconds. 22 | 23 | 🛠️ AI-native application framework and runtime. Simply write a YAML file. 24 | 25 | 🤖 Ready-to-use AI chatbot UI. 26 | 27 | 🚀 [Getting started: Create your first AI application](https://docs.aify.run/getting_started/) 28 | ` 29 | 30 | const Aify = (props) => { 31 | const [leftCollapsed, setLeftCollapsed] = useState(false); 32 | const [rightCollapsed, setRightCollapsed] = useState(false); 33 | const [apps, setApps] = useState(); 34 | const [appMap, setAppMap] = useState({}); 35 | const [currentAppName, setCurrentAppName] = useState(null); 36 | const [currentSessionId, setCurrentSessionId] = useState(null); 37 | const [sessions, setSessions] = useState(); 38 | const [welcomMessage, setWelcomeMessage] = useState(); 39 | const [user, setUser] = useState(); 40 | 41 | const loadApps = () => { 42 | fetch('/api/apps') 43 | .then(r => r.json()) 44 | .then(apps => { 45 | let m = {}; 46 | apps.forEach(app => { 47 | m[app.name] = app; 48 | }); 49 | setApps(apps); 50 | setAppMap(m); 51 | }) 52 | } 53 | 54 | const loadSessions = () => { 55 | fetch('/api/sessions') 56 | .then(r => r.json()) 57 | .then(sessions => setSessions(sessions)) 58 | } 59 | 60 | const loadWelcomeMessage = () => { 61 | fetch('/apps/static/welcome.md') 62 | .then(r => { 63 | if (r.status === 200) { 64 | return r.text(); 65 | } else { 66 | return welcome_message 67 | } 68 | }) 69 | .then(data => setWelcomeMessage(data)) 70 | } 71 | 72 | const loadUser = () => { 73 | fetch('/api/user') 74 | .then(r => r.json()) 75 | .then(user => setUser(user)) 76 | } 77 | 78 | useEffect(() => { 79 | loadApps(); 80 | loadSessions(); 81 | loadWelcomeMessage(); 82 | loadUser(); 83 | }, []) 84 | 85 | const createSession = (appName) => { 86 | let sessionId = crypto.randomUUID(); 87 | switchSession(appName, sessionId); 88 | } 89 | 90 | const switchSession = (appName, sessionId) => { 91 | setCurrentAppName(appName); 92 | setCurrentSessionId(sessionId); 93 | } 94 | 95 | return ( 96 | 102 | setLeftCollapsed(value)} 106 | breakpoint="lg" 107 | theme="light" 108 | style={{ 109 | overflow: 'auto', 110 | height: '100vh', 111 | backgroundColor: '#eee' 112 | }} 113 | width={300} 114 | collapsedWidth={65} 115 | trigger={null} 116 | > 117 | ( 122 | 123 | switchSession(session.name, session.session_id)} 125 | > 126 | 127 | {(appMap[session.name] && appMap[session.name]['icon_emoji']) ?? '🤖'} 128 | {!leftCollapsed ? ( 129 | 130 | 136 | {(session.latest && session.latest.length) > 0 ? session.latest[session.latest.length-1].content : ''} 137 | 138 | 139 | ) : null} 140 | 141 | 142 | 143 | 144 | ))} 145 | /> 146 | 147 | 155 |
156 |
175 | {(currentAppName != null && currentSessionId != null) ? ( 176 | 184 | ) : ( 185 |
186 | 187 | {welcomMessage} 188 | 189 |
190 | )} 191 | 192 |
193 | 194 | setRightCollapsed(value)} 198 | breakpoint="lg" 199 | theme="light" 200 | style={{ 201 | overflow: 'auto', 202 | height: '100vh', 203 | backgroundColor: '#eee' 204 | }} 205 | width={300} 206 | collapsedWidth={0} 207 | trigger={null} 208 | reverseArrow 209 | > 210 | 211 | ( 217 | 218 | createSession(app.name)} />, 225 | ]} 226 | > 227 | {app.icon_emoji ?? '🤖'}} 229 | title={app.title} 230 | /> 231 |
232 | 233 | {app.description} 234 | 235 |
236 |
237 |
238 | )} 239 | 240 | /> 241 |
242 |
243 | ); 244 | } 245 | 246 | export const create = (elementId, height) => { 247 | const root = ReactDOM.createRoot(document.getElementById(elementId)); 248 | root.render( 249 | 250 | 251 | 252 | ); 253 | } -------------------------------------------------------------------------------- /js/src/chat.js: -------------------------------------------------------------------------------- 1 | import React, { useEffect } from 'react'; 2 | 3 | import { useState } from 'react' 4 | 5 | import { 6 | Layout, 7 | Form, 8 | Input, 9 | Button, 10 | Row, 11 | Col, 12 | Avatar, 13 | Alert, 14 | } from 'antd'; 15 | 16 | import { BulbOutlined } from '@ant-design/icons' 17 | 18 | import { fetchEventSource } from '@microsoft/fetch-event-source' 19 | import ReactMarkdown from 'react-markdown' 20 | import remarkGfm from 'remark-gfm' 21 | 22 | const { TextArea } = Input; 23 | 24 | let abortController = null; 25 | 26 | export const Chat = (props) => { 27 | 28 | const [form] = Form.useForm(); 29 | const [error, setError] = useState(null); 30 | const [generating, setGenerating] = useState(false); 31 | const [currentReply, setCurrentReply] = useState(null); 32 | const [history, setHistory] = useState([]); 33 | 34 | const onKeyDown = (e) => { 35 | if (e.keyCode === 13/*enter*/ && (e.ctrlKey || e.metaKey)) { 36 | onSubmit(); 37 | } 38 | } 39 | 40 | const setHistoryScroll = () => { 41 | let history_div = document.getElementById('history'); 42 | if (history_div) { 43 | setTimeout(() => { 44 | history_div.scrollTop = history_div.scrollHeight; 45 | }, 0); 46 | } 47 | } 48 | 49 | const onSubmit = () => { 50 | setError(null); 51 | setGenerating(true); 52 | abortController = new AbortController(); 53 | 54 | let values = form.getFieldsValue(); 55 | if (!values.prompt) { 56 | //setError("Prompt required!"); 57 | setGenerating(false); 58 | return; 59 | } 60 | 61 | let prompt_variable = props.prompt_variable; 62 | if (!prompt_variable) { 63 | prompt_variable = 'prompt'; 64 | } 65 | 66 | let req = {} 67 | req[prompt_variable] = values.prompt; 68 | 69 | history.push({ role: 'user', message: values.prompt }); 70 | setHistoryScroll(); 71 | form.setFieldValue('prompt', ''); 72 | getReply(req); 73 | } 74 | 75 | const loadHistory = () => { 76 | let name = props.name; 77 | let session_id = props.session_id; 78 | 79 | if (name && session_id) { 80 | fetch(`/api/apps/${name}/${session_id}/memories`) 81 | .then(r => r.json()) 82 | .then(messages => { 83 | let t_history = []; 84 | for (let i = 0; i < messages.length; i++) { 85 | let msg = messages[i]; 86 | 87 | let content = msg.content; 88 | if (msg.extra) { 89 | content += '\n\n' + msg.extra; 90 | } 91 | 92 | t_history.push({ 93 | role: msg.role, 94 | message: content 95 | }); 96 | } 97 | setHistory(t_history); 98 | 99 | setHistoryScroll(); 100 | }); 101 | } 102 | } 103 | 104 | const getReply = (req) => { 105 | let name = props.name; 106 | let session_id = props.session_id; 107 | 108 | if (!name || !session_id) { 109 | return 110 | } 111 | 112 | var reply = ''; 113 | var variable_name = ''; 114 | 115 | fetchEventSource(`/api/apps/${name}/${session_id}?sse`, { 116 | method: 'PUT', 117 | body: JSON.stringify(req), 118 | headers: { 'Content-Type': 'application/json' }, 119 | signal: abortController.signal, 120 | onopen(response) { 121 | if (response.ok /*&& response.headers.get('content-type') === EventStreamContentType*/) { 122 | return; // everything's good 123 | } else if (response.status >= 400 && response.status < 500 && response.status !== 429) { 124 | // client-side errors are usually non-retriable: 125 | 126 | setError('Something wrong! ERR: ' + response.statusText); 127 | abortController.abort(); 128 | setGenerating(false); 129 | 130 | //throw new FatalError(); 131 | } else { 132 | abortController.abort();// don't retry 133 | setGenerating(false); 134 | //throw new RetriableError(); 135 | } 136 | }, 137 | onmessage(msg) { 138 | if (msg.event === 'error') { 139 | let j = JSON.parse(msg.data); 140 | let e = j['error']; 141 | setError(e); 142 | setGenerating(false); 143 | } else if (msg.event === 'message') { 144 | let j = JSON.parse(msg.data); 145 | let c = j['diff']; 146 | let vname = j['variable']; 147 | 148 | if (c) { 149 | if (variable_name !== '' && variable_name !== vname) { 150 | reply = reply + '\n\n'; 151 | } 152 | variable_name = vname; 153 | 154 | reply = reply + c; 155 | setCurrentReply(reply + "▁"); 156 | setHistoryScroll(); 157 | } 158 | } else if (msg.event === 'extra') { 159 | let j = JSON.parse(msg.data); 160 | reply = reply + "\n\n" + j; 161 | setCurrentReply(reply) 162 | setHistoryScroll(); 163 | } else { 164 | console.log("Unkow event: " + msg.data); 165 | } 166 | }, 167 | onerror(err) { 168 | setGenerating(false); 169 | throw err; 170 | }, 171 | onclose() { 172 | history.push({ role: 'ai', message: reply }); 173 | setCurrentReply(null); 174 | setGenerating(false); 175 | 176 | if (props.onMessageReceived) { 177 | props.onMessageReceived(); 178 | } 179 | } 180 | }); 181 | }; 182 | 183 | const abort = () => { 184 | abortController.abort(); 185 | setGenerating(false); 186 | }; 187 | 188 | useEffect(() => { 189 | loadHistory(); 190 | }, [props.name, props.session_id]); 191 | 192 | return ( 193 | 200 |
201 | 202 |
203 | {currentReply ? ( 204 | 205 | ) : null} 206 | {generating ? ( 207 |
208 | 209 |
210 | ) : null} 211 |
212 | 213 | {error ? ( 214 | 215 | ) : null} 216 |
217 |
225 | 226 | 227 | 231 |