├── .gitignore ├── LICENSE ├── README.md ├── __init__.py ├── add_tools_to_db.py ├── admin_routes.py ├── agent_routes.py ├── agents ├── __init__.py ├── clarifier │ ├── README.md │ └── clarifier_agent.py ├── main_agent.py └── planner_executor │ ├── README.md │ ├── __init__.py │ ├── execute_tool.py │ ├── planner_executor_agent.py │ ├── tool_helpers │ ├── __init__.py │ ├── all_tools.py │ ├── core_functions.py │ ├── rerun_step.py │ ├── sorting_functions.py │ ├── tool_param_types.py │ └── toolbox_manager.py │ └── toolboxes │ ├── __init__.py │ ├── data_fetching │ └── tools.py │ ├── plots │ └── tools.py │ ├── stats │ └── tools.py │ └── toolbox_prompts.py ├── auth_routes.py ├── auth_utils.py ├── config.env ├── connection_manager.py ├── create_admin_user.py ├── create_sqlite_tables.py ├── csv_routes.py ├── db_utils.py ├── defog_local.db ├── feedback_routes.py ├── generic_utils.py ├── imgo_routes.py ├── integration_routes.py ├── main.py ├── out ├── 404 ├── 404.html ├── _next │ └── static │ │ ├── chunks │ │ ├── 200-fcd158ea74b5b6c9.js │ │ ├── 209-a8949ab7cba0c2ea.js │ │ ├── 25-7fb05dc01022e2bd.js │ │ ├── 279-7bc53f84c1699471.js │ │ ├── 435-7015704cbffb83a3.js │ │ ├── 6179b68e-26cac05b7c197047.js │ │ ├── 8770e839-777f7eee86370bb0.js │ │ ├── framework-17016bf213e9c197.js │ │ ├── main-2f8e3c3a38acac99.js │ │ ├── pages │ │ │ ├── _app-d0a47cb7546f4574.js │ │ │ ├── _error-ee42a9921d95ff81.js │ │ │ ├── align-model-cf0361b2ce2d7c40.js │ │ │ ├── check-readiness-a154993c8da3a4c9.js │ │ │ ├── doc-dcf4766c050e5b31.js │ │ │ ├── extract-metadata-7e71f6756a066263.js │ │ │ ├── index-b92da338cfbf2e22.js │ │ │ ├── log-in-beee6f8bec469b4f.js │ │ │ ├── manage-tools-0ea0305e69aa79d0.js │ │ │ ├── manage-users-e13c055c741549c8.js │ │ │ ├── msal_redirect-a4253aab45cea6c0.js │ │ │ ├── oracle-frontend-eff9fc199d2175f6.js │ │ │ ├── query-data-9ba5b3b9aa6c4a60.js │ │ │ ├── view-feedback-e72be7d499923dca.js │ │ │ └── view-notebooks-31985eb96195a01a.js │ │ ├── polyfills-c67a75d1b6f99dc8.js │ │ └── webpack-4e69afc7053360d9.js │ │ ├── css │ │ ├── 2f0ea180d954b099.css │ │ ├── 494200714edbf843.css │ │ └── 4d5e7ec7defb96bd.css │ │ └── zTY4QOWMq1oj_IUOIzG7x │ │ ├── _buildManifest.js │ │ └── _ssgManifest.js ├── align-model ├── align-model.html ├── check-readiness ├── check-readiness.html ├── doc ├── doc.html ├── extract-metadata ├── extract-metadata.html ├── favicon.ico ├── index ├── index.html ├── log-in ├── log-in.html ├── logo512.png ├── manage-tools ├── manage-tools.html ├── manage-users ├── manage-users.html ├── msal_redirect ├── msal_redirect.html ├── next.svg ├── oracle-frontend ├── oracle-frontend.html ├── query-data ├── query-data.html ├── vercel.svg ├── view-feedback ├── view-feedback.html ├── view-notebooks └── view-notebooks.html ├── query_routes.py ├── readiness_routes.py ├── requirements.txt ├── tool_code_utilities.py └── utils.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | *.pyc 6 | 7 | # C extensions 8 | *.so 9 | 10 | # Distribution / packaging 11 | .Python 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | wheels/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | cover/ 54 | 55 | # Translations 56 | *.mo 57 | *.pot 58 | 59 | # Django stuff: 60 | *.log 61 | local_settings.py 62 | db.sqlite3 63 | db.sqlite3-journal 64 | 65 | # Flask stuff: 66 | instance/ 67 | .webassets-cache 68 | 69 | # Scrapy stuff: 70 | .scrapy 71 | 72 | # Sphinx documentation 73 | docs/_build/ 74 | 75 | # PyBuilder 76 | .pybuilder/ 77 | target/ 78 | 79 | # Jupyter Notebook 80 | .ipynb_checkpoints 81 | 82 | # IPython 83 | profile_default/ 84 | ipython_config.py 85 | 86 | # pyenv 87 | # For a library or package, you might want to ignore these files since the code is 88 | # intended to run in multiple environments; otherwise, check them in: 89 | # .python-version 90 | 91 | # pipenv 92 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 93 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 94 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 95 | # install all needed dependencies. 96 | #Pipfile.lock 97 | 98 | # poetry 99 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 100 | # This is especially recommended for binary packages to ensure reproducibility, and is more 101 | # commonly ignored for libraries. 102 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 103 | #poetry.lock 104 | 105 | # pdm 106 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 107 | #pdm.lock 108 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 109 | # in version control. 110 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control 111 | .pdm.toml 112 | .pdm-python 113 | .pdm-build/ 114 | 115 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 116 | __pypackages__/ 117 | 118 | # Celery stuff 119 | celerybeat-schedule 120 | celerybeat.pid 121 | 122 | # SageMath parsed files 123 | *.sage.py 124 | 125 | # Environments 126 | .env 127 | .venv 128 | env/ 129 | venv/ 130 | ENV/ 131 | env.bak/ 132 | venv.bak/ 133 | 134 | # Spyder project settings 135 | .spyderproject 136 | .spyproject 137 | 138 | # Rope project settings 139 | .ropeproject 140 | 141 | # mkdocs documentation 142 | /site 143 | 144 | # mypy 145 | .mypy_cache/ 146 | .dmypy.json 147 | dmypy.json 148 | 149 | # Pyre type checker 150 | .pyre/ 151 | 152 | # pytype static type analyzer 153 | .pytype/ 154 | 155 | # Cython debug symbols 156 | cython_debug/ 157 | 158 | # PyCharm 159 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 160 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 161 | # and can be added to the global gitignore or merged into this file. For a more nuclear 162 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 163 | #.idea/ 164 | 165 | 166 | # Mac OS 167 | .DS_Store -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Defog Desktop 2 | 3 | Defog is a privacy friendly AI data analyst that lets you ask data questions in plain English, while ensuring that your actual data never leaves your servers. 4 | 5 | ## Getting Started 6 | 7 | 1. If you have not yet gotten a Defog API Key, sign up at https://defog.ai/signup to get a free API key! The free key lets you query up to 5 tables with 25 total columns, and up to 1000 queries per month. 8 | 2. Clone this repo with `git clone https://github.com/defog-ai/defog-desktop` 9 | 3. Install requirements with `pip install -r requirements.txt` 10 | 4. Update `config.env` with your API Key 11 | 5. [only if using bigquery] Download your Service Account Key as a JSON file, name it `bq.json`, and save it in the same directory as this `README.md` file 12 | 6. Launch defog with `python main.py`. This will automatically open up http://localhost:33364/static/extract-metadata.html in your browser. 13 | 7. If this is your first time using defog, log in with the user id `admin`, and the password `admin` 14 | 15 | ## Docs and Guides 16 | 17 | For more details documentation, please refer to [docs.defog.ai](https://docs.defog.ai/) 18 | -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/defog-ai/defog-desktop/02eb40f7090be763604b5c7631796aecc0e9beae/__init__.py -------------------------------------------------------------------------------- /add_tools_to_db.py: -------------------------------------------------------------------------------- 1 | from dotenv import load_dotenv 2 | import os 3 | 4 | load_dotenv("config.env") 5 | 6 | from agents.planner_executor.tool_helpers.all_tools import tools 7 | from db_utils import add_tool 8 | import asyncio 9 | 10 | 11 | async def main(): 12 | # initialise basic tools in db 13 | for key in tools: 14 | tool = tools[key] 15 | function_name = tool["function_name"] 16 | description = tool["description"] 17 | code = tool["code"] 18 | tool_name = tool["tool_name"] 19 | toolbox = tool["toolbox"] 20 | input_metadata = tool["input_metadata"] 21 | output_metadata = tool["output_metadata"] 22 | api_keys = os.environ["DEFOG_API_KEY"].split(",") 23 | # create embedding for the tool name + description 24 | 25 | for api_key in api_keys: 26 | err = await add_tool( 27 | api_key=api_key, 28 | tool_name=tool_name, 29 | function_name=function_name, 30 | description=description, 31 | code=code, 32 | input_metadata=input_metadata, 33 | output_metadata=output_metadata, 34 | toolbox=toolbox, 35 | cannot_delete=True, 36 | cannot_disable=True, 37 | ) 38 | 39 | if err: 40 | if "already exists" in err: 41 | print(f"Tool {function_name} already exists in the database.") 42 | else: 43 | print(f"Error adding tool {tool_name}: {err}") 44 | else: 45 | print(f"Tool {function_name} added to the database.") 46 | 47 | 48 | # Run the main function 49 | asyncio.run(main()) 50 | -------------------------------------------------------------------------------- /admin_routes.py: -------------------------------------------------------------------------------- 1 | from fastapi import APIRouter, Request 2 | from sqlalchemy import ( 3 | select, 4 | update, 5 | insert, 6 | delete, 7 | ) 8 | from db_utils import engine, Users, validate_user 9 | import hashlib 10 | import pandas as pd 11 | from io import StringIO 12 | import requests 13 | import asyncio 14 | from fastapi.responses import JSONResponse 15 | 16 | router = APIRouter() 17 | 18 | SALT = "WITHPEPPER" 19 | INTERNAL_API_KEY = "DUMMY_KEY" 20 | 21 | 22 | @router.post("/admin/add_users") 23 | async def add_user(request: Request): 24 | params = await request.json() 25 | token = params.get("token") 26 | gsheets_url = params.get("gsheets_url") 27 | if not validate_user(token, user_type="admin"): 28 | return JSONResponse( 29 | status_code=401, 30 | content={ 31 | "error": "unauthorized", 32 | "message": "Invalid username or password", 33 | }, 34 | ) 35 | 36 | if not gsheets_url: 37 | return {"error": "no google sheets url provided"} 38 | 39 | # get the users from the google sheet 40 | user_dets_csv = None 41 | try: 42 | url_to_query = gsheets_url.split("/edit")[0] + "/gviz/tq?tqx=out:csv&sheet=v4" 43 | user_dets_csv = await asyncio.to_thread(requests.get, url_to_query) 44 | user_dets_csv = user_dets_csv.text 45 | except: 46 | return {"error": "could not get the google sheet"} 47 | 48 | # get the users from the csv 49 | try: 50 | users = pd.read_csv(StringIO(user_dets_csv)).to_dict(orient="records") 51 | print(users, flush=True) 52 | except: 53 | return {"error": "could not parse the google sheets csv"} 54 | 55 | # create a password for each user 56 | userdets = [] 57 | for user in users: 58 | dets = { 59 | "username": user.get("username", user.get("user_email")).lower(), 60 | "user_type": user.get("user_type", user.get("user_role")).lower(), 61 | } 62 | userdets.append(dets) 63 | 64 | # save the users to postgres 65 | with engine.begin() as conn: 66 | for dets in userdets: 67 | hashed_password = hashlib.sha256( 68 | (dets["username"] + SALT + "defog_" + dets["username"]).encode() 69 | ).hexdigest() 70 | 71 | # check if user already exists 72 | user_exists = conn.execute( 73 | select(Users).where(Users.username == dets["username"]) 74 | ).fetchone() 75 | 76 | if user_exists: 77 | conn.execute( 78 | update(Users) 79 | .where(Users.username == dets["username"]) 80 | .values( 81 | hashed_password=hashed_password, user_type=dets["user_type"] 82 | ) 83 | ) 84 | else: 85 | conn.execute( 86 | insert(Users).values( 87 | username=dets["username"], 88 | hashed_password=hashed_password, 89 | token=INTERNAL_API_KEY, 90 | user_type=dets["user_type"], 91 | is_premium=True, 92 | ) 93 | ) 94 | 95 | return {"status": "success"} 96 | 97 | 98 | @router.post("/admin/add_users_csv") 99 | async def add_users_csv(request: Request): 100 | params = await request.json() 101 | token = params.get("token") 102 | users_csv = params.get("users_csv") 103 | if not validate_user(token, user_type="admin"): 104 | return JSONResponse( 105 | status_code=401, 106 | content={ 107 | "error": "unauthorized", 108 | "message": "Invalid username or password", 109 | }, 110 | ) 111 | 112 | if not users_csv: 113 | return {"error": "no users provided"} 114 | 115 | users = pd.read_csv(StringIO(users_csv)).to_dict(orient="records") 116 | 117 | # create a password for each user 118 | userdets = [] 119 | for user in users: 120 | dets = { 121 | "username": user.get("username", user.get("user_email")).lower(), 122 | "password": user.get("password", user.get("user_password")), 123 | "user_type": user.get("user_type", user.get("user_role")).lower(), 124 | } 125 | userdets.append(dets) 126 | 127 | # save the users to postgres 128 | # save the users to postgres 129 | with engine.begin() as conn: 130 | cur = conn.connection.cursor() 131 | for dets in userdets: 132 | hashed_password = hashlib.sha256( 133 | (dets["username"] + SALT + dets["password"]).encode() 134 | ).hexdigest() 135 | 136 | # check if user already exists 137 | user_exists = conn.execute( 138 | select(Users).where(Users.username == dets["username"]) 139 | ).fetchone() 140 | 141 | if user_exists: 142 | conn.execute( 143 | update(Users) 144 | .where(Users.username == dets["username"]) 145 | .values( 146 | hashed_password=hashed_password, user_type=dets["user_type"] 147 | ) 148 | ) 149 | else: 150 | conn.execute( 151 | insert(Users).values( 152 | username=dets["username"], 153 | hashed_password=hashed_password, 154 | token=INTERNAL_API_KEY, 155 | user_type=dets["user_type"], 156 | is_premium=True, 157 | ) 158 | ) 159 | 160 | return {"status": "success"} 161 | 162 | 163 | @router.post("/admin/get_users") 164 | async def get_users(request: Request): 165 | params = await request.json() 166 | token = params.get("token", None) 167 | if not validate_user(token, user_type="admin"): 168 | return JSONResponse( 169 | status_code=401, 170 | content={ 171 | "error": "unauthorized", 172 | "message": "Invalid username or password", 173 | }, 174 | ) 175 | 176 | with engine.begin() as conn: 177 | users = conn.execute(select(Users)).fetchall() 178 | 179 | users = pd.DataFrame(users)[["username", "user_type"]].to_dict(orient="records") 180 | return {"users": users} 181 | 182 | 183 | @router.post("/admin/delete_user") 184 | async def delete_user(request: Request): 185 | params = await request.json() 186 | token = params.get("token", None) 187 | if not validate_user(token, user_type="admin"): 188 | return JSONResponse( 189 | status_code=401, 190 | content={ 191 | "error": "unauthorized", 192 | "message": "Invalid username or password", 193 | }, 194 | ) 195 | 196 | username = params.get("username", None) 197 | with engine.begin() as conn: 198 | conn.execute(delete(Users).where(Users.username == username)) 199 | return {"status": "success"} 200 | -------------------------------------------------------------------------------- /agents/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/defog-ai/defog-desktop/02eb40f7090be763604b5c7631796aecc0e9beae/agents/__init__.py -------------------------------------------------------------------------------- /agents/clarifier/README.md: -------------------------------------------------------------------------------- 1 | This agent starts whenever a new analysis is started. 2 | 3 | The clarifier looks at the question, looks at the schema + glossary, and then checks if there are any clarifying questions it needs to ask the user. 4 | 5 | If yes, it asks these questions, the user answers them, and the final responses are then sent over to a planner_executor. 6 | -------------------------------------------------------------------------------- /agents/clarifier/clarifier_agent.py: -------------------------------------------------------------------------------- 1 | # ask an agent what extra information would it need except for the database metadata 2 | # "Clarifier" 3 | import re 4 | import traceback 5 | import yaml 6 | import asyncio 7 | import requests 8 | import os 9 | 10 | from utils import make_request 11 | 12 | default_values_formatted = { 13 | "multi select": [], 14 | "text input": "", 15 | # "date range selector": "12 months", 16 | } 17 | 18 | # unformatted values 19 | default_values = { 20 | "multi select": [], 21 | "text input": "", 22 | # "date range selector": 12, 23 | } 24 | 25 | llm_calls_url = os.environ.get("LLM_CALLS_URL", "https://api.defog.ai/agent_endpoint") 26 | 27 | 28 | async def turn_into_statements(clarification_questions, dfg_api_key): 29 | url = llm_calls_url 30 | filtered = [q for q in clarification_questions if q.get("response", "") != ""] 31 | if len(filtered) == 0: 32 | return [] 33 | 34 | payload = { 35 | "request_type": "turn_into_statement", 36 | "clarification_questions": [ 37 | q for q in clarification_questions if q.get("response", "") != "" 38 | ], 39 | "api_key": dfg_api_key, 40 | } 41 | r = await make_request(url, payload=payload) 42 | statements = r.json()["statements"] 43 | return statements 44 | 45 | 46 | def parse_q(q): 47 | try: 48 | q = re.sub("```", "", q).strip() 49 | q = re.sub("yaml", "", q).strip() 50 | j = yaml.safe_load(q.strip()) 51 | for idx in range(len(j)): 52 | # if this is a multi select, and has no options, change it to a text input 53 | if ( 54 | j[idx]["ui_tool"] == "multi select" 55 | and len(j[idx]["ui_tool_options"]) == 0 56 | ): 57 | j[idx]["ui_tool"] = "text input" 58 | 59 | j[idx]["response"] = default_values.get(j[idx]["ui_tool"]) 60 | j[idx]["response_formatted"] = default_values_formatted.get( 61 | j[idx]["ui_tool"] 62 | ) 63 | return j 64 | except Exception as e: 65 | # print(e) 66 | # traceback.print_exc() 67 | return [] 68 | 69 | 70 | async def get_clarification(question, api_key, dev=False, temp=False): 71 | payload = { 72 | "request_type": "clarify_task", 73 | "question": question, 74 | "api_key": api_key, 75 | "dev": dev, 76 | "temp": temp, 77 | } 78 | 79 | r = await make_request( 80 | llm_calls_url, 81 | payload=payload, 82 | ) 83 | 84 | if r.status_code == 200: 85 | clarifying_questions = r.json()["clarifications"] 86 | parsed_clarifying_questions = parse_q(clarifying_questions) 87 | return parsed_clarifying_questions 88 | else: 89 | raise Exception(f"Error getting clarifications: {r.status_code}") 90 | 91 | 92 | class Clarifier: 93 | """ 94 | Ask the user clarifying questions to understand the user's question better. 95 | """ 96 | 97 | def __init__( 98 | self, 99 | dfg_api_key, 100 | user_question, 101 | client_description, 102 | dev=False, 103 | temp=False, 104 | parent_analyses=[], 105 | direct_parent_analysis=None, 106 | ): 107 | self.user_question = user_question 108 | self.client_description = client_description 109 | self.parent_analyses = parent_analyses 110 | self.direct_parent_analysis = direct_parent_analysis 111 | self.dfg_api_key = dfg_api_key 112 | self.dev = dev 113 | self.temp = temp 114 | 115 | @staticmethod 116 | async def clarifier_post_process(self={}, dfg_api_key="", dev=False, temp=False): 117 | """ 118 | This function is called right before the understander stage. 119 | It takes in the user's answers to the clarification questions 120 | and converts them into "user_requirements" which is a string 121 | that is passed on to the understander. 122 | """ 123 | 124 | async def post_process(res_data): 125 | print("Running clarifier post process...") 126 | clarification_questions = res_data["clarification_questions"] 127 | 128 | # gather responses into text 129 | # pass it to the clarifier as "answers from the user", and ask it to turn them into statements 130 | 131 | url = llm_calls_url 132 | payload = { 133 | "request_type": "turn_into_statement", 134 | "clarification_questions": clarification_questions, 135 | "api_key": dfg_api_key, 136 | } 137 | r = await asyncio.to_thread(requests.post, url, json=payload) 138 | statements = r.json()["statements"] 139 | ret = {"assignment_understanding": statements} 140 | 141 | return ret 142 | 143 | return post_process 144 | 145 | async def gen_clarification_questions(self): 146 | print("Running clarifier...") 147 | 148 | async def generator(): 149 | url = llm_calls_url 150 | payload = { 151 | "request_type": "clarify_task", 152 | "question": self.user_question, 153 | "client_description": self.client_description, 154 | "parent_questions": [ 155 | i["user_question"] 156 | for i in self.parent_analyses 157 | if i["user_question"] is not None and i["user_question"] != "" 158 | ], 159 | "direct_parent_analysis": self.direct_parent_analysis, 160 | "api_key": self.dfg_api_key, 161 | "dev": self.dev, 162 | "temp": self.temp, 163 | } 164 | print(payload) 165 | r = await asyncio.to_thread(requests.post, url, json=payload) 166 | res = r.json() 167 | print(res, flush=True) 168 | clarifying_questions = res["clarifications"] 169 | try: 170 | cleaned_clarifying_questions = parse_q(clarifying_questions) 171 | print(cleaned_clarifying_questions) 172 | for q in cleaned_clarifying_questions: 173 | try: 174 | if q is not None: 175 | yield [q] 176 | except Exception as e: 177 | print(e) 178 | pass 179 | except Exception as e: 180 | traceback.print_exc() 181 | print(e) 182 | yield None 183 | 184 | return generator, await self.clarifier_post_process() 185 | -------------------------------------------------------------------------------- /agents/main_agent.py: -------------------------------------------------------------------------------- 1 | from agents.clarifier.clarifier_agent import Clarifier 2 | import traceback 3 | 4 | 5 | # each of the agents can return a "postprocess" function 6 | # that will be run before the next stage and will process the incoming user input if any for the next stage 7 | async def get_clarification( 8 | dfg_api_key="", 9 | user_question="", 10 | client_description="", 11 | parent_analyses=[], 12 | direct_parent_analysis=None, 13 | dev=False, 14 | temp=False, 15 | **kwargs, 16 | ): 17 | """ 18 | This function is called when the user asks for clarification questions. 19 | It creates a clarifier object and calls the gen_clarification_questions function 20 | on it. This function returns a generator that yields clarification questions. 21 | """ 22 | try: 23 | clarifier = Clarifier( 24 | dfg_api_key=dfg_api_key, 25 | user_question=user_question, 26 | client_description=client_description, 27 | parent_analyses=parent_analyses, 28 | direct_parent_analysis=direct_parent_analysis, 29 | dev=dev, 30 | temp=temp, 31 | ) 32 | 33 | ( 34 | clarification_questions, 35 | post_process, 36 | ) = await clarifier.gen_clarification_questions() 37 | 38 | return { 39 | "success": True, 40 | "generator": clarification_questions, 41 | "prop_name": "clarification_questions", 42 | }, post_process 43 | except Exception as e: 44 | err = e 45 | traceback.print_exc() 46 | return { 47 | "success": False, 48 | "error_message": "Error generating clarification questions.", 49 | }, None 50 | -------------------------------------------------------------------------------- /agents/planner_executor/README.md: -------------------------------------------------------------------------------- 1 | This is where the planner-executor agent and the toolboxes reside. 2 | 3 | This agent is responsible for generating, and executing a plan to solve the task given to it. 4 | 5 | `toolbox_manager.py` is the main file where all the toolboxes are registered. It is also the file that is called by the `planner_executor_prompts.py` to get the list of toolboxes (as a formatted prompt) to be used. 6 | 7 | To create a new toolbox, you need to do the following: 8 | 9 | 1. Add a folder inside `toolboxes/` with your toolbox's name, and create a file called `tools.py` inside it. 10 | 2. Inside the above `tools.py`, write all your tools/functions that you want to be available in the toolbox. Make sure that the function names are unique from other toolboxes and your function signature is well documented, like types and default values. Have all functions have a `**kwargs` at the end. This is because not all functions need the `global_dict` (which stores results of previous tools in the chain), but it is always passed anyway. 11 | 3. Inside `tool_helpers/all_tools.py`, import all your functions from the above `tools.py`, and add them to the tools object. This is the object that is looked up when running the tool. 12 | 4. Inside `toolboxes/toolbox_prompts.py`, add another property to the toolbox_prompts object. The property name can be whatever you want your toolbox to be called (ideally your folder name you created above), and add a formatted description of your tools in the value. This is the string that will be sent to the model in the prompt. Make sure it is formatted like the other strings in that file. 13 | 5. Inside `tool_helpers/toolbox_manager.py`, add your toolbox to the `all_toolboxes` list. The value you add should be the name of the property you created in the above step. 14 | 6. If you don't want your tool'd code to be sent to the front end, add a property `"no_code": True` to the `tool_helpers/all_tools.py` entry you created in step 3. 15 | 7. IMPORTANT: Once you're done adding your tools, cd into `backend/`, and run `export PYTHONPATH=$(pwd):$PYTHONPATH && python3 ./scripts/generate_tool_metadata_for_frontend.py` to generate the metadata for the front end.

This will create a file called `tool_metadata.js` in the front end's `utils` folder. This file is used by the front end to know what tools are available when letting users add steps. If you don't run this, your tools won't show up in the front end's add-a-step UI. 16 | 17 | After you've completed the above, you can add users who will have access to these toolboxes by adding an entry into the defog_toolboxes table. 18 | 19 | `toolboxes` is an array of strings. Each string is the name of the toolbox you want to give access to. For example: `["cancer-survival", "f1"]`. If you want to give access to all toolboxes, use `["*"]`. 20 | 21 | ```SQL 22 | insert into defog_toolboxes (username, api_key, toolboxes) 23 | values ("EMAIL_ADDRESS", 'API_KEY', 'TOOLBOX_ARRAY'); 24 | ``` 25 | -------------------------------------------------------------------------------- /agents/planner_executor/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/defog-ai/defog-desktop/02eb40f7090be763604b5c7631796aecc0e9beae/agents/planner_executor/__init__.py -------------------------------------------------------------------------------- /agents/planner_executor/execute_tool.py: -------------------------------------------------------------------------------- 1 | import re 2 | import pandas as pd 3 | import traceback 4 | import inspect 5 | from utils import ( 6 | SqlExecutionError, 7 | error_str, 8 | warn_str, 9 | filter_function_inputs, 10 | wrap_in_async, 11 | ) 12 | from db_utils import get_all_tools 13 | import asyncio 14 | from tool_code_utilities import default_top_level_imports 15 | 16 | 17 | def parse_function_signature(param_signatures, fn_name): 18 | """ 19 | Given a dictionary of function signature, return a list of all the parameters 20 | with name, default values and types. 21 | """ 22 | params = {} 23 | for p in param_signatures: 24 | # ignore kwargs 25 | if p == "kwargs" or p == "global_dict": 26 | continue 27 | p_name = param_signatures[p].name 28 | p_default_val = param_signatures[p].default 29 | 30 | if p_default_val is param_signatures[p].empty: 31 | p_default_val = None 32 | 33 | p_type = param_signatures[p].annotation 34 | if p_type is param_signatures[p].empty: 35 | warn_str( 36 | "No type annotation for parameter " 37 | + p_name 38 | + " in " 39 | + fn_name 40 | + ". Assuming type is str." 41 | ) 42 | p_type = "str" 43 | else: 44 | # if p_type starts with
{full_data_md}", 105 | } 106 | resend.Emails.send(params) 107 | return { 108 | "outputs": [ 109 | { 110 | "data": pd.DataFrame( 111 | [ 112 | [ 113 | { 114 | "message": f"Email sent successfully to {recipient_email_address}" 115 | }, 116 | ] 117 | ] 118 | ), 119 | "analysis": "Email sent successfully", 120 | } 121 | ], 122 | } 123 | -------------------------------------------------------------------------------- /agents/planner_executor/toolboxes/toolbox_prompts.py: -------------------------------------------------------------------------------- 1 | # - tool_name: simple_stats 2 | # description: Gets simple statistics from a pandas df using numpy and pandas. 3 | # inputs: [snippets of python code for each calculation] 4 | # outputs: [pandas dfs in the same order as the above snippets] 5 | 6 | 7 | toolbox_prompts = { 8 | "data-fetching": """ 9 | - tool_name: data_fetcher_and_aggregator 10 | description: Converting a natural language question into a SQL query, that then runs on an external database. Fetches, filters, aggregates, and performs arithmetic computations on data. Remember that this tool does not have access to the data returned by the previous steps. It only has access to the data in the database. 11 | inputs: [natural language description of the data required to answer this question (or get the required information for subsequent steps) as a string] 12 | outputs: pandas df""", 13 | "stats": """- tool_name: line_plot 14 | description: This function generates a line plot using python's seaborn library. It should be used when the user wants to see how a variable changes over time, and should be used immediately after the data_fetcher tool. 15 | inputs: ["global_dict.", xaxis column (exactly a single column - often a datetime or string), yaxis column (exactly a single column - always a numerical value), hue column or None, facet column or None, estimator ("mean" if data must be aggregated, "None" if it is not aggregated), individual_id_column or None - refers to the column that contains individual data points, often some kind of id), plot_average_line or None - True if the user wants to plot an average or median line, average_line_type or None - the kind of value for the average line to have. Can be mean, median, max, min, or mode] 16 | outputs: pandas df 17 | 18 | -tool_name: t_test 19 | description: This function gets two groups and runs a t-test to check if there is a significant difference between their means. There are two ways to run the test: paired and unpaired. Paired test has one group column, unpaired has one group column. 20 | inputs: ["global_dict.", group column, score column, name column or None, type of t test as a string (paired or unpaired)] 21 | outputs: pandas df 22 | 23 | -tool_name: wilcoxon_test 24 | description: This function gets two groups and runs a wilcoxon test to check if there is a significant difference between their means. 25 | inputs: ["global_dict.", group column, score column, name column] 26 | outputs: pandas df 27 | 28 | -tool_name: anova_test 29 | description: This function gets more than two groups and runs an anova test to check if there is a significant difference between their means. 30 | inputs: ["global_dict.", group column, score column] 31 | outputs: pandas df 32 | 33 | - tool_name: fold_change 34 | description: This function calculates the fold change over time for different groups. Fold change is the ratio of the final value to the initial value. 35 | inputs: ["global_dict.", value column (the numerical value), individual id column (the column that represents individual ids to calculate fold change for), time column (the column that represents the time point), group column or None (the column that represents the groups that individuals belong to, like cohort or study)] 36 | outputs: pandas 37 | """, 38 | "plots": """-tool_name: boxplot 39 | description: Generates a boxplot using python's seaborn library. Also accepts a faceting column. This usually required the full dataset and not summary statistics. Use the facet feature only when specifically asked for it. 40 | inputs: ["global_dict.", [boxplot_x column, boxplot_y column], facet = True/False, facet column] 41 | outputs: pandas df 42 | 43 | -tool_name: heatmap 44 | description: Generates a heatmap using python's seaborn library. This accepts the full dataset as the first parameter, and not summary statistics or aggregates. 45 | inputs: ["global_dict.", heatmap_x_column, heatmap_y_column, heatmap_value_column, aggregation_type as a string (can be mean, median, max, min or sum), color_scale (only if specified by the user. defaults to YlGnBu)] 46 | outputs: pandas df 47 | """, 48 | # --- --- --- # 49 | "cancer-survival": """ 50 | - tool_name: kaplan_meier_curve 51 | description: Generates a kaplan meier survival function. You have to run data_fetcher before this. 52 | inputs: ["global_dict.", survival time column name, status/event column name, [array of stratification variables if any otherwise None]] 53 | outputs: [as many pandas dfs as there are stratification variables. Make sure the outputs length matches the number of stratification variables. only one pandas df is output if stratification variables is None] 54 | 55 | - tool_name: hazard_ratio 56 | description: Creates a hazard ratio (based on the Cox Index), given some inputs. 57 | inputs: ["global_dict.", survival time column name, status/event column name] 58 | outputs: pandas df""", 59 | } 60 | -------------------------------------------------------------------------------- /auth_routes.py: -------------------------------------------------------------------------------- 1 | from fastapi import APIRouter, Request, HTTPException 2 | from auth_utils import login_user, reset_password, get_hashed_password 3 | from db_utils import validate_user 4 | from fastapi.responses import JSONResponse 5 | 6 | INTERNAL_API_KEY = "DUMMY_KEY" 7 | 8 | router = APIRouter() 9 | 10 | 11 | @router.post("/login") 12 | async def login(request: Request): 13 | params = await request.json() 14 | username = params.get("username", None) 15 | password = params.get("password", None) 16 | if not username: 17 | return {"error": "no user id provided"} 18 | if not password: 19 | return {"error": "no password provided"} 20 | 21 | dets = login_user(username, password) 22 | return dets 23 | 24 | 25 | @router.post("/reset_password") 26 | async def reset_password(request: Request): 27 | params = await request.json() 28 | username = params.get("username", None) 29 | new_password = params.get("password", None) 30 | token = params.get("token", None) 31 | if not validate_user(token, user_type="admin"): 32 | return JSONResponse( 33 | status_code=401, 34 | content={ 35 | "error": "unauthorized", 36 | "message": "Invalid username or password", 37 | }, 38 | ) 39 | if not username: 40 | return {"error": "no user id provided"} 41 | if not new_password: 42 | return {"error": "no password provided"} 43 | dets = reset_password(username, new_password) 44 | return dets 45 | -------------------------------------------------------------------------------- /auth_utils.py: -------------------------------------------------------------------------------- 1 | import hashlib 2 | from db_utils import engine, Users 3 | from sqlalchemy import ( 4 | select, 5 | update, 6 | ) 7 | from fastapi.responses import JSONResponse 8 | 9 | SALT = "WITHPEPPER" 10 | 11 | 12 | def login_user(username, password): 13 | hashed_password = hashlib.sha256((username + SALT + password).encode()).hexdigest() 14 | with engine.begin() as conn: 15 | user = conn.execute( 16 | select(Users).where(Users.hashed_password == hashed_password) 17 | ).fetchone() 18 | 19 | if user: 20 | return {"status": "success", "user_type": user[0], "token": hashed_password} 21 | else: 22 | return JSONResponse( 23 | status_code=401, 24 | content={ 25 | "error": "unauthorized", 26 | "message": "Invalid username or password", 27 | }, 28 | ) 29 | 30 | 31 | def reset_password(username, new_password): 32 | hashed_password = hashlib.sha256( 33 | (username + SALT + new_password).encode() 34 | ).hexdigest() 35 | with engine.begin() as conn: 36 | conn.execute( 37 | update(Users) 38 | .where(Users.username == username) 39 | .values(hashed_password=hashed_password) 40 | ) 41 | 42 | 43 | def get_hashed_password(username, password): 44 | return hashlib.sha256((username + SALT + password).encode()).hexdigest() 45 | 46 | 47 | def validate_user_email(email): 48 | with engine.begin() as conn: 49 | user = conn.execute(select(Users).where(Users.username == email)).fetchone() 50 | if user: 51 | return True 52 | else: 53 | return False 54 | -------------------------------------------------------------------------------- /config.env: -------------------------------------------------------------------------------- 1 | DEFOG_API_KEY=YOUR_API_KEY -------------------------------------------------------------------------------- /connection_manager.py: -------------------------------------------------------------------------------- 1 | from fastapi import WebSocket 2 | 3 | 4 | class ConnectionManager: 5 | def __init__(self): 6 | self.active_connections: list[WebSocket] = [] 7 | 8 | async def connect(self, websocket: WebSocket): 9 | await websocket.accept() 10 | self.active_connections.append(websocket) 11 | 12 | def disconnect(self, websocket: WebSocket): 13 | self.active_connections.remove(websocket) 14 | 15 | async def send_personal_message(self, message: str, websocket: WebSocket): 16 | await websocket.send_json(message) 17 | 18 | async def broadcast(self, message: str): 19 | for connection in self.active_connections: 20 | await connection.send_json(message) 21 | -------------------------------------------------------------------------------- /create_admin_user.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | from db_utils import engine, Users 4 | from sqlalchemy import select, insert 5 | import hashlib 6 | import time 7 | 8 | SALT = "WITHPEPPER" 9 | INTERNAL_API_KEY = "dummy_api_key" 10 | 11 | username = "admin" 12 | password = "admin" 13 | hashed_password = hashlib.sha256((username + SALT + password).encode()).hexdigest() 14 | 15 | # check if admin user exists first 16 | admin_exists = False 17 | 18 | with engine.begin() as conn: 19 | user = conn.execute(select(Users).where(Users.username == username)).fetchone() 20 | 21 | if user: 22 | admin_exists = True 23 | print("Admin user already exists.") 24 | else: 25 | print("Creating admin user...") 26 | with engine.begin() as conn: 27 | conn.execute( 28 | insert(Users).values( 29 | username=username, 30 | hashed_password=hashed_password, 31 | token=INTERNAL_API_KEY, 32 | user_type="admin", 33 | is_premium=True, 34 | ) 35 | ) 36 | print("Admin user created.") 37 | -------------------------------------------------------------------------------- /create_sqlite_tables.py: -------------------------------------------------------------------------------- 1 | # read a sql file, and create tables in sqlite database 2 | 3 | from sqlalchemy import create_engine, MetaData, Table, Column, Integer, Text, Boolean 4 | from sqlalchemy.dialects.sqlite import JSON 5 | import os 6 | 7 | path_to_sql_file = "defog_local.db" 8 | 9 | if not os.path.exists(path_to_sql_file): 10 | open(path_to_sql_file, "w").close() 11 | 12 | # Create an engine (SQLite in this example) 13 | engine = create_engine(f"sqlite:///{path_to_sql_file}", echo=True) 14 | 15 | # Initialize MetaData object 16 | metadata = MetaData() 17 | 18 | # Define tables 19 | defog_analyses = Table( 20 | "defog_analyses", 21 | metadata, 22 | Column("analysis_id", Text, primary_key=True), 23 | Column("api_key", Text, nullable=False), 24 | Column("email", Text), 25 | Column("timestamp", Text), 26 | Column("approaches", JSON), 27 | Column("clarify", JSON), 28 | Column("assignment_understanding", JSON), 29 | Column("understand", JSON), 30 | Column("gen_approaches", JSON), 31 | Column("user_question", Text), 32 | Column("gen_steps", JSON), 33 | Column("follow_up_analyses", JSON), 34 | Column("parent_analyses", JSON), 35 | Column("is_root_analysis", Boolean, default=True), 36 | Column("root_analysis_id", Text), 37 | Column("direct_parent_id", Text), 38 | Column("username", Text), 39 | ) 40 | 41 | defog_table_charts = Table( 42 | "defog_table_charts", 43 | metadata, 44 | Column("table_id", Text, primary_key=True), 45 | Column("data_csv", JSON), 46 | Column("query", Text), 47 | Column("chart_images", JSON), 48 | Column("sql", Text), 49 | Column("code", Text), 50 | Column("tool", JSON), 51 | Column("edited", Integer), 52 | Column("error", Text), 53 | Column("reactive_vars", JSON), 54 | ) 55 | 56 | defog_tool_runs = Table( 57 | "defog_tool_runs", 58 | metadata, 59 | Column("tool_run_id", Text, primary_key=True), 60 | Column("step", JSON), 61 | Column("outputs", JSON), 62 | Column("tool_name", Text), 63 | Column("tool_run_details", JSON), 64 | Column("error_message", Text), 65 | Column("edited", Integer), 66 | Column("analysis_id", Text), 67 | ) 68 | 69 | defog_toolboxes = Table( 70 | "defog_toolboxes", 71 | metadata, 72 | Column("api_key", Text, primary_key=True), 73 | Column("username", Text, nullable=False), 74 | Column("toolboxes", JSON), 75 | ) 76 | 77 | defog_tools = Table( 78 | "defog_tools", 79 | metadata, 80 | Column("tool_name", Text, primary_key=True), 81 | Column("function_name", Text, nullable=False), 82 | Column("description", Text, nullable=False), 83 | Column("code", Text, nullable=False), 84 | Column("input_metadata", JSON), 85 | Column("output_metadata", JSON), 86 | Column("toolbox", Text), 87 | Column("disabled", Boolean, default=False), 88 | Column("cannot_delete", Boolean, default=False), 89 | Column("cannot_disable", Boolean, default=False), 90 | ) 91 | 92 | defog_users = Table( 93 | "defog_users", 94 | metadata, 95 | Column("username", Text, primary_key=True), 96 | Column("hashed_password", Text), 97 | Column("token", Text, nullable=False), 98 | Column("user_type", Text, nullable=False), 99 | Column("csv_tables", Text), 100 | Column("is_premium", Integer), 101 | Column("created_at", Text), 102 | Column("is_verified", Integer), 103 | ) 104 | 105 | defog_db_creds = Table( 106 | "defog_db_creds", 107 | metadata, 108 | Column("api_key", Text, primary_key=True), 109 | Column("db_type", Text), 110 | Column("db_creds", JSON), 111 | ) 112 | 113 | # Create tables in the database 114 | metadata.create_all(engine) 115 | -------------------------------------------------------------------------------- /csv_routes.py: -------------------------------------------------------------------------------- 1 | import os 2 | from fastapi import APIRouter, Request, HTTPException 3 | from generic_utils import get_api_key_from_key_name, make_request 4 | import pandas as pd 5 | from io import StringIO 6 | 7 | router = APIRouter() 8 | 9 | DEFOG_BASE_URL = os.environ.get("DEFOG_BASE_URL", "https://api.defog.ai") 10 | 11 | 12 | @router.post("/generate_column_descriptions_for_csv") 13 | async def generate_column_descriptions_for_csv(request: Request): 14 | """ 15 | Adds column descriptions to the metadata of a CSV file 16 | Expects a list of dictionaries with keys 'column_name' and 'data_type' 17 | Return a list of dictionaries with keys 'column_name', 'data_type', and 'column_description' 18 | This is done by sending a POST request to the /generate_metadata_csv endpoint 19 | """ 20 | params = await request.json() 21 | key_name = params.get("key_name", None) 22 | metadata = params.get("metadata", None) 23 | table_name = params.get("table_name", None) 24 | 25 | if not key_name: 26 | return {"error": "no key name provided"} 27 | if not metadata: 28 | return {"error": "no metadata provided"} 29 | if not table_name: 30 | return {"error": "no table name provided"} 31 | 32 | api_key = get_api_key_from_key_name(key_name) 33 | 34 | # verify that metadata is a list 35 | if not isinstance(metadata, list): 36 | return {"error": "metadata must be a list of dictionaries"} 37 | 38 | # verify that each element in metadata is a dictionary 39 | for element in metadata: 40 | if not isinstance(element, dict): 41 | return {"error": "each element in metadata must be a dictionary"} 42 | 43 | # verify that each dictionary in metadata has the keys 'column_name' and 'data_type' 44 | for element in metadata: 45 | if "column_name" not in element: 46 | return { 47 | "error": "each dictionary in metadata must have the key 'column_name'" 48 | } 49 | if "data_type" not in element: 50 | return { 51 | "error": "each dictionary in metadata must have the key 'data_type'" 52 | } 53 | 54 | # now, send a request to `/get_schema_csv` 55 | schemas = {table_name: metadata} 56 | r = await make_request( 57 | f"{DEFOG_BASE_URL}/get_schema_csv", 58 | { 59 | "api_key": api_key, 60 | "schemas": schemas, 61 | }, 62 | ) 63 | metadata_csv_string = r["csv"] 64 | metadata_json = ( 65 | pd.read_csv(StringIO(metadata_csv_string)).fillna("").to_dict(orient="records") 66 | ) 67 | return metadata_json 68 | 69 | 70 | @router.post("/generate_query_csv") 71 | async def generate_query_csv(request: Request): 72 | """ 73 | Generates a CSV file with the results of a query 74 | Expects a query string 75 | Return a CSV file with the results of the query 76 | This is done by sending a POST request to the /generate_query_csv endpoint 77 | """ 78 | params = await request.json() 79 | key_name = params.get("key_name", None) 80 | question = params.get("question", None) 81 | metadata = params.get("metadata", None) 82 | previous_questions = params.get("previous_questions", []) 83 | 84 | if len(previous_questions) > 0: 85 | previous_questions = previous_questions[:-1] 86 | 87 | prev_questions = [] 88 | for item in previous_questions: 89 | prev_question = item.get("user_question") 90 | if question: 91 | prev_steps = ( 92 | item.get("analysisManager", {}) 93 | .get("analysisData", {}) 94 | .get("gen_steps", {}) 95 | .get("steps", []) 96 | ) 97 | if len(prev_steps) > 0: 98 | for step in prev_steps: 99 | if "sql" in step: 100 | prev_sql = step["sql"] 101 | prev_questions.append(prev_question) 102 | prev_questions.append(prev_sql) 103 | break 104 | 105 | # metadata should be a list of dictionaries with keys 'table_name', 'column_name', 'data_type', and 'column_description'\ 106 | 107 | if not key_name: 108 | return {"error": "no key name provided"} 109 | if not question: 110 | return {"error": "no question provided"} 111 | if not metadata: 112 | return {"error": "no metadata provided"} 113 | 114 | if not isinstance(metadata, list): 115 | return {"error": "metadata must be a list of dictionaries"} 116 | 117 | for element in metadata: 118 | if not isinstance(element, dict): 119 | return {"error": "each element in metadata must be a dictionary"} 120 | if "table_name" not in element: 121 | return { 122 | "error": "each dictionary in metadata must have the key 'table_name'" 123 | } 124 | if "column_name" not in element: 125 | return { 126 | "error": "each dictionary in metadata must have the key 'column_name'" 127 | } 128 | if "data_type" not in element: 129 | return { 130 | "error": "each dictionary in metadata must have the key 'data_type'" 131 | } 132 | # let's keep column descriptions optional for now 133 | # if "column_description" not in element: 134 | # return { 135 | # "error": "each dictionary in metadata must have the key 'column_description'" 136 | # } 137 | 138 | api_key = get_api_key_from_key_name(key_name) 139 | 140 | # convert metadata to a dictionary 141 | metadata_dict = {} 142 | for element in metadata: 143 | table_name = element["table_name"] 144 | if table_name not in metadata_dict: 145 | metadata_dict[table_name] = [] 146 | metadata_dict[table_name].append( 147 | { 148 | "column_name": element["column_name"], 149 | "data_type": element["data_type"], 150 | "column_description": element.get("column_description"), 151 | } 152 | ) 153 | 154 | r = await make_request( 155 | f"{DEFOG_BASE_URL}/generate_query_chat", 156 | { 157 | "api_key": api_key, 158 | "question": question, 159 | "metadata": metadata_dict, 160 | "db_type": "sqlite", 161 | "previous_context": prev_questions, 162 | }, 163 | ) 164 | return r 165 | -------------------------------------------------------------------------------- /defog_local.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/defog-ai/defog-desktop/02eb40f7090be763604b5c7631796aecc0e9beae/defog_local.db -------------------------------------------------------------------------------- /feedback_routes.py: -------------------------------------------------------------------------------- 1 | from fastapi import APIRouter, Request 2 | from generic_utils import ( 3 | make_request, 4 | get_api_key_from_key_name, 5 | format_sql, 6 | format_date_string, 7 | ) 8 | from db_utils import validate_user 9 | import os 10 | import pandas as pd 11 | from fastapi.responses import JSONResponse 12 | from datetime import datetime 13 | 14 | router = APIRouter() 15 | 16 | DEFOG_BASE_URL = os.environ.get("DEFOG_BASE_URL", "https://api.defog.ai") 17 | 18 | 19 | @router.post("/feedback") 20 | async def feedback(request: Request): 21 | """Responds to a feedback request from the user by sending the feedback to the api.defog.ai endpoint.""" 22 | params = await request.json() 23 | key_name = params.get("key_name") 24 | token = params.get("token") 25 | if not validate_user(token): 26 | return JSONResponse( 27 | status_code=401, 28 | content={ 29 | "error": "unauthorized", 30 | "message": "Invalid username or password", 31 | }, 32 | ) 33 | api_key = get_api_key_from_key_name(key_name) 34 | feedback = params.get("feedback") # "good" or "Bad" 35 | text = params.get("text") # the text of the feedback 36 | dev = params.get("dev") # True or False 37 | response = params.get("response", {}) 38 | 39 | if "columns" in response: 40 | del response["columns"] 41 | 42 | if "data" in response: 43 | del response["data"] 44 | 45 | if not feedback: 46 | feedback = "good" 47 | 48 | res = await send_feedback( 49 | { 50 | "api_key": api_key, 51 | "feedback": feedback, 52 | "text": text, 53 | "dev": dev, 54 | "response": response, 55 | }, 56 | ) 57 | return res 58 | 59 | 60 | @router.post("/get_feedback") 61 | async def get_feedback(request: Request): 62 | """Responds by fetching the feedback users have given in the past.""" 63 | params = await request.json() 64 | token = params.get("token") 65 | if not validate_user(token): 66 | return JSONResponse( 67 | status_code=401, 68 | content={ 69 | "error": "unauthorized", 70 | "message": "Invalid username or password", 71 | }, 72 | ) 73 | key_name = params.get("key_name") 74 | api_key = get_api_key_from_key_name(key_name) 75 | url = DEFOG_BASE_URL + "/get_feedback" 76 | res = await make_request(url, json={"api_key": api_key}) 77 | data = res["data"] 78 | for idx, item in enumerate(data): 79 | # first item is created_at 80 | data[idx][0] = format_date_string(item[0]) 81 | 82 | # third item is the SQL query 83 | data[idx][3] = format_sql(item[3]) 84 | 85 | df = pd.DataFrame(data, columns=res["columns"]) 86 | # keep the most recent feedback for each question, query pair 87 | df = df.sort_values(by="created_at", ascending=False).drop_duplicates( 88 | subset=["question", "query_generated"] 89 | ) 90 | question_id_text = ( 91 | df[df.question_id.notnull()].set_index("question_id")["question"].to_dict() 92 | ) 93 | df["parent_question_text"] = df["parent_question_id"].map( 94 | lambda x: ( 95 | question_id_text.get( 96 | x, 97 | "not captured as no feedback was given for the parent question.", 98 | ) 99 | if x 100 | else None 101 | ) 102 | ) 103 | del df["parent_question_id"] 104 | del df["question_id"] 105 | data = df.values.tolist() 106 | columns = df.columns.tolist() 107 | res["data"] = data 108 | res["columns"] = columns[:-1] # remove the last column (parent_question_text) 109 | return res 110 | 111 | 112 | @router.post("/get_instructions_recommendation") 113 | async def get_instructions_recommendation(request: Request): 114 | """Uses negative feedback for a query to provide recommendations for instructions that might improve it.""" 115 | params = await request.json() 116 | token = params.get("token") 117 | if not validate_user(token): 118 | return JSONResponse( 119 | status_code=401, 120 | content={ 121 | "error": "unauthorized", 122 | "message": "Invalid username or password", 123 | }, 124 | ) 125 | key_name = params.get("key_name") 126 | api_key = get_api_key_from_key_name(key_name) 127 | question = params.get("question") 128 | sql_generated = params.get("sql_generated") 129 | user_feedback = params.get("user_feedback") 130 | url = DEFOG_BASE_URL + "/reflect_on_error" 131 | 132 | r = await make_request( 133 | url=url, 134 | json={ 135 | "api_key": api_key, 136 | "question": question, 137 | "sql_generated": sql_generated, 138 | "error": user_feedback, 139 | }, 140 | ) 141 | return r 142 | 143 | 144 | ### Helper functions ### 145 | 146 | 147 | async def send_feedback(params_obj): 148 | """ 149 | Sends the feedback to the api.defog.ai endpoint. 150 | 151 | Args: 152 | params_obj (dict): A dictionary containing key-value pairs representing feedback details. 153 | The dictionary should have the following keys: 154 | - 'api_key' (str): The API key for authentication. 155 | - 'feedback' (str): The type of feedback: 'Good' or 'Bad'. 156 | - 'text' (str): Feedback text provided by the user; empty if the feedback is 'Good'. 157 | - 'dev' (bool): Indicates if the feedback is being sent in development mode. 158 | - 'response' (dict): A dictionary representing the response object related to the feedback. 159 | This dictionary must contain the following keys: 160 | - 'question' (str): The question asked by the user. 161 | - 'questionId' (str): The unique identifier for the question. 162 | - 'generatedSQL' (str): The SQL query generated in response to the question. 163 | Returns: 164 | response (dict): The response object from the API request with desired "status" key with value "received". 165 | 166 | """ 167 | url = DEFOG_BASE_URL + "/feedback" 168 | res = await make_request(url, json=params_obj) 169 | return res -------------------------------------------------------------------------------- /generic_utils.py: -------------------------------------------------------------------------------- 1 | import httpx 2 | import os 3 | import sqlparse 4 | from datetime import datetime 5 | 6 | DEFOG_API_KEY = os.environ["DEFOG_API_KEY"] # replace with your DEFOG_API_KEY 7 | DEFOG_API_KEY_NAMES = os.environ.get("DEFOG_API_KEY_NAMES", "") 8 | 9 | 10 | async def make_request(url, json): 11 | async with httpx.AsyncClient() as client: 12 | r = await client.post( 13 | url, 14 | json=json, 15 | timeout=60, 16 | ) 17 | 18 | return r.json() 19 | 20 | 21 | def convert_nested_dict_to_list(table_metadata): 22 | metadata = [] 23 | for key in table_metadata: 24 | table_name = key 25 | for item in table_metadata[key]: 26 | item["table_name"] = table_name 27 | if "column_description" not in item: 28 | item["column_description"] = "" 29 | metadata.append(item) 30 | return metadata 31 | 32 | 33 | def get_api_key_from_key_name(key_name): 34 | if key_name and key_name in DEFOG_API_KEY_NAMES: 35 | idx = DEFOG_API_KEY_NAMES.split(",").index(key_name) 36 | api_key = DEFOG_API_KEY.split(",")[idx] 37 | else: 38 | api_key = DEFOG_API_KEY.split(",")[0] 39 | return api_key 40 | 41 | 42 | def format_sql(sql): 43 | """ 44 | Formats SQL query to be more readable 45 | """ 46 | return sqlparse.format(sql, reindent=True, keyword_case="upper") 47 | 48 | 49 | def format_date_string(iso_date_string): 50 | """ 51 | Formats date string to be more readable 52 | """ 53 | date = datetime.strptime(iso_date_string, "%Y-%m-%dT%H:%M:%S.%f") 54 | return date.strftime("%Y-%m-%d %H:%M") 55 | -------------------------------------------------------------------------------- /imgo_routes.py: -------------------------------------------------------------------------------- 1 | from fastapi import APIRouter, Request 2 | from generic_utils import ( 3 | make_request, 4 | get_api_key_from_key_name, 5 | ) 6 | from db_utils import validate_user, get_db_type_creds 7 | import os 8 | from fastapi.responses import JSONResponse 9 | 10 | router = APIRouter() 11 | 12 | DEFOG_BASE_URL = os.environ.get("DEFOG_BASE_URL", "https://api.defog.ai") 13 | 14 | 15 | async def send_imgo_request( 16 | request: Request, endpoint: str, additional_payload: dict = None 17 | ): 18 | """Helper function to handle IMGO request processing.""" 19 | params = await request.json() 20 | token = params.get("token") 21 | if not validate_user(token): 22 | return JSONResponse( 23 | status_code=401, 24 | content={ 25 | "error": "unauthorized", 26 | "message": "Invalid username or password", 27 | }, 28 | ) 29 | key_name = params.get("key_name") 30 | optimized_glossary = params.get("optimized_glossary", None) 31 | optimized_metadata = params.get("optimized_metadata", None) 32 | 33 | api_key = get_api_key_from_key_name(key_name) 34 | db_type, _ = get_db_type_creds(api_key) 35 | 36 | payload = { 37 | "api_key": api_key, 38 | "db_type": db_type, 39 | "optimized_glossary": optimized_glossary, 40 | "optimized_metadata": optimized_metadata, 41 | } 42 | 43 | if additional_payload: 44 | payload.update(additional_payload) 45 | 46 | url = f"{DEFOG_BASE_URL}/{endpoint}" 47 | return await make_request(url, json=payload) 48 | 49 | 50 | @router.post("/generate_golden_queries_from_questions") 51 | async def generate_golden_queries_from_questions(request: Request): 52 | """Generates golden queries for the current set of golden questions.""" 53 | return await send_imgo_request(request, "imgo_gen_golden_queries") 54 | 55 | 56 | @router.post("/check_generated_golden_queries_validity") 57 | async def check_generated_golden_queries_validity(request: Request): 58 | """Checks if the generated golden queries are valid and returns the invalid ones with the error message.""" 59 | return await send_imgo_request(request, "imgo_check_golden_queries_valid") 60 | 61 | 62 | @router.post("/check_generated_golden_queries_correctness") 63 | async def check_generated_golden_queries_correctness(request: Request): 64 | """Checks if the generated golden queries are correct and returns a confirmation message that it was triggered.""" 65 | return await send_imgo_request(request, "imgo_check_golden_queries_correct") 66 | 67 | 68 | @router.post("/optimize_glossary") 69 | async def optimize_glossary(request: Request): 70 | """Responds to a request for optimized glossary and returns the optimized glossary.""" 71 | return await send_imgo_request(request, "imgo_optimize_glossary") 72 | 73 | 74 | @router.post("/optimize_metadata") 75 | async def optimize_metadata(request: Request): 76 | """Responds to a request for optimized metadata and returns the optimized metadata.""" 77 | return await send_imgo_request(request, "imgo_optimize_metadata") 78 | 79 | 80 | @router.post("/get_recommendation_for_glossary_and_metadata") 81 | async def get_recommendation_for_glossary_and_metadata(request: Request): 82 | """Responds to a recommendation request for whether to improve glossary and/or metadata.""" 83 | res = await send_imgo_request(request, "imgo_get_recommendation") 84 | 85 | # Initialize flags to indicate if optimization is recommended 86 | res["is_glossary_optimization_recommended"] = False 87 | res["is_metadata_optimization_recommended"] = False 88 | 89 | # Check if the message suggests optimizing glossary or metadata 90 | if "message" in res and res["message"]: 91 | message = str(res["message"]).lower() 92 | if "glossary" in message: 93 | res["is_glossary_optimization_recommended"] = True 94 | if "metadata" in message: 95 | res["is_metadata_optimization_recommended"] = True 96 | 97 | return res 98 | 99 | 100 | 101 | @router.post("/check_task_status") 102 | async def check_task_status(request: Request): 103 | """Checks the status of a task and returns the status which can be either 'processing' or 'completed'.""" 104 | print("Checking task status") 105 | params = await request.json() 106 | token = params.get("token") 107 | if not validate_user(token): 108 | return JSONResponse( 109 | status_code=401, 110 | content={ 111 | "error": "unauthorized", 112 | "message": "Invalid username or password", 113 | }, 114 | ) 115 | key_name = params.get("key_name") 116 | api_key = get_api_key_from_key_name(key_name) 117 | 118 | task_id = params.get("task_id") 119 | 120 | url = f"{DEFOG_BASE_URL}/check_imgo_task_status" 121 | return await make_request( 122 | url, 123 | json={"api_key": api_key, "task_id": task_id}, 124 | ) 125 | -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from dotenv import load_dotenv 3 | import os 4 | 5 | load_dotenv("config.env") 6 | 7 | DEFOG_API_KEY = os.environ.get("DEFOG_API_KEY") 8 | if DEFOG_API_KEY is None or DEFOG_API_KEY == "" or DEFOG_API_KEY == "YOUR_API_KEY": 9 | raise Exception( 10 | f"Your DEFOG_API_KEY is currently invalid. Please set DEFOG_API_KEY to your valid API key in your config.env file." 11 | ) 12 | 13 | import traceback 14 | from fastapi import FastAPI, Request, WebSocket, WebSocketDisconnect 15 | from fastapi.responses import FileResponse 16 | from fastapi.middleware.cors import CORSMiddleware 17 | from connection_manager import ConnectionManager 18 | from utils import make_request 19 | 20 | from db_utils import ( 21 | get_all_analyses, 22 | get_analysis_data, 23 | initialise_analysis, 24 | get_all_tools, 25 | ) 26 | from generic_utils import get_api_key_from_key_name 27 | import integration_routes, query_routes, admin_routes, auth_routes, readiness_routes, csv_routes, feedback_routes, agent_routes, imgo_routes 28 | 29 | logging.basicConfig(level=logging.INFO) 30 | 31 | manager = ConnectionManager() 32 | 33 | app = FastAPI() 34 | app.include_router(integration_routes.router) 35 | app.include_router(query_routes.router) 36 | app.include_router(admin_routes.router) 37 | app.include_router(auth_routes.router) 38 | app.include_router(readiness_routes.router) 39 | app.include_router(csv_routes.router) 40 | app.include_router(feedback_routes.router) 41 | app.include_router(imgo_routes.router) 42 | app.include_router(agent_routes.router) 43 | 44 | origins = ["*"] 45 | app.add_middleware( 46 | CORSMiddleware, 47 | allow_origins=origins, 48 | allow_credentials=True, 49 | allow_methods=["*"], 50 | allow_headers=["*"], 51 | ) 52 | 53 | request_types = ["clarify", "understand", "gen_approaches", "gen_steps", "gen_analysis"] 54 | from pathlib import Path 55 | 56 | home_dir = Path.home() 57 | analysis_assets_dir = home_dir / "defog_report_assets" 58 | analysis_assets_dir = os.environ.get( 59 | "REPORT_ASSETS_DIR", analysis_assets_dir.as_posix() 60 | ) 61 | 62 | from fastapi.staticfiles import StaticFiles 63 | 64 | base_path = os.path.abspath(".") 65 | print(base_path) 66 | one_level_up = os.path.abspath(os.path.join(base_path, "..")) 67 | if os.path.exists(os.path.join(one_level_up, "Content/Resources")): 68 | base_path = os.path.join(one_level_up, "Content/Resources") 69 | 70 | directory = os.path.join(base_path, "out") 71 | print(directory) 72 | app.mount("/static", StaticFiles(directory=directory, html=True), name="static") 73 | 74 | 75 | llm_calls_url = os.environ.get("LLM_CALLS_URL", "https://api.defog.ai/agent_endpoint") 76 | 77 | 78 | @app.get("/ping") 79 | async def root(): 80 | return {"message": "Hello World"} 81 | 82 | 83 | edit_request_types_and_prop_names = { 84 | "edit_analysis_md": { 85 | "table_column": "gen_analysis", 86 | "prop_name": "analysis_sections", 87 | }, 88 | "edit_approaches": {"table_column": "gen_approaches", "prop_name": "approaches"}, 89 | } 90 | 91 | 92 | async def get_classification(question, api_key, debug=False): 93 | r = await make_request( 94 | f"{os.environ.get('DEFOG_BASE_URL', 'https://api.defog.ai')}/update_agent_feedback", 95 | payload={"question": question, "api_key": api_key}, 96 | ) 97 | if r.status_code == 200: 98 | return r.json() 99 | else: 100 | print(f"Error getting question classification: {r.status_code}") 101 | print(r.text) 102 | 103 | 104 | @app.post("/get_analyses") 105 | async def all_analyses(request: Request): 106 | params = await request.json() 107 | key_name = params.get("key_name") 108 | api_key = get_api_key_from_key_name(key_name) 109 | try: 110 | err, analyses = get_all_analyses(api_key=api_key) 111 | if err is not None: 112 | return {"success": False, "error_message": err} 113 | 114 | return {"success": True, "analyses": analyses} 115 | except Exception as e: 116 | print(e) 117 | traceback.print_exc() 118 | return {"success": False, "error_message": "Incorrect request"} 119 | 120 | 121 | @app.post("/get_analysis") 122 | async def one_analysis(request: Request): 123 | try: 124 | params = await request.json() 125 | analysis_id = params.get("analysis_id") 126 | 127 | print("get_one_analysis", params) 128 | 129 | err, analysis_data = get_analysis_data(analysis_id) 130 | 131 | if err is not None: 132 | return {"success": False, "error_message": err} 133 | 134 | return {"success": True, "analysis_data": analysis_data} 135 | except Exception as e: 136 | print(e) 137 | traceback.print_exc() 138 | return {"success": False, "error_message": "Incorrect request"} 139 | 140 | 141 | @app.post("/create_analysis") 142 | async def create_analysis(request: Request): 143 | try: 144 | params = await request.json() 145 | token = params.get("token") 146 | 147 | key_name = params.get("key_name") 148 | api_key = get_api_key_from_key_name(key_name) 149 | 150 | print("create_analysis", params) 151 | 152 | err, analysis_data = await initialise_analysis( 153 | user_question="", 154 | token=token, 155 | api_key=api_key, 156 | custom_id=params.get("custom_id"), 157 | other_data=params.get("other_data"), 158 | ) 159 | 160 | if err is not None: 161 | return {"success": False, "error_message": err} 162 | 163 | return {"success": True, "analysis_data": analysis_data} 164 | except Exception as e: 165 | print(e) 166 | return {"success": False, "error_message": "Incorrect request"} 167 | 168 | 169 | @app.get("/get_assets") 170 | async def get_assets(path: str): 171 | try: 172 | return FileResponse(os.path.join(analysis_assets_dir, path)) 173 | except Exception as e: 174 | print(e) 175 | traceback.print_exc() 176 | return {"success": False, "error_message": "Error getting assets"} 177 | 178 | 179 | @app.get("/") 180 | def read_root(): 181 | return { 182 | "status": "Hi there! You were probably looking to visit http://localhost:33364/static/query-data.html or http://localhost:33364/static/log-in.html" 183 | } 184 | 185 | 186 | @app.get("/health") 187 | def health_check(): 188 | return {"status": "ok"} 189 | 190 | 191 | async def analyse_data(): 192 | yield { 193 | "success": False, 194 | "model_analysis": "The analysis feature is only available in the Defog Docker version. In this version, you can look at the data returned from SQL query that Defog generated.", 195 | } 196 | return 197 | 198 | 199 | @app.websocket("/analyse_data") 200 | async def analyse_data_endpoint(websocket: WebSocket): 201 | await manager.connect(websocket) 202 | try: 203 | while True: 204 | data = await websocket.receive_json() 205 | if "ping" in data: 206 | # don't do anything 207 | continue 208 | 209 | async for chunk in analyse_data(): 210 | await manager.send_personal_message(chunk, websocket) 211 | 212 | except WebSocketDisconnect as e: 213 | # logging.info("Disconnected. Error: " + str(e)) 214 | # traceback.print_exc() 215 | manager.disconnect(websocket) 216 | await websocket.close() 217 | except Exception as e: 218 | # logging.info("Disconnected. Error: " + str(e)) 219 | traceback.print_exc() 220 | await manager.send_personal_message( 221 | {"success": False, "error_message": str(e)[:300]}, websocket 222 | ) 223 | # other reasons for disconnect, like websocket being closed or a timeout 224 | manager.disconnect(websocket) 225 | await websocket.close() 226 | 227 | 228 | @app.post("/get_user_tools") 229 | async def get_user_tools(request: Request): 230 | """ 231 | Get all tools available to the user. 232 | """ 233 | err, tools = get_all_tools() 234 | if err: 235 | return {"success": False, "error_message": err} 236 | return {"success": True, "tools": tools} 237 | 238 | 239 | import threading 240 | import webbrowser 241 | import uvicorn 242 | 243 | if __name__ == "__main__": 244 | # open the browser after a 1 second delay 245 | threading.Timer( 246 | 1, lambda: webbrowser.open("http://localhost:33364/static/index.html") 247 | ).start() 248 | uvicorn.run(app, host="0.0.0.0", port=33364) 249 | -------------------------------------------------------------------------------- /out/404: -------------------------------------------------------------------------------- 1 | 404: This page could not be found

404

This page could not be found.

-------------------------------------------------------------------------------- /out/404.html: -------------------------------------------------------------------------------- 1 | 404: This page could not be found

404

This page could not be found.

-------------------------------------------------------------------------------- /out/_next/static/chunks/209-a8949ab7cba0c2ea.js: -------------------------------------------------------------------------------- 1 | "use strict";(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[209],{70869:function(e,t){Object.defineProperty(t,"__esModule",{value:!0}),t.default={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M632 888H392c-4.4 0-8 3.6-8 8v32c0 17.7 14.3 32 32 32h192c17.7 0 32-14.3 32-32v-32c0-4.4-3.6-8-8-8zM512 64c-181.1 0-328 146.9-328 328 0 121.4 66 227.4 164 284.1V792c0 17.7 14.3 32 32 32h264c17.7 0 32-14.3 32-32V676.1c98-56.7 164-162.7 164-284.1 0-181.1-146.9-328-328-328zm127.9 549.8L604 634.6V752H420V634.6l-35.9-20.8C305.4 568.3 256 484.5 256 392c0-141.4 114.6-256 256-256s256 114.6 256 256c0 92.5-49.4 176.3-128.1 221.8z"}}]},name:"bulb",theme:"outlined"}},15416:function(e,t){Object.defineProperty(t,"__esModule",{value:!0}),t.default={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M536.1 273H488c-4.4 0-8 3.6-8 8v275.3c0 2.6 1.2 5 3.3 6.5l165.3 120.7c3.6 2.6 8.6 1.9 11.2-1.7l28.6-39c2.7-3.7 1.9-8.7-1.7-11.2L544.1 528.5V281c0-4.4-3.6-8-8-8zm219.8 75.2l156.8 38.3c5 1.2 9.9-2.6 9.9-7.7l.8-161.5c0-6.7-7.7-10.5-12.9-6.3L752.9 334.1a8 8 0 003 14.1zm167.7 301.1l-56.7-19.5a8 8 0 00-10.1 4.8c-1.9 5.1-3.9 10.1-6 15.1-17.8 42.1-43.3 80-75.9 112.5a353 353 0 01-112.5 75.9 352.18 352.18 0 01-137.7 27.8c-47.8 0-94.1-9.3-137.7-27.8a353 353 0 01-112.5-75.9c-32.5-32.5-58-70.4-75.9-112.5A353.44 353.44 0 01171 512c0-47.8 9.3-94.2 27.8-137.8 17.8-42.1 43.3-80 75.9-112.5a353 353 0 01112.5-75.9C430.6 167.3 477 158 524.8 158s94.1 9.3 137.7 27.8A353 353 0 01775 261.7c10.2 10.3 19.8 21 28.6 32.3l59.8-46.8C784.7 146.6 662.2 81.9 524.6 82 285 82.1 92.6 276.7 95 516.4 97.4 751.9 288.9 942 524.8 942c185.5 0 343.5-117.6 403.7-282.3 1.5-4.2-.7-8.9-4.9-10.4z"}}]},name:"history",theme:"outlined"}},59170:function(e,t){Object.defineProperty(t,"__esModule",{value:!0}),t.default={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M464 512a48 48 0 1096 0 48 48 0 10-96 0zm200 0a48 48 0 1096 0 48 48 0 10-96 0zm-400 0a48 48 0 1096 0 48 48 0 10-96 0zm661.2-173.6c-22.6-53.7-55-101.9-96.3-143.3a444.35 444.35 0 00-143.3-96.3C630.6 75.7 572.2 64 512 64h-2c-60.6.3-119.3 12.3-174.5 35.9a445.35 445.35 0 00-142 96.5c-40.9 41.3-73 89.3-95.2 142.8-23 55.4-34.6 114.3-34.3 174.9A449.4 449.4 0 00112 714v152a46 46 0 0046 46h152.1A449.4 449.4 0 00510 960h2.1c59.9 0 118-11.6 172.7-34.3a444.48 444.48 0 00142.8-95.2c41.3-40.9 73.8-88.7 96.5-142 23.6-55.2 35.6-113.9 35.9-174.5.3-60.9-11.5-120-34.8-175.6zm-151.1 438C704 845.8 611 884 512 884h-1.7c-60.3-.3-120.2-15.3-173.1-43.5l-8.4-4.5H188V695.2l-4.5-8.4C155.3 633.9 140.3 574 140 513.7c-.4-99.7 37.7-193.3 107.6-263.8 69.8-70.5 163.1-109.5 262.8-109.9h1.7c50 0 98.5 9.7 144.2 28.9 44.6 18.7 84.6 45.6 119 80 34.3 34.3 61.3 74.4 80 119 19.4 46.2 29.1 95.2 28.9 145.8-.6 99.6-39.7 192.9-110.1 262.7z"}}]},name:"message",theme:"outlined"}},80286:function(e,t){Object.defineProperty(t,"__esModule",{value:!0}),t.default={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M512 64C264.6 64 64 264.6 64 512s200.6 448 448 448 448-200.6 448-448S759.4 64 512 64zm0 820c-205.4 0-372-166.6-372-372s166.6-372 372-372 372 166.6 372 372-166.6 372-372 372z"}},{tag:"path",attrs:{d:"M719.4 499.1l-296.1-215A15.9 15.9 0 00398 297v430c0 13.1 14.8 20.5 25.3 12.9l296.1-215a15.9 15.9 0 000-25.8zm-257.6 134V390.9L628.5 512 461.8 633.1z"}}]},name:"play-circle",theme:"outlined"}},70895:function(e,t){Object.defineProperty(t,"__esModule",{value:!0}),t.default={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M696 480H544V328c0-4.4-3.6-8-8-8h-48c-4.4 0-8 3.6-8 8v152H328c-4.4 0-8 3.6-8 8v48c0 4.4 3.6 8 8 8h152v152c0 4.4 3.6 8 8 8h48c4.4 0 8-3.6 8-8V544h152c4.4 0 8-3.6 8-8v-48c0-4.4-3.6-8-8-8z"}},{tag:"path",attrs:{d:"M512 64C264.6 64 64 264.6 64 512s200.6 448 448 448 448-200.6 448-448S759.4 64 512 64zm0 820c-205.4 0-372-166.6-372-372s166.6-372 372-372 372 166.6 372 372-166.6 372-372 372z"}}]},name:"plus-circle",theme:"outlined"}},36446:function(e,t){Object.defineProperty(t,"__esModule",{value:!0}),t.default={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M908.1 353.1l-253.9-36.9L540.7 86.1c-3.1-6.3-8.2-11.4-14.5-14.5-15.8-7.8-35-1.3-42.9 14.5L369.8 316.2l-253.9 36.9c-7 1-13.4 4.3-18.3 9.3a32.05 32.05 0 00.6 45.3l183.7 179.1-43.4 252.9a31.95 31.95 0 0046.4 33.7L512 754l227.1 119.4c6.2 3.3 13.4 4.4 20.3 3.2 17.4-3 29.1-19.5 26.1-36.9l-43.4-252.9 183.7-179.1c5-4.9 8.3-11.3 9.3-18.3 2.7-17.5-9.5-33.7-27-36.3zM664.8 561.6l36.1 210.3L512 672.7 323.1 772l36.1-210.3-152.8-149L417.6 382 512 190.7 606.4 382l211.2 30.7-152.8 148.9z"}}]},name:"star",theme:"outlined"}},90778:function(e,t){Object.defineProperty(t,"__esModule",{value:!0}),t.default={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M168 504.2c1-43.7 10-86.1 26.9-126 17.3-41 42.1-77.7 73.7-109.4S337 212.3 378 195c42.4-17.9 87.4-27 133.9-27s91.5 9.1 133.8 27A341.5 341.5 0 01755 268.8c9.9 9.9 19.2 20.4 27.8 31.4l-60.2 47a8 8 0 003 14.1l175.7 43c5 1.2 9.9-2.6 9.9-7.7l.8-180.9c0-6.7-7.7-10.5-12.9-6.3l-56.4 44.1C765.8 155.1 646.2 92 511.8 92 282.7 92 96.3 275.6 92 503.8a8 8 0 008 8.2h60c4.4 0 7.9-3.5 8-7.8zm756 7.8h-60c-4.4 0-7.9 3.5-8 7.8-1 43.7-10 86.1-26.9 126-17.3 41-42.1 77.8-73.7 109.4A342.45 342.45 0 01512.1 856a342.24 342.24 0 01-243.2-100.8c-9.9-9.9-19.2-20.4-27.8-31.4l60.2-47a8 8 0 00-3-14.1l-175.7-43c-5-1.2-9.9 2.6-9.9 7.7l-.7 181c0 6.7 7.7 10.5 12.9 6.3l56.4-44.1C258.2 868.9 377.8 932 512.2 932c229.2 0 415.5-183.7 419.8-411.8a8 8 0 00-8-8.2z"}}]},name:"sync",theme:"outlined"}},70439:function(e,t,a){var c=a(79290).default,l=a(44512).default;t.Z=void 0;var f=l(a(17963)),u=c(a(82684)),n=l(a(70869)),r=l(a(32905)),d=u.forwardRef(function(e,t){return u.createElement(r.default,(0,f.default)({},e,{ref:t,icon:n.default}))});t.Z=d},94242:function(e,t,a){var c=a(79290).default,l=a(44512).default;t.Z=void 0;var f=l(a(17963)),u=c(a(82684)),n=l(a(15416)),r=l(a(32905)),d=u.forwardRef(function(e,t){return u.createElement(r.default,(0,f.default)({},e,{ref:t,icon:n.default}))});t.Z=d},80429:function(e,t,a){var c=a(79290).default,l=a(44512).default;t.Z=void 0;var f=l(a(17963)),u=c(a(82684)),n=l(a(59170)),r=l(a(32905)),d=u.forwardRef(function(e,t){return u.createElement(r.default,(0,f.default)({},e,{ref:t,icon:n.default}))});t.Z=d},95153:function(e,t,a){var c=a(79290).default,l=a(44512).default;t.Z=void 0;var f=l(a(17963)),u=c(a(82684)),n=l(a(80286)),r=l(a(32905)),d=u.forwardRef(function(e,t){return u.createElement(r.default,(0,f.default)({},e,{ref:t,icon:n.default}))});t.Z=d},20997:function(e,t,a){var c=a(79290).default,l=a(44512).default;t.Z=void 0;var f=l(a(17963)),u=c(a(82684)),n=l(a(70895)),r=l(a(32905)),d=u.forwardRef(function(e,t){return u.createElement(r.default,(0,f.default)({},e,{ref:t,icon:n.default}))});t.Z=d},90353:function(e,t,a){var c=a(79290).default,l=a(44512).default;t.Z=void 0;var f=l(a(17963)),u=c(a(82684)),n=l(a(36446)),r=l(a(32905)),d=u.forwardRef(function(e,t){return u.createElement(r.default,(0,f.default)({},e,{ref:t,icon:n.default}))});t.Z=d},92e3:function(e,t,a){var c=a(79290).default,l=a(44512).default;t.Z=void 0;var f=l(a(17963)),u=c(a(82684)),n=l(a(90778)),r=l(a(32905)),d=u.forwardRef(function(e,t){return u.createElement(r.default,(0,f.default)({},e,{ref:t,icon:n.default}))});t.Z=d}}]); -------------------------------------------------------------------------------- /out/_next/static/chunks/pages/_error-ee42a9921d95ff81.js: -------------------------------------------------------------------------------- 1 | (self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[820],{81981:function(n,_,u){(window.__NEXT_P=window.__NEXT_P||[]).push(["/_error",function(){return u(30730)}])}},function(n){n.O(0,[774,888,179],function(){return n(n.s=81981)}),_N_E=n.O()}]); -------------------------------------------------------------------------------- /out/_next/static/chunks/pages/doc-dcf4766c050e5b31.js: -------------------------------------------------------------------------------- 1 | (self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[852],{43022:function(e,t,r){(window.__NEXT_P=window.__NEXT_P||[]).push(["/doc",function(){return r(63928)}])},26335:function(e,t,r){"use strict";var n=r(28598),o=r(1887),a=r.n(o);t.Z=()=>(0,n.jsxs)(a(),{children:[(0,n.jsx)("title",{children:"Defog.ai - AI Assistant for Data Analysis"}),(0,n.jsx)("meta",{name:"description",content:"Train your AI data assistant on your own device"}),(0,n.jsx)("meta",{name:"viewport",content:"width=device-width, initial-scale=1"}),(0,n.jsx)("link",{rel:"icon",href:"/favicon.ico"})]})},82067:function(e,t,r){"use strict";var n=r(28598),o=r(82684),a=r(97574),s=r(79869),l=r(34376);r(12691);var i=r(26978),c=r(47663),u=r(56572);t.Z=e=>{let{id:t,userType:r,children:d,rootClassNames:m="",contentClassNames:h=""}=e,{Content:f,Sider:g}=s.Layout,[y,p]=(0,o.useState)([]),[v,k]=(0,o.useContext)(a.S),x=(0,l.useRouter)(),redirect=e=>{x.push(e)},logout=()=>{localStorage.removeItem("defogUser"),localStorage.removeItem("defogToken"),localStorage.removeItem("defogUserType"),k({user:null,token:null,userType:null}),redirect("/static/log-in.html")},_=(0,c.usePathname)();(0,o.useEffect)(()=>{p(("admin"==r?[{key:"manage-database",title:"Manage Database",href:"/static/extract-metadata.html"},{key:"manage-users",title:"Manage Users",href:"/static/manage-users.html"},{key:"manage-tools",title:"Manage tools",href:"/static/manage-tools.html"},{key:"check-readiness",title:"Check Readiness",href:"/static/check-readiness.html"},{key:"align-model",title:"Align Model",href:"/static/align-model.html"},{key:"view-feedback",title:"View Feedback",href:"/static/view-feedback.html"},{key:"query-data",title:"Query Data",href:"/static/query-data.html"},{key:"logout",classNames:"self-end",title:"Logout",href:"#",onClick:logout}]:r?[{key:"query-data",title:"Query Data",href:"/static/query-data.html"},{key:"logout",classNames:"self-end",title:"Logout",href:"#",onClick:logout}]:[]).map(e=>(e.current=e.href==_,e)))},[r]);let[E,w]=(0,o.useState)("flex flex-col md:min-h-screen relative container mx-auto");return(0,o.useEffect)(()=>{"/query-data"===_&&w("flex flex-col md:min-h-screen relative")},[_]),(0,n.jsxs)("div",{className:(0,u.m6)(E,m),children:[y.length?(0,n.jsx)(i.l2,{rootClassNames:"border-b",items:y}):(0,n.jsx)(n.Fragment,{}),(0,n.jsx)("div",{className:(0,u.m6)("grow",h),children:d})]})}},63928:function(e,t,r){"use strict";let n;r.r(t),r.d(t,{default:function(){return DocPage}});var o=r(28598);r(10514);var a=r(82684),s=r(26335),l=r(34376);let ErrorBoundary=class ErrorBoundary extends a.Component{static getDerivedStateFromError(e){return console.log(e),{hasError:!0}}componentDidCatch(e,t){console.log(e,t),this.setState({hasError:!0,errorInfo:t,error:e})}render(){return this.state.hasError?(0,o.jsx)("div",{className:"error-boundary-ctr",children:this.props.maybeOldAnalysis?(0,o.jsx)("p",{className:"text-red",children:"You might need to re run this analysis for the latest version of the UI."}):(0,o.jsx)("p",{children:"Something went wrong."})}):this.props.children}constructor(e){super(e),this.state={hasError:!1,errorInfo:null,error:null,maybeOldAnalysis:e.maybeOldAnalysis}}};let i="undefined"!=typeof crypto&&crypto.randomUUID&&crypto.randomUUID.bind(crypto);var c={randomUUID:i};let u=new Uint8Array(16),d=[];for(let e=0;e<256;++e)d.push((e+256).toString(16).slice(1));var esm_browser_v4=function(e,t,r){if(c.randomUUID&&!t&&!e)return c.randomUUID();e=e||{};let o=e.random||(e.rng||function(){if(!n&&!(n="undefined"!=typeof crypto&&crypto.getRandomValues&&crypto.getRandomValues.bind(crypto)))throw Error("crypto.getRandomValues() not supported. See https://github.com/uuidjs/uuid#getrandomvalues-not-supported");return n(u)})();if(o[6]=15&o[6]|64,o[8]=63&o[8]|128,t){r=r||0;for(let e=0;e<16;++e)t[r+e]=o[e];return t}return function(e,t=0){return d[e[t+0]]+d[e[t+1]]+d[e[t+2]]+d[e[t+3]]+"-"+d[e[t+4]]+d[e[t+5]]+"-"+d[e[t+6]]+d[e[t+7]]+"-"+d[e[t+8]]+d[e[t+9]]+"-"+d[e[t+10]]+d[e[t+11]]+d[e[t+12]]+d[e[t+13]]+d[e[t+14]]+d[e[t+15]]}(o)},m=r(97574),h=r(82067);r(27122);var f=r(48497);function DocPage(){let e=(0,l.useRouter)(),[t,r]=(0,a.useContext)(m.S),[n,i]=(0,a.useState)(t.user),c=t.token,u=(0,a.useRef)(null);return(0,a.useEffect)(()=>{var n;let o=t.token,a=t.userType;if(!a){let t=localStorage.getItem("defogUser");if(i(t),o=localStorage.getItem("defogToken"),a=localStorage.getItem("defogUserType"),!t||!o||!a){e.push("/static/log-in.html");return}r({user:t,token:o,userType:a})}if(o||e.push("/static/log-in.html"),u.current=null==e?void 0:null===(n=e.query)||void 0===n?void 0:n.docId,!u.current){let t=null==e?void 0:e.asPath,r=t.match(/doc\?docId=([^&]*)/);r&&(u.current=r[1])}(async function(){try{e&&("new"===u.current||!u.current)&&(u.current=esm_browser_v4(),e.replace({query:{docId:u.current}}))}catch(e){console.log(e)}})()},[e,t,r]),console.log(u.current,c),(f.env.NEXT_PUBLIC_API_KEY_NAMES||"REPLACE_WITH_API_KEY_NAMES").split(","),u.current?(0,o.jsxs)(o.Fragment,{children:[(0,o.jsx)(s.Z,{}),(0,o.jsx)(h.Z,{id:"view-notebooks",userType:"admin",children:(0,o.jsx)(ErrorBoundary,{})})]}):(0,o.jsx)("h5",{children:"Verifying your details..."})}},27122:function(){},10514:function(){}},function(e){e.O(0,[304,435,774,888,179],function(){return e(e.s=43022)}),_N_E=e.O()}]); -------------------------------------------------------------------------------- /out/_next/static/chunks/pages/index-b92da338cfbf2e22.js: -------------------------------------------------------------------------------- 1 | (self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[405],{75557:function(e,t,a){(window.__NEXT_P=window.__NEXT_P||[]).push(["/",function(){return a(48591)}])},26335:function(e,t,a){"use strict";var s=a(28598),l=a(1887),r=a.n(l);t.Z=()=>(0,s.jsxs)(r(),{children:[(0,s.jsx)("title",{children:"Defog.ai - AI Assistant for Data Analysis"}),(0,s.jsx)("meta",{name:"description",content:"Train your AI data assistant on your own device"}),(0,s.jsx)("meta",{name:"viewport",content:"width=device-width, initial-scale=1"}),(0,s.jsx)("link",{rel:"icon",href:"/favicon.ico"})]})},82067:function(e,t,a){"use strict";var s=a(28598),l=a(82684),r=a(97574),i=a(79869),o=a(34376);a(12691);var n=a(26978),c=a(47663),u=a(56572);t.Z=e=>{let{id:t,userType:a,children:d,rootClassNames:m="",contentClassNames:h=""}=e,{Content:g,Sider:f}=i.Layout,[x,y]=(0,l.useState)([]),[k,p]=(0,l.useContext)(r.S),w=(0,o.useRouter)(),redirect=e=>{w.push(e)},logout=()=>{localStorage.removeItem("defogUser"),localStorage.removeItem("defogToken"),localStorage.removeItem("defogUserType"),p({user:null,token:null,userType:null}),redirect("/static/log-in.html")},j=(0,c.usePathname)();(0,l.useEffect)(()=>{y(("admin"==a?[{key:"manage-database",title:"Manage Database",href:"/static/extract-metadata.html"},{key:"manage-users",title:"Manage Users",href:"/static/manage-users.html"},{key:"manage-tools",title:"Manage tools",href:"/static/manage-tools.html"},{key:"check-readiness",title:"Check Readiness",href:"/static/check-readiness.html"},{key:"align-model",title:"Align Model",href:"/static/align-model.html"},{key:"view-feedback",title:"View Feedback",href:"/static/view-feedback.html"},{key:"query-data",title:"Query Data",href:"/static/query-data.html"},{key:"logout",classNames:"self-end",title:"Logout",href:"#",onClick:logout}]:a?[{key:"query-data",title:"Query Data",href:"/static/query-data.html"},{key:"logout",classNames:"self-end",title:"Logout",href:"#",onClick:logout}]:[]).map(e=>(e.current=e.href==j,e)))},[a]);let[v,N]=(0,l.useState)("flex flex-col md:min-h-screen relative container mx-auto");return(0,l.useEffect)(()=>{"/query-data"===j&&N("flex flex-col md:min-h-screen relative")},[j]),(0,s.jsxs)("div",{className:(0,u.m6)(v,m),children:[x.length?(0,s.jsx)(n.l2,{rootClassNames:"border-b",items:x}):(0,s.jsx)(s.Fragment,{}),(0,s.jsx)("div",{className:(0,u.m6)("grow",h),children:d})]})}},48591:function(e,t,a){"use strict";a.r(t);var s=a(28598),l=a(82684),r=a(34376),i=a(26335),o=a(97574),n=a(82067),c=a(49603);t.default=()=>{let[e,t]=(0,l.useState)(""),[a,u]=(0,l.useContext)(o.S),[d,m]=(0,l.useState)(!0),h=(0,r.useRouter)();return(0,l.useEffect)(()=>{let e=a.userType;if(!e){let t=localStorage.getItem("defogUser"),a=localStorage.getItem("defogToken");if(e=localStorage.getItem("defogUserType"),!t||!a||!e){h.push("/static/log-in.html");return}u({user:t,token:a,userType:e})}t(e),m(!1),"admin"===e?(console.log("redirecting to extract metadata.."),h.push("/static/extract-metadata.html")):(console.log("redirecting to query data.."),h.push("/static/query-data.html"))},[]),(0,s.jsxs)(s.Fragment,{children:[(0,s.jsx)(i.Z,{}),(0,s.jsx)(n.Z,{userType:e,children:(0,s.jsx)("div",{className:"flex flex-col items-center justify-center min-h-screen bg-gray-100 py-12 px-4 sm:px-6 lg:px-8",children:(0,s.jsxs)("div",{className:"max-w-md w-full space-y-8 p-10 bg-white rounded-xl shadow-md",children:[(0,s.jsxs)("div",{className:"text-center",children:[(0,s.jsx)("h1",{className:"text-3xl font-semibold text-gray-900 mb-4",children:"Welcome to Defog!"}),(0,s.jsxs)("h3",{className:"text-lg text-gray-700",children:["Please wait while we log you in and redirect you to the right page... ",(0,s.jsx)(c.default,{})]})]}),d&&(0,s.jsx)("div",{className:"flex justify-center mt-6",children:(0,s.jsxs)("svg",{className:"animate-spin h-5 w-5 text-indigo-600",xmlns:"http://www.w3.org/2000/svg",fill:"none",viewBox:"0 0 24 24",children:[(0,s.jsx)("circle",{className:"opacity-25",cx:"12",cy:"12",r:"10",stroke:"currentColor",strokeWidth:"4"}),(0,s.jsx)("path",{className:"opacity-75",fill:"currentColor",d:"M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z"})]})})]})})})]})}}},function(e){e.O(0,[435,774,888,179],function(){return e(e.s=75557)}),_N_E=e.O()}]); -------------------------------------------------------------------------------- /out/_next/static/chunks/pages/log-in-beee6f8bec469b4f.js: -------------------------------------------------------------------------------- 1 | (self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[720],{13831:function(e,t,s){(window.__NEXT_P=window.__NEXT_P||[]).push(["/log-in",function(){return s(76794)}])},26335:function(e,t,s){"use strict";var a=s(28598),l=s(1887),r=s.n(l);t.Z=()=>(0,a.jsxs)(r(),{children:[(0,a.jsx)("title",{children:"Defog.ai - AI Assistant for Data Analysis"}),(0,a.jsx)("meta",{name:"description",content:"Train your AI data assistant on your own device"}),(0,a.jsx)("meta",{name:"viewport",content:"width=device-width, initial-scale=1"}),(0,a.jsx)("link",{rel:"icon",href:"/favicon.ico"})]})},82067:function(e,t,s){"use strict";var a=s(28598),l=s(82684),r=s(97574),n=s(79869),i=s(34376);s(12691);var o=s(26978),c=s(47663),m=s(56572);t.Z=e=>{let{id:t,userType:s,children:u,rootClassNames:d="",contentClassNames:g=""}=e,{Content:f,Sider:h}=n.Layout,[x,y]=(0,l.useState)([]),[p,b]=(0,l.useContext)(r.S),j=(0,i.useRouter)(),redirect=e=>{j.push(e)},logout=()=>{localStorage.removeItem("defogUser"),localStorage.removeItem("defogToken"),localStorage.removeItem("defogUserType"),b({user:null,token:null,userType:null}),redirect("/static/log-in.html")},w=(0,c.usePathname)();(0,l.useEffect)(()=>{y(("admin"==s?[{key:"manage-database",title:"Manage Database",href:"/static/extract-metadata.html"},{key:"manage-users",title:"Manage Users",href:"/static/manage-users.html"},{key:"manage-tools",title:"Manage tools",href:"/static/manage-tools.html"},{key:"check-readiness",title:"Check Readiness",href:"/static/check-readiness.html"},{key:"align-model",title:"Align Model",href:"/static/align-model.html"},{key:"view-feedback",title:"View Feedback",href:"/static/view-feedback.html"},{key:"query-data",title:"Query Data",href:"/static/query-data.html"},{key:"logout",classNames:"self-end",title:"Logout",href:"#",onClick:logout}]:s?[{key:"query-data",title:"Query Data",href:"/static/query-data.html"},{key:"logout",classNames:"self-end",title:"Logout",href:"#",onClick:logout}]:[]).map(e=>(e.current=e.href==w,e)))},[s]);let[k,v]=(0,l.useState)("flex flex-col md:min-h-screen relative container mx-auto");return(0,l.useEffect)(()=>{"/query-data"===w&&v("flex flex-col md:min-h-screen relative")},[w]),(0,a.jsxs)("div",{className:(0,m.m6)(k,d),children:[x.length?(0,a.jsx)(o.l2,{rootClassNames:"border-b",items:x}):(0,a.jsx)(a.Fragment,{}),(0,a.jsx)("div",{className:(0,m.m6)("grow",g),children:u})]})}},76794:function(e,t,s){"use strict";s.r(t);var a=s(28598),l=s(82684),r=s(34376),n=s(18953),i=s(26335),o=s(97574),c=s(82067),m=s(48497);t.default=()=>{let[e,t]=(0,l.useContext)(o.S),s=(0,r.useRouter)(),handleLogin=async e=>{e.preventDefault();let a=new FormData(e.target),l=Object.fromEntries(a),r=(m.env.NEXT_PUBLIC_AGENTS_ENDPOINT||"")+"/login",i=await fetch(r,{method:"POST",body:JSON.stringify(l)}),o=await i.json();"success"===o.status?(t({user:l.username,token:o.token,userType:o.user_type}),localStorage.setItem("defogUser",l.username),localStorage.setItem("defogToken",o.token),localStorage.setItem("defogUserType",o.user_type),"admin"===o.user_type?s.push("/static/extract-metadata.html"):s.push("/static/query-data.html")):n.default.error("Login failed. Please contact your administrator.")};return(0,a.jsxs)(a.Fragment,{children:[(0,a.jsx)(i.Z,{}),(0,a.jsx)(c.Z,{children:(0,a.jsxs)("div",{className:"flex min-h-full flex-1 flex-col justify-center px-6 py-12 lg:px-8 mt-16",children:[(0,a.jsxs)("div",{className:"sm:mx-auto sm:w-full sm:max-w-sm",children:[(0,a.jsx)("img",{alt:"Defog.ai",src:"/static/logo512.png",className:"mx-auto h-10 w-auto"}),(0,a.jsx)("h2",{className:"mt-10 text-center text-2xl font-bold leading-9 tracking-tight text-gray-900",children:"Sign in to Defog"})]}),(0,a.jsxs)("div",{className:"mt-10 sm:mx-auto sm:w-full sm:max-w-sm",children:[(0,a.jsxs)("form",{onSubmit:handleLogin,className:"space-y-6",children:[(0,a.jsxs)("div",{children:[(0,a.jsx)("label",{htmlFor:"username",className:"block text-sm font-medium leading-6 text-gray-900",children:"Username"}),(0,a.jsx)("div",{className:"mt-2",children:(0,a.jsx)("input",{id:"username",name:"username",type:"text",required:!0,autoComplete:"username",className:"block w-full rounded-md border-0 py-1.5 text-gray-900 shadow-sm ring-1 ring-inset ring-gray-300 placeholder:text-gray-400 focus:ring-2 focus:ring-inset focus:ring-blue-600 sm:text-sm sm:leading-6"})})]}),(0,a.jsxs)("div",{children:[(0,a.jsx)("div",{className:"flex items-center justify-between",children:(0,a.jsx)("label",{htmlFor:"password",className:"block text-sm font-medium leading-6 text-gray-900",children:"Password"})}),(0,a.jsx)("div",{className:"mt-2",children:(0,a.jsx)("input",{id:"password",name:"password",type:"password",required:!0,autoComplete:"current-password",className:"block w-full rounded-md border-0 py-1.5 text-gray-900 shadow-sm ring-1 ring-inset ring-gray-300 placeholder:text-gray-400 focus:ring-2 focus:ring-inset focus:ring-blue-600 sm:text-sm sm:leading-6"})})]}),(0,a.jsx)("div",{children:(0,a.jsx)("button",{type:"submit",className:"flex w-full justify-center rounded-md bg-blue-600 px-3 py-1.5 text-sm font-semibold leading-6 text-white shadow-sm hover:bg-blue-500 focus-visible:outline focus-visible:outline-2 focus-visible:outline-offset-2 focus-visible:outline-blue-600",children:"Sign in"})})]}),(0,a.jsxs)("p",{className:"mt-10 text-center text-sm text-gray-500",children:["Don't have an API key?"," ",(0,a.jsx)("a",{href:"https://defog.ai/signup",className:"font-semibold leading-6 text-blue-600 hover:text-blue-500",children:"Get Started Free"})]})]})]})})]})}}},function(e){e.O(0,[435,774,888,179],function(){return e(e.s=13831)}),_N_E=e.O()}]); -------------------------------------------------------------------------------- /out/_next/static/chunks/pages/msal_redirect-a4253aab45cea6c0.js: -------------------------------------------------------------------------------- 1 | (self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[593],{1599:function(n,_,e){(window.__NEXT_P=window.__NEXT_P||[]).push(["/msal_redirect",function(){return e(33137)}])},33137:function(n,_,e){"use strict";e.r(_);var u=e(28598);e(82684),_.default=()=>(0,u.jsx)("div",{children:"msal_redirect"})}},function(n){n.O(0,[774,888,179],function(){return n(n.s=1599)}),_N_E=n.O()}]); -------------------------------------------------------------------------------- /out/_next/static/chunks/pages/oracle-frontend-eff9fc199d2175f6.js: -------------------------------------------------------------------------------- 1 | (self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[269],{41599:function(e,t,s){(window.__NEXT_P=window.__NEXT_P||[]).push(["/oracle-frontend",function(){return s(70194)}])},26335:function(e,t,s){"use strict";var a=s(28598),l=s(1887),o=s.n(l);t.Z=()=>(0,a.jsxs)(o(),{children:[(0,a.jsx)("title",{children:"Defog.ai - AI Assistant for Data Analysis"}),(0,a.jsx)("meta",{name:"description",content:"Train your AI data assistant on your own device"}),(0,a.jsx)("meta",{name:"viewport",content:"width=device-width, initial-scale=1"}),(0,a.jsx)("link",{rel:"icon",href:"/favicon.ico"})]})},82067:function(e,t,s){"use strict";var a=s(28598),l=s(82684),o=s(97574),r=s(79869),n=s(34376);s(12691);var i=s(26978),c=s(47663),d=s(56572);t.Z=e=>{let{id:t,userType:s,children:u,rootClassNames:h="",contentClassNames:m=""}=e,{Content:f,Sider:p}=r.Layout,[g,x]=(0,l.useState)([]),[y,N]=(0,l.useContext)(o.S),j=(0,n.useRouter)(),redirect=e=>{j.push(e)},logout=()=>{localStorage.removeItem("defogUser"),localStorage.removeItem("defogToken"),localStorage.removeItem("defogUserType"),N({user:null,token:null,userType:null}),redirect("/static/log-in.html")},k=(0,c.usePathname)();(0,l.useEffect)(()=>{x(("admin"==s?[{key:"manage-database",title:"Manage Database",href:"/static/extract-metadata.html"},{key:"manage-users",title:"Manage Users",href:"/static/manage-users.html"},{key:"manage-tools",title:"Manage tools",href:"/static/manage-tools.html"},{key:"check-readiness",title:"Check Readiness",href:"/static/check-readiness.html"},{key:"align-model",title:"Align Model",href:"/static/align-model.html"},{key:"view-feedback",title:"View Feedback",href:"/static/view-feedback.html"},{key:"query-data",title:"Query Data",href:"/static/query-data.html"},{key:"logout",classNames:"self-end",title:"Logout",href:"#",onClick:logout}]:s?[{key:"query-data",title:"Query Data",href:"/static/query-data.html"},{key:"logout",classNames:"self-end",title:"Logout",href:"#",onClick:logout}]:[]).map(e=>(e.current=e.href==k,e)))},[s]);let[b,v]=(0,l.useState)("flex flex-col md:min-h-screen relative container mx-auto");return(0,l.useEffect)(()=>{"/query-data"===k&&v("flex flex-col md:min-h-screen relative")},[k]),(0,a.jsxs)("div",{className:(0,d.m6)(b,h),children:[g.length?(0,a.jsx)(i.l2,{rootClassNames:"border-b",items:g}):(0,a.jsx)(a.Fragment,{}),(0,a.jsx)("div",{className:(0,d.m6)("grow",m),children:u})]})}},70194:function(e,t,s){"use strict";s.r(t),s.d(t,{default:function(){return oracle_frontend}});var a=s(28598),l=s(20259),o=s.n(l),r=s(33612),n=s(37490),i=s(92696),c=s(24788),d=s(49603),u=s(82684),h=s(26335),m=s(82067),f=s(66079),p=s(61252),g=s(60071),x=s.n(g);function Sources(e){let{sources:t,setSources:s}=e;return 0===t.length?null:(0,a.jsxs)("div",{className:"bg-white flex w-full flex-col",children:[(0,a.jsx)("div",{className:"flex items-start gap-4 pb-3",children:(0,a.jsxs)("h3",{className:"text-base font-bold leading-6 text-black",children:["Suggested Sources"," "]})}),(0,a.jsx)("div",{className:"flex w-full items-center overflow-x-scroll gap-6 pb-3",children:t.map(e=>(0,a.jsx)(SourceCard,{source:e,setSources:s}))})]})}let SourceCard=e=>{let{source:t,setSources:s}=e,[l,o]=(0,u.useState)(t.selected);return(0,a.jsx)("div",{className:'"flex h-[79px] w-full items-center gap-2.5 rounded-lg border border-gray-100 px-1.5 py-1 shadow-md '.concat(l?"bg-gray-100 opacity-50":"bg-white"),children:(0,a.jsxs)("div",{className:"flex items-center relative",children:[(0,a.jsx)("span",{className:"shrink-0",children:(0,a.jsx)(x(),{unoptimized:!0,src:"https://www.google.com/s2/favicons?domain=".concat(t.link,"&sz=128"),alt:t.link,className:"rounded-full p-1",width:36,height:36})}),(0,a.jsxs)("span",{className:"flex min-w-0 max-w-[192px] flex-col justify-center gap-1 ml-2 mt-2",children:[(0,a.jsx)("h6",{className:"line-clamp-2 text-xs font-light",children:t.title}),(0,a.jsx)("a",{target:"_blank",rel:"noopener noreferrer",href:t.link,className:"truncate text-xs font-light text-[#1B1B16]/30",children:t.link})]}),(0,a.jsx)("div",{className:"ml-auto cursor-pointer top-2 right-2",onClick:()=>{o(!l),s(e=>e.map(e=>e.position===t.position?{...e,selected:!l}:e))},children:l?(0,a.jsx)(p.default,{style:{color:"green"}}):(0,a.jsx)(f.default,{})})]})})};var y=s(79204),N=s(13966),j=s(55658),k=s(82098),b=s(48497),oracle_frontend=function(){let e=(b.env.NEXT_PUBLIC_API_KEY_NAMES||"REPLACE_WITH_API_KEY_NAMES").split(","),[t,s]=(0,u.useState)(e[0]),[l,f]=(0,u.useState)(""),[p,g]=(0,u.useState)([]),[x,v]=(0,u.useState)(!1),[w,_]=(0,u.useState)([]),[S,T]=(0,u.useState)(!1),[C,E]=(0,u.useState)(!1),[I,P]=(0,u.useState)([]),getClarifications=async()=>{v(!0);let e=localStorage.getItem("defogToken"),s=await fetch((0,y.Z)("http","oracle/clarify_formulation"),{method:"POST",headers:{"Content-Type":"application/json"},body:JSON.stringify({token:e,key_name:t,question:l})});if(v(!1),s.ok){let e=await s.json();g(e.clarifications),E(e.ready)}else console.error("Failed to fetch clarifications")},deleteClarification=e=>{g(t=>t.filter((t,s)=>s!==e))},getSources=async()=>{T(!0);let e=localStorage.getItem("defogToken"),s=await fetch((0,y.Z)("http","oracle/suggest_web_sources"),{method:"POST",headers:{"Content-Type":"application/json"},body:JSON.stringify({token:e,key_name:t,question:l})});if(T(!1),s.ok){let e=await s.json(),t=e.organic;t.forEach(e=>{e.selected=!1}),_(t)}else console.error("Failed to fetch sources")},checkAllFinished=e=>e.every(e=>"done"===e.status||"error"===e.status),getReports=async()=>{let e=localStorage.getItem("defogToken"),pollReports=async()=>{try{let a=await fetch((0,y.Z)("http","oracle/list_reports"),{method:"POST",headers:{"Content-Type":"application/json"},body:JSON.stringify({token:e,key_name:t})});if(a.ok){let e=await a.json();P(e.reports),checkAllFinished(e.reports)&&clearInterval(s)}else console.error("Failed to fetch reports"),clearInterval(s)}catch(e){console.error("An error occurred:",e),clearInterval(s)}},s=setInterval(pollReports,1e3);pollReports()},deleteReport=async e=>{let s=localStorage.getItem("defogToken"),a=await fetch((0,y.Z)("http","oracle/delete_report"),{method:"POST",headers:{"Content-Type":"application/json"},body:JSON.stringify({token:s,key_name:t,report_id:I[e].report_id})});a.ok&&getReports()},generateReport=async()=>{let e=localStorage.getItem("defogToken"),s=w.filter(e=>e.selected);console.log(s);let a=await fetch((0,y.Z)("http","oracle/begin_generation"),{method:"POST",headers:{"Content-Type":"application/json"},body:JSON.stringify({token:e,key_name:t,question:l,sources:s})});a.ok&&getReports()};return(0,u.useEffect)(()=>{let e=setTimeout(()=>{l.length<5?console.log("User task is too short, not fetching clarifications yet"):(getClarifications(),getSources())},3e3);return()=>clearTimeout(e)},[l]),(0,u.useEffect)(()=>{getReports()},[]),(0,a.jsxs)(a.Fragment,{children:[(0,a.jsx)(h.Z,{}),(0,a.jsxs)(m.Z,{id:"align-model",userType:"admin",children:[e.length>1?(0,a.jsx)(n.default,{type:"flex",height:"100vh",children:(0,a.jsx)(i.default,{span:24,style:{paddingBottom:"1em"},children:(0,a.jsx)(c.default,{style:{width:"100%"},onChange:e=>{s(e)},options:e.map(e=>({value:e,key:e,label:e})),defaultValue:t})})}):null,(0,a.jsxs)("div",{className:"bg-white p-6 rounded-lg shadow-lg max-w-3xl mx-auto",children:[(0,a.jsxs)("div",{className:"mb-6",children:[(0,a.jsx)("h1",{className:"text-2xl font-semibold mb-2",children:"The Oracle"}),(0,a.jsx)("p",{className:"text-gray-600",children:"The Oracle is a background assistant, helping you to dig into your dataset for insights. To begin, please let us know what you are interested in below."})]}),(0,a.jsxs)("div",{className:"flex items-center mb-6",children:[(0,a.jsx)(r.default.TextArea,{placeholder:"Describe what you would like the Oracle to do...",className:"w-full p-3 border rounded-lg text-gray-700 focus:outline-none focus:border-purple-500",value:l,onChange:e=>{f(e.target.value)},autoSize:{minRows:2,maxRows:10},style:{flexBasis:"90%"}}),(0,a.jsx)("div",{className:"ml-2",children:x?(0,a.jsx)(d.default,{}):l&&(C?(0,a.jsx)(N.Z,{style:{color:"green"}}):(0,a.jsx)(j.Z,{style:{color:"#808080"}}))})]}),p.length>0&&(0,a.jsxs)("div",{className:"mt-6",children:[(0,a.jsx)("h2",{className:"text-xl font-semibold mb-2",children:"Clarifications"}),p.map((e,t)=>(0,a.jsxs)("div",{className:"bg-amber-100 p-4 rounded-lg mb-4 relative",children:[(0,a.jsx)("p",{className:"text-amber-500",children:e}),(0,a.jsx)(k.default,{className:"text-amber-500 absolute top-2 right-2 cursor-pointer",onClick:()=>deleteClarification(t)})]},t))]}),(0,a.jsx)("div",{className:"mt-6",children:(0,a.jsx)(Sources,{sources:w,setSources:_})}),(0,a.jsx)(o(),{className:"bg-purple-500 text-white py-2 px-4 rounded-lg hover:bg-purple-600",onClick:generateReport,children:"Generate"})]}),(0,a.jsxs)("div",{children:[(0,a.jsx)("h2",{className:"text-xl font-semibold mb-4",children:"Past Reports"}),I.map((e,t)=>(0,a.jsxs)("div",{className:"bg-purple-100 p-4 rounded-lg mb-4",children:[(0,a.jsx)("h3",{className:"text-lg font-semibold",children:e.report_id}),(0,a.jsx)("p",{className:"text-purple-700",children:e.report_name}),(0,a.jsx)("p",{className:"text-gray-600",children:e.status}),(0,a.jsxs)("p",{className:"text-gray-400",children:["Generated at ",e.date_created]}),(0,a.jsxs)("div",{className:"flex space-x-4",children:[(0,a.jsx)("button",{className:"text-purple-700 hover:text-purple-900",children:"Download"}),(0,a.jsx)("button",{className:"text-purple-700 hover:text-purple-900",onClick:()=>deleteReport(t),children:"Delete"})]})]},t))]})]})]})}},79204:function(e,t,s){"use strict";var a=s(48497);t.Z=(e,t)=>{if("http"!==e&&"ws"!==e)throw Error("Protocol not supported");return(console.log(a.env.NEXT_PUBLIC_AGENTS_ENDPOINT),""!==t)?"ws"===e?"".concat((a.env.NEXT_PUBLIC_AGENTS_ENDPOINT||"").replace("http","ws"),"/").concat(t):"".concat(a.env.NEXT_PUBLIC_AGENTS_ENDPOINT||"","/").concat(t):"".concat(a.env.NEXT_PUBLIC_AGENTS_ENDPOINT||"")}}},function(e){e.O(0,[435,200,774,888,179],function(){return e(e.s=41599)}),_N_E=e.O()}]); -------------------------------------------------------------------------------- /out/_next/static/chunks/pages/view-notebooks-31985eb96195a01a.js: -------------------------------------------------------------------------------- 1 | (self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[433],{84308:function(e,t,s){(window.__NEXT_P=window.__NEXT_P||[]).push(["/view-notebooks",function(){return s(91508)}])},26335:function(e,t,s){"use strict";var a=s(28598),o=s(1887),r=s.n(o);t.Z=()=>(0,a.jsxs)(r(),{children:[(0,a.jsx)("title",{children:"Defog.ai - AI Assistant for Data Analysis"}),(0,a.jsx)("meta",{name:"description",content:"Train your AI data assistant on your own device"}),(0,a.jsx)("meta",{name:"viewport",content:"width=device-width, initial-scale=1"}),(0,a.jsx)("link",{rel:"icon",href:"/favicon.ico"})]})},82067:function(e,t,s){"use strict";var a=s(28598),o=s(82684),r=s(97574),i=s(79869),c=s(34376);s(12691);var n=s(26978),l=s(47663),d=s(56572);t.Z=e=>{let{id:t,userType:s,children:u,rootClassNames:m="",contentClassNames:h=""}=e,{Content:f,Sider:g}=i.Layout,[v,p]=(0,o.useState)([]),[y,x]=(0,o.useContext)(r.S),k=(0,c.useRouter)(),redirect=e=>{k.push(e)},logout=()=>{localStorage.removeItem("defogUser"),localStorage.removeItem("defogToken"),localStorage.removeItem("defogUserType"),x({user:null,token:null,userType:null}),redirect("/static/log-in.html")},_=(0,l.usePathname)();(0,o.useEffect)(()=>{p(("admin"==s?[{key:"manage-database",title:"Manage Database",href:"/static/extract-metadata.html"},{key:"manage-users",title:"Manage Users",href:"/static/manage-users.html"},{key:"manage-tools",title:"Manage tools",href:"/static/manage-tools.html"},{key:"check-readiness",title:"Check Readiness",href:"/static/check-readiness.html"},{key:"align-model",title:"Align Model",href:"/static/align-model.html"},{key:"view-feedback",title:"View Feedback",href:"/static/view-feedback.html"},{key:"query-data",title:"Query Data",href:"/static/query-data.html"},{key:"logout",classNames:"self-end",title:"Logout",href:"#",onClick:logout}]:s?[{key:"query-data",title:"Query Data",href:"/static/query-data.html"},{key:"logout",classNames:"self-end",title:"Logout",href:"#",onClick:logout}]:[]).map(e=>(e.current=e.href==_,e)))},[s]);let[N,j]=(0,o.useState)("flex flex-col md:min-h-screen relative container mx-auto");return(0,o.useEffect)(()=>{"/query-data"===_&&j("flex flex-col md:min-h-screen relative")},[_]),(0,a.jsxs)("div",{className:(0,d.m6)(N,m),children:[v.length?(0,a.jsx)(n.l2,{rootClassNames:"border-b",items:v}):(0,a.jsx)(a.Fragment,{}),(0,a.jsx)("div",{className:(0,d.m6)("grow",h),children:u})]})}},91508:function(e,t,s){"use strict";s.r(t);var a=s(28598),o=s(82684),r=s(26335),i=s(54186),c=s(18953),n=s(34376),l=s(79204),d=s(82067),u=s(97574);t.default=()=>{let[e,t]=(0,o.useState)(!1),[s,m]=(0,o.useContext)(u.S),[h,f]=(0,o.useState)([]),[g,v]=(0,o.useState)([]),[p,y]=(0,o.useState)([]),x=(0,n.useRouter)();async function archiveToggle(e){let t=e.doc_id,s=e.archived,a=p,o=h;s?(a=p.filter(e=>e.doc_id!==t),o=[...h,...p.filter(e=>e.doc_id===t).map(e=>(e.archived=!1,e))]):(a=[...p,...h.filter(e=>e.doc_id===t).map(e=>(e.archived=!0,e))],o=h.filter(e=>e.doc_id!==t)),y(a),f(o);let r=await fetch((0,l.Z)("http","toggle_archive_status"),{method:"POST",headers:{"Content-Type":"application/json"},body:JSON.stringify({doc_id:t,archive_status:!s})});(r=await r.json()).success&&c.default.success("Successfully ".concat(s?"un":"","archived doc")),r.success||c.default.error("Error ".concat(s?"un":"","archiving doc"))}let getNotebooks=async()=>{if(!s.token)return;let e=await fetch((0,l.Z)("http","get_docs"),{method:"POST",headers:{"Content-Type":"application/json"},body:JSON.stringify({token:s.token})});if((e=await e.json()).success){let t=e.docs,a=e.recently_viewed_docs||[];t.forEach(e=>{e.timestamp=new Date(e.timestamp)}),a=a.map(e=>({...e,timestamp:new Date(e.timestamp),created_by:e.username===s.user?"You":e.username})),t.sort((e,t)=>t.timestamp-e.timestamp),v(a),a.sort((e,t)=>t.timestamp-e.timestamp);let o=t.filter(e=>e.archived);t=t.filter(e=>!e.archived),y(o),f(t)}if(!e.success)throw Error(e.error_message);t(!1)};return(0,o.useEffect)(()=>{t(!0);let e=s.token,a=s.userType;if(!a){let t=localStorage.getItem("defogUser");if(e=localStorage.getItem("defogToken"),a=localStorage.getItem("defogUserType"),!t||!e||!a){x.push("/static/log-in.html");return}m({user:t,token:e,userType:a})}e?getNotebooks():x.push("/static/log-in.html")},[s,s.token]),(0,a.jsxs)(a.Fragment,{children:[(0,a.jsx)(r.Z,{}),(0,a.jsxs)(d.Z,{id:"view-notebooks",userType:s.userType,children:[(0,a.jsx)("h1",{className:"text-2xl font-bold mb-4",children:"Notebooks"}),(0,a.jsx)("h2",{className:"text-lg mb-4",children:"Your notebooks"}),(0,a.jsx)("div",{className:"flex flex-wrap justify-start",children:h&&!e?(0,a.jsx)(a.Fragment,{}):(0,a.jsx)("div",{children:"Loading docs..."})}),g.length?(0,a.jsx)("h2",{className:"text-lg mb-4",children:"Recently viewed"}):null,(0,a.jsx)("div",{className:"flex flex-wrap justify-start",children:g&&!e?(0,a.jsx)(a.Fragment,{children:g.map(e=>(0,a.jsx)(DocIcon,{doc:e,onClick:archiveToggle,recentlyViewed:!0},e.doc_id))}):(0,a.jsx)("div",{children:"Loading recently viewed docs..."})}),0===p.length?(0,a.jsx)(a.Fragment,{}):(0,a.jsx)(a.Fragment,{children:(0,a.jsx)(i.default,{bordered:!1,size:"small",rootClassName:"archived-collapse",items:[{label:"Archived notebooks",key:"archived-docs",children:(0,a.jsx)("div",{className:"doc-icons-container",children:p.map(e=>(0,a.jsx)(DocIcon,{doc:e,onClick:archiveToggle},e.doc_id))})}]})})]})]})}},79204:function(e,t,s){"use strict";var a=s(48497);t.Z=(e,t)=>{if("http"!==e&&"ws"!==e)throw Error("Protocol not supported");return(console.log(a.env.NEXT_PUBLIC_AGENTS_ENDPOINT),""!==t)?"ws"===e?"".concat((a.env.NEXT_PUBLIC_AGENTS_ENDPOINT||"").replace("http","ws"),"/").concat(t):"".concat(a.env.NEXT_PUBLIC_AGENTS_ENDPOINT||"","/").concat(t):"".concat(a.env.NEXT_PUBLIC_AGENTS_ENDPOINT||"")}}},function(e){e.O(0,[435,774,888,179],function(){return e(e.s=84308)}),_N_E=e.O()}]); -------------------------------------------------------------------------------- /out/_next/static/chunks/webpack-4e69afc7053360d9.js: -------------------------------------------------------------------------------- 1 | !function(){"use strict";var e,r,_,t,n,u,i={},o={};function __webpack_require__(e){var r=o[e];if(void 0!==r)return r.exports;var _=o[e]={id:e,loaded:!1,exports:{}},t=!0;try{i[e].call(_.exports,_,_.exports,__webpack_require__),t=!1}finally{t&&delete o[e]}return _.loaded=!0,_.exports}__webpack_require__.m=i,e=[],__webpack_require__.O=function(r,_,t,n){if(_){n=n||0;for(var u=e.length;u>0&&e[u-1][2]>n;u--)e[u]=e[u-1];e[u]=[_,t,n];return}for(var i=1/0,u=0;u=n&&Object.keys(__webpack_require__.O).every(function(e){return __webpack_require__.O[e](_[c])})?_.splice(c--,1):(o=!1,nDefog.ai - AI Assistant for Data Analysis

Align Model

Here, you can see the instructions and golden queries that the model is currently using to create SQL queries. Feel free to change them to get the best results.

Glossary

This the information about your data that the model considers when generating your SQL queries. Feel free to edit these instructions to get the best results.

Golden Queries

The golden queries are SQL queries used as examples by the model to learn about how your database is structured. You can see and edit them below.

QuestionSQL QueryActions
Simple Empty
No data
-------------------------------------------------------------------------------- /out/align-model.html: -------------------------------------------------------------------------------- 1 | Defog.ai - AI Assistant for Data Analysis

Align Model

Here, you can see the instructions and golden queries that the model is currently using to create SQL queries. Feel free to change them to get the best results.

Glossary

This the information about your data that the model considers when generating your SQL queries. Feel free to edit these instructions to get the best results.

Golden Queries

The golden queries are SQL queries used as examples by the model to learn about how your database is structured. You can see and edit them below.

QuestionSQL QueryActions
Simple Empty
No data
-------------------------------------------------------------------------------- /out/doc: -------------------------------------------------------------------------------- 1 |
Verifying your details...
-------------------------------------------------------------------------------- /out/doc.html: -------------------------------------------------------------------------------- 1 |
Verifying your details...
-------------------------------------------------------------------------------- /out/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/defog-ai/defog-desktop/02eb40f7090be763604b5c7631796aecc0e9beae/out/favicon.ico -------------------------------------------------------------------------------- /out/index: -------------------------------------------------------------------------------- 1 | Defog.ai - AI Assistant for Data Analysis

Welcome to Defog!

Please wait while we log you in and redirect you to the right page...

-------------------------------------------------------------------------------- /out/index.html: -------------------------------------------------------------------------------- 1 | Defog.ai - AI Assistant for Data Analysis

Welcome to Defog!

Please wait while we log you in and redirect you to the right page...

-------------------------------------------------------------------------------- /out/log-in: -------------------------------------------------------------------------------- 1 | Defog.ai - AI Assistant for Data Analysis
Defog.ai

Sign in to Defog

Don't have an API key? Get Started Free

-------------------------------------------------------------------------------- /out/log-in.html: -------------------------------------------------------------------------------- 1 | Defog.ai - AI Assistant for Data Analysis
Defog.ai

Sign in to Defog

Don't have an API key? Get Started Free

-------------------------------------------------------------------------------- /out/logo512.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/defog-ai/defog-desktop/02eb40f7090be763604b5c7631796aecc0e9beae/out/logo512.png -------------------------------------------------------------------------------- /out/manage-tools: -------------------------------------------------------------------------------- 1 | Defog.ai - AI Assistant for Data Analysis
Fetching your tools...
-------------------------------------------------------------------------------- /out/manage-tools.html: -------------------------------------------------------------------------------- 1 | Defog.ai - AI Assistant for Data Analysis
Fetching your tools...
-------------------------------------------------------------------------------- /out/msal_redirect: -------------------------------------------------------------------------------- 1 |
msal_redirect
-------------------------------------------------------------------------------- /out/msal_redirect.html: -------------------------------------------------------------------------------- 1 |
msal_redirect
-------------------------------------------------------------------------------- /out/next.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /out/oracle-frontend: -------------------------------------------------------------------------------- 1 | Defog.ai - AI Assistant for Data Analysis

The Oracle

The Oracle is a background assistant, helping you to dig into your dataset for insights. To begin, please let us know what you are interested in below.

Past Reports

-------------------------------------------------------------------------------- /out/oracle-frontend.html: -------------------------------------------------------------------------------- 1 | Defog.ai - AI Assistant for Data Analysis

The Oracle

The Oracle is a background assistant, helping you to dig into your dataset for insights. To begin, please let us know what you are interested in below.

Past Reports

-------------------------------------------------------------------------------- /out/query-data: -------------------------------------------------------------------------------- 1 | Defog.ai - AI Assistant for Data Analysis
-------------------------------------------------------------------------------- /out/query-data.html: -------------------------------------------------------------------------------- 1 | Defog.ai - AI Assistant for Data Analysis
-------------------------------------------------------------------------------- /out/vercel.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /out/view-feedback: -------------------------------------------------------------------------------- 1 | Defog.ai - AI Assistant for Data Analysis

Feedback History

Recommendation
Simple Empty
No data
-------------------------------------------------------------------------------- /out/view-feedback.html: -------------------------------------------------------------------------------- 1 | Defog.ai - AI Assistant for Data Analysis

Feedback History

Recommendation
Simple Empty
No data
-------------------------------------------------------------------------------- /out/view-notebooks: -------------------------------------------------------------------------------- 1 | Defog.ai - AI Assistant for Data Analysis

Notebooks

Your notebooks

-------------------------------------------------------------------------------- /out/view-notebooks.html: -------------------------------------------------------------------------------- 1 | Defog.ai - AI Assistant for Data Analysis

Notebooks

Your notebooks

-------------------------------------------------------------------------------- /query_routes.py: -------------------------------------------------------------------------------- 1 | from fastapi import APIRouter, Request 2 | from defog import Defog 3 | from db_utils import validate_user, get_db_type_creds 4 | import pandas as pd 5 | import asyncio 6 | import os 7 | from generic_utils import make_request, get_api_key_from_key_name 8 | from fastapi.responses import JSONResponse 9 | 10 | router = APIRouter() 11 | 12 | 13 | @router.post("/query") 14 | async def query(request: Request): 15 | body = await request.json() 16 | question = body.get("question") 17 | previous_context = body.get("previous_context") 18 | dev = body.get("dev", False) 19 | key_name = body.get("key_name") 20 | glossary = body.get("glossary", "") 21 | api_key = get_api_key_from_key_name(key_name) 22 | res = get_db_type_creds(api_key) 23 | 24 | if res: 25 | db_type, db_creds = res 26 | else: 27 | return {"error": "no db creds found"} 28 | ignore_cache = body.get("ignore_cache", False) 29 | token = body.get("token") 30 | if not validate_user(token): 31 | return JSONResponse( 32 | status_code=401, 33 | content={ 34 | "error": "unauthorized", 35 | "message": "Invalid username or password", 36 | }, 37 | ) 38 | 39 | print( 40 | "Base Url: ", 41 | os.environ.get("DEFOG_BASE_URL", "https://api.defog.ai"), 42 | flush=True, 43 | ) 44 | 45 | defog = Defog(api_key=api_key, db_type=db_type, db_creds=db_creds) 46 | defog.base_url = os.environ.get("DEFOG_BASE_URL", "https://api.defog.ai") 47 | defog.generate_query_url = os.environ.get( 48 | "DEFOG_GENERATE_URL", f"{defog.base_url}/generate_query_chat" 49 | ) 50 | print("Generate Query URL: ", defog.generate_query_url, flush=True) 51 | res = await asyncio.to_thread( 52 | defog.run_query, 53 | question, 54 | previous_context=previous_context, 55 | dev=dev, 56 | profile=True, 57 | ignore_cache=ignore_cache, 58 | glossary=glossary, 59 | ) 60 | 61 | if "generation_time_taken" in res: 62 | res["debug_info"] = ( 63 | f"Query Generation Time: {res.get('generation_time_taken', '')}\nQuery Execution Time: {res.get('execution_time_taken', '-')}" 64 | ) 65 | else: 66 | res["debug_info"] = ( 67 | f"Query Execution Time: {res.get('execution_time_taken', '-')}" 68 | ) 69 | # do this to prevent frontend from breaking if a columns is all empty 70 | if "data" in res and res["data"] is not None: 71 | res["data"] = pd.DataFrame(res["data"]) 72 | res["data"] = res["data"].fillna("").values.tolist() 73 | else: 74 | res["data"] = [] 75 | res["columns"] = [] 76 | return res 77 | 78 | 79 | @router.post("/get_chart_types") 80 | async def get_chart_types(request: Request): 81 | print("CALLED GET CHART TYPES", flush=True) 82 | body = await request.json() 83 | columns = body.get("columns") 84 | question = body.get("question") 85 | key_name = body.get("key_name") 86 | api_key = get_api_key_from_key_name(key_name) 87 | 88 | res = await make_request( 89 | "https://api.defog.ai/get_chart_type", 90 | json={"api_key": api_key, "columns": columns, "question": question}, 91 | ) 92 | return res 93 | -------------------------------------------------------------------------------- /readiness_routes.py: -------------------------------------------------------------------------------- 1 | from fastapi import APIRouter, Request 2 | import os 3 | from db_utils import validate_user, get_db_type_creds 4 | from generic_utils import make_request, get_api_key_from_key_name 5 | from defog import Defog 6 | from fastapi.responses import JSONResponse 7 | 8 | DEFOG_BASE_URL = os.environ.get("DEFOG_BASE_URL", "https://api.defog.ai") 9 | 10 | router = APIRouter() 11 | 12 | 13 | @router.post("/readiness/basic") 14 | async def check_basic_readiness(request: Request): 15 | params = await request.json() 16 | token = params.get("token") 17 | dev = params.get("dev") 18 | if not validate_user(token, user_type="admin"): 19 | return JSONResponse( 20 | status_code=401, 21 | content={ 22 | "error": "unauthorized", 23 | "message": "Invalid username or password", 24 | }, 25 | ) 26 | 27 | key_name = params.get("key_name") 28 | api_key = get_api_key_from_key_name(key_name) 29 | 30 | metadata_ready = False 31 | golden_queries_ready = False 32 | glossary_ready = False 33 | 34 | r = await make_request( 35 | f"{DEFOG_BASE_URL}/get_metadata", {"api_key": api_key, "dev": dev} 36 | ) 37 | 38 | if r["table_metadata"]: 39 | metadata_ready = True 40 | if r["glossary"]: 41 | glossary_ready = True 42 | 43 | r = await make_request( 44 | f"{DEFOG_BASE_URL}/get_golden_queries", {"api_key": api_key, "dev": dev} 45 | ) 46 | 47 | if r["golden_queries"] and len(r["golden_queries"]) > 0: 48 | golden_queries_ready = True 49 | 50 | return { 51 | "success": True, 52 | "metadata": metadata_ready, 53 | "golden_queries": golden_queries_ready, 54 | "glossary": glossary_ready, 55 | } 56 | 57 | 58 | @router.post("/readiness/check_golden_queries_validity") 59 | async def check_golden_queries_validity(request: Request): 60 | params = await request.json() 61 | token = params.get("token") 62 | dev = params.get("dev") 63 | if not validate_user(token, user_type="admin"): 64 | return JSONResponse( 65 | status_code=401, 66 | content={ 67 | "error": "unauthorized", 68 | "message": "Invalid username or password", 69 | }, 70 | ) 71 | 72 | key_name = params.get("key_name") 73 | api_key = get_api_key_from_key_name(key_name) 74 | res = get_db_type_creds(api_key) 75 | if res: 76 | db_type, db_creds = res 77 | else: 78 | return {"error": "no db creds found"} 79 | 80 | resp = await make_request( 81 | f"{DEFOG_BASE_URL}/check_gold_queries_valid", 82 | json={"api_key": api_key, "db_type": db_type, "dev": dev}, 83 | ) 84 | return resp 85 | 86 | 87 | @router.post("/readiness/check_instruction_consistency") 88 | async def check_glossary_consistency(request: Request): 89 | params = await request.json() 90 | token = params.get("token") 91 | dev = params.get("dev") 92 | if not validate_user(token, user_type="admin"): 93 | return JSONResponse( 94 | status_code=401, 95 | content={ 96 | "error": "unauthorized", 97 | "message": "Invalid username or password", 98 | }, 99 | ) 100 | 101 | key_name = params.get("key_name") 102 | api_key = get_api_key_from_key_name(key_name) 103 | 104 | resp = await make_request( 105 | f"{DEFOG_BASE_URL}/check_glossary_consistency", 106 | json={"api_key": api_key, "dev": dev}, 107 | ) 108 | return resp 109 | 110 | 111 | @router.post("/readiness/check_golden_query_coverage") 112 | async def check_golden_query_coverage(request: Request): 113 | params = await request.json() 114 | token = params.get("token") 115 | dev = params.get("dev") 116 | if not validate_user(token, user_type="admin"): 117 | return JSONResponse( 118 | status_code=401, 119 | content={ 120 | "error": "unauthorized", 121 | "message": "Invalid username or password", 122 | }, 123 | ) 124 | 125 | key_name = params.get("key_name") 126 | api_key = get_api_key_from_key_name(key_name) 127 | res = get_db_type_creds(api_key) 128 | if res: 129 | db_type, db_creds = res 130 | else: 131 | return {"error": "no db creds found"} 132 | 133 | resp = await make_request( 134 | f"{DEFOG_BASE_URL}/get_golden_queries_coverage", 135 | json={"api_key": api_key, "dev": dev, "db_type": db_type}, 136 | ) 137 | return resp 138 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | aiohttp 2 | colorama 3 | databricks-sql-connector 4 | defog==0.65.9 5 | fastapi 6 | google-auth 7 | google-cloud-bigquery 8 | httpx 9 | hypercorn 10 | matplotlib 11 | mysql-connector-python 12 | numpy 13 | pandas 14 | pandasql 15 | psycopg2-binary 16 | python-dotenv 17 | pyyaml 18 | redis 19 | redshift_connector 20 | requests 21 | resend 22 | scikit-learn 23 | scipy 24 | seaborn 25 | snowflake-connector-python 26 | sqlalchemy 27 | statsmodels 28 | tabulate 29 | sqlparse -------------------------------------------------------------------------------- /tool_code_utilities.py: -------------------------------------------------------------------------------- 1 | # includes utilties that a user can import when writing their tool code 2 | # top level for a cleaner import statement 3 | # from tool_code_utilities import xx 4 | 5 | import asyncio 6 | import yaml 7 | from defog import Defog 8 | from defog.query import execute_query 9 | import re 10 | import json 11 | import pandas as pd 12 | import os 13 | from db_utils import get_db_type_creds 14 | 15 | from pathlib import Path 16 | 17 | home_dir = Path.home() 18 | # see if we have a custom report assets directory 19 | if not os.path.exists(home_dir / "defog_report_assets"): 20 | # create one 21 | os.mkdir(home_dir / "defog_report_assets") 22 | 23 | analysis_assets_dir = home_dir / "defog_report_assets" 24 | analysis_assets_dir = os.environ.get( 25 | "REPORT_ASSETS_DIR", analysis_assets_dir.as_posix() 26 | ) 27 | 28 | import matplotlib.pyplot as plt 29 | import seaborn as sns 30 | 31 | available_colors = plt.colormaps() 32 | 33 | sns.set_palette(["#009D94", "#FF5C1C", "#0057CF", "#691A6B", "#FFBD00"]) 34 | 35 | 36 | # make sure the query does not contain any malicious commands like drop, delete, etc. 37 | def safe_sql(query): 38 | if query is None: 39 | return False 40 | 41 | query = query.lower() 42 | if ( 43 | "drop" in query 44 | or "delete" in query 45 | or "truncate" in query 46 | or "append" in query 47 | or "insert" in query 48 | or "update" in query 49 | ): 50 | return False 51 | 52 | return True 53 | 54 | 55 | async def fetch_query_into_df( 56 | api_key: str, sql_query: str, temp: bool = False 57 | ) -> pd.DataFrame: 58 | """ 59 | Runs a sql query and stores the results in a pandas dataframe. 60 | """ 61 | 62 | # important note: this is currently a blocking call 63 | # TODO: add an option to the defog library to make this async 64 | if not temp: 65 | res = get_db_type_creds(api_key) 66 | db_type, db_creds = res 67 | else: 68 | db_type = "postgres" 69 | db_creds = { 70 | "host": "agents-postgres", 71 | "port": 5432, 72 | "database": "postgres", 73 | "user": "postgres", 74 | "password": "postgres", 75 | } 76 | 77 | # make sure not unsafe 78 | if not safe_sql(sql_query): 79 | raise ValueError("Unsafe SQL Query") 80 | 81 | colnames, data, new_sql_query = await asyncio.to_thread( 82 | execute_query, sql_query, api_key, db_type, db_creds, retries=2, temp=temp 83 | ) 84 | 85 | # again, make sure new query that was run is safe 86 | # make sure not unsafe 87 | if not safe_sql(new_sql_query): 88 | raise ValueError("Unsafe SQL Query") 89 | 90 | df = pd.DataFrame(data, columns=colnames) 91 | 92 | # if this df has any columns that have lists, remove those columns 93 | for col in df.columns: 94 | if df[col].apply(type).eq(list).any(): 95 | df = df.drop(col, axis=1) 96 | 97 | if new_sql_query: 98 | sql_query = new_sql_query 99 | else: 100 | sql_query = sql_query 101 | 102 | df.sql_query = sql_query 103 | return df, sql_query 104 | 105 | 106 | def natural_sort_function(l, ascending=True): 107 | """ 108 | Sorts a list or a pandas series in a natural way. 109 | If it's a list of numbers or datetimes, just sort them normally. 110 | If it's a string, check if there are numbers in the string, and sort them as a heirarchy of numbers. 111 | Example 1: ['a', 'b', 'c'] would be sorted as ['a', 'b', 'c'] 112 | Example 2: ['1', '10', '2'] would be sorted as ['1', '2', '10'] 113 | Example 3: ['a1', 'a10', 'a2'] would be sorted as ['a1', 'a2', 'a10'] 114 | Example 4: ['C1D1', 'C10D10', 'C2D2', 'C1D11'] would be sorted as ['C1D1', 'C1D11', 'C2D2', 'C10D10'] 115 | """ 116 | 117 | def convert(text): 118 | return int(text) if text.isdigit() else text 119 | 120 | def alphanum_key(key): 121 | return [convert(c) for c in re.split("([0-9]+)", key)] 122 | 123 | if type(l) == pd.Series: 124 | # TODO do this in a more efficient way 125 | l = l.tolist() 126 | 127 | l.sort(key=alphanum_key, reverse=not ascending) 128 | return l 129 | 130 | 131 | def natural_sort(df, time_column, units=None, ascending=True): 132 | """ 133 | Sorts a dataframe in a natural way, using the natural_sort_function. 134 | """ 135 | if df[time_column].dtype == "object": 136 | try: 137 | order = natural_sort_function(df[time_column].unique().tolist()) 138 | df[time_column] = pd.Categorical( 139 | df[time_column], categories=order, ordered=True 140 | ) 141 | except Exception as e: 142 | # if there are any errors, just pass 143 | pass 144 | if units: 145 | df = df.sort_values(by=[units, time_column], ascending=ascending) 146 | else: 147 | df = df.sort_values(by=time_column, ascending=ascending) 148 | else: 149 | df = df.sort_values(by=time_column, ascending=ascending) 150 | return df 151 | 152 | 153 | default_top_level_imports = "\n\n".join( 154 | [ 155 | "from agents.planner_executor.tool_helpers.tool_param_types import (", 156 | " DBColumn,", 157 | " DropdownSingleSelect,", 158 | " ListWithDefault,", 159 | " db_column_list_type_creator,", 160 | ")", 161 | "from tool_code_utilities import available_colors", 162 | "import pandas", 163 | "import pandas as pd", 164 | ] 165 | ) 166 | 167 | 168 | def add_default_imports(code): 169 | """ 170 | Adds the default imports to the code. 171 | """ 172 | return default_top_level_imports + "\n\n" + code 173 | 174 | 175 | def fix_savefig_calls(code): 176 | """ 177 | Fixes the savefig calls in the code by changing the path and always appending analysis_assets_dir variable to the path. 178 | """ 179 | # check both for double and single quote 180 | code = code.replace('savefig("', f'savefig({analysis_assets_dir} + "') 181 | code = code.replace("savefig('", f"savefig({analysis_assets_dir} + '") 182 | # remove jic we got two slashes 183 | code = code.replace("//", "/") 184 | return code 185 | -------------------------------------------------------------------------------- /utils.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | import inspect 3 | import re 4 | import json 5 | import traceback 6 | from colorama import Fore, Style 7 | import httpx 8 | import os 9 | 10 | import pandas as pd 11 | 12 | 13 | # custom list class with a overwrite_key attribute 14 | class YieldList(list): 15 | def __init__(self, *args, **kwargs): 16 | super().__init__(*args, **kwargs) 17 | self.overwrite_key = None 18 | 19 | 20 | def replace_whitespace(s): 21 | pattern = re.compile(r'",\s*"') 22 | return re.sub(pattern, '", "', s) 23 | 24 | 25 | def fix_JSON(json_message=None): 26 | result = json_message 27 | json_message = replace_whitespace(json_message) 28 | try: 29 | # First, try to load the JSON string as is 30 | result = json.loads(json_message) 31 | except json.JSONDecodeError as e: 32 | try: 33 | # If the JSON string can't be loaded, it means there are unescaped characters 34 | # Use Python's string escape to escape the string 35 | escaped_message = json_message.encode("unicode_escape").decode("utf-8") 36 | # Try loading the JSON string again 37 | result = json.loads(escaped_message) 38 | except Exception as e_inner: 39 | # If it still fails, print the error 40 | print("Error while trying to fix JSON string: ", str(e_inner)) 41 | return None 42 | except Exception as e: 43 | print("Unexpected error: ", str(e)) 44 | return None 45 | return result 46 | 47 | 48 | def api_response(ran_successfully=False, **extra): 49 | """Returns a JSON object with the ran_successfully key and any extra keys passed in.""" 50 | return {"ran_successfully": ran_successfully, **extra} 51 | 52 | 53 | def missing_param_error(param_name): 54 | """Returns a JSON object with the error_message key and a message saying that the param_name is missing.""" 55 | return api_response( 56 | error_message=f"Missing parameter in request: {param_name}. Request must contain question, agent, and/or generate_report/get_report params." 57 | ) 58 | 59 | 60 | def success_str(msg=""): 61 | return f"{Fore.GREEN}{Style.BRIGHT}{msg}{Style.RESET_ALL}" 62 | 63 | 64 | def error_str(msg=""): 65 | return f"{Fore.RED}{Style.BRIGHT}{msg}{Style.RESET_ALL}" 66 | 67 | 68 | def log_str(msg=""): 69 | return f"{Fore.BLUE}{Style.BRIGHT}{msg}{Style.RESET_ALL}" 70 | 71 | 72 | def warn_str(msg=""): 73 | return f"{Fore.YELLOW}{Style.BRIGHT}{msg}{Style.RESET_ALL}" 74 | 75 | 76 | def log_success(msg=""): 77 | print(f"{Fore.GREEN}{Style.BRIGHT}{msg}{Style.RESET_ALL}") 78 | 79 | 80 | def log_error(msg=""): 81 | print(f"{Fore.RED}{Style.BRIGHT}{msg}{Style.RESET_ALL}") 82 | 83 | 84 | def log_msg(msg=""): 85 | print(f"{Fore.BLUE}{Style.BRIGHT}{msg}{Style.RESET_ALL}") 86 | 87 | 88 | def log_warn(msg=""): 89 | print(f"{Fore.YELLOW}{Style.BRIGHT}{msg}{Style.RESET_ALL}") 90 | 91 | 92 | simple_tool_types = { 93 | "DBColumn": "Column name", 94 | "DBColumnList": "List of column names", 95 | "pandas.core.frame.DataFrame": "Dataframe", 96 | "str": "String", 97 | "int": "Integer", 98 | "float": "Float", 99 | "bool": "Boolean", 100 | "list[str]": "List of strings", 101 | "list": "List", 102 | "DropdownSingleSelect": "String", 103 | } 104 | 105 | 106 | def create_simple_tool_types(_type): 107 | # if type starts with DBColumnList... 108 | if _type.startswith("DBColumnList"): 109 | return "List of column names" 110 | if _type.startswith("ListWithDefault"): 111 | return "List" 112 | 113 | else: 114 | return simple_tool_types.get(_type, _type) 115 | 116 | 117 | def get_clean_plan(analysis_data): 118 | generated_plan = analysis_data.get("gen_steps", {}).get("steps", []) 119 | cleaned_plan = [] 120 | for item in generated_plan: 121 | cleaned_item = {} 122 | for key, value in item.items(): 123 | if key in [ 124 | "tool_name", 125 | "model_generated_inputs", 126 | "outputs_storage_keys", 127 | "done", 128 | "error_message", 129 | ]: 130 | # if key is model_generated_inputs, just change it to inputs 131 | if key == "model_generated_inputs": 132 | cleaned_item["inputs"] = value 133 | else: 134 | cleaned_item[key] = value 135 | cleaned_plan.append(cleaned_item) 136 | 137 | return cleaned_plan 138 | 139 | 140 | def snake_case(s): 141 | return re.sub(r"(?